diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..7fd6dc5d20bce5b8ca21095100b8beb5c6d1a0f4 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+AllinonSAM/eval/lits/output_demo.nii filter=lfs diff=lfs merge=lfs -text
+AllinonSAM/wandb/run-20241018_210810-zrrx3qz9/run-zrrx3qz9.wandb filter=lfs diff=lfs merge=lfs -text
+AllinonSAM/wandb/run-20241018_162125-i4stmvih/run-i4stmvih.wandb filter=lfs diff=lfs merge=lfs -text
+AllinonSAM/wandb/run-20240915_215641-1usjns7w/run-1usjns7w.wandb filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..a9d74e4b30a792f41845ba348f30f9732ec3f4a8
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,14 @@
+*.pyc
+*.cpython-38.pyc
+*.pth
+*.gz
+*.zip
+*.png
+*.jpg
+*.JPG
+*.tif
+*.bmp
+*.out
+*.txt
+AllinonSAM/wandb/
+__pycache__
\ No newline at end of file
diff --git a/AllinonSAM/LICENSE b/AllinonSAM/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..2332ba0cec0ca1c22d406afe7900e68b4ab7a688
--- /dev/null
+++ b/AllinonSAM/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Ahmed Heakl
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
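The four .gitattributes entries above route the large NIfTI volume and the wandb run logs through Git LFS, so regular Git history only stores pointer files. A minimal sketch of the commands that would typically produce entries like these, assuming Git LFS is installed and "git lfs install" has already been run in this clone (paths are the ones added above):

    # Register the large binaries with Git LFS; each call appends a
    # "filter=lfs diff=lfs merge=lfs -text" line to .gitattributes.
    git lfs track "AllinonSAM/eval/lits/output_demo.nii"
    git lfs track "AllinonSAM/wandb/run-20241018_210810-zrrx3qz9/run-zrrx3qz9.wandb"
    git lfs track "AllinonSAM/wandb/run-20241018_162125-i4stmvih/run-i4stmvih.wandb"
    git lfs track "AllinonSAM/wandb/run-20240915_215641-1usjns7w/run-1usjns7w.wandb"

    # Commit the updated attributes file alongside the tracked binaries.
    git add .gitattributes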
diff --git a/AllinonSAM/README.md b/AllinonSAM/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/AllinonSAM/__pycache__/axialnet.cpython-38.pyc b/AllinonSAM/__pycache__/axialnet.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6788a366353cb6d9fc0079c075bfdbd487239775 Binary files /dev/null and b/AllinonSAM/__pycache__/axialnet.cpython-38.pyc differ diff --git a/AllinonSAM/__pycache__/baselines.cpython-38.pyc b/AllinonSAM/__pycache__/baselines.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cd396883144a6029c017775d601bea3b0ee8175 Binary files /dev/null and b/AllinonSAM/__pycache__/baselines.cpython-38.pyc differ diff --git a/AllinonSAM/__pycache__/combined_model.cpython-38.pyc b/AllinonSAM/__pycache__/combined_model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfca060bb87d8ac8f50544156740ff83e0d7bf9e Binary files /dev/null and b/AllinonSAM/__pycache__/combined_model.cpython-38.pyc differ diff --git a/AllinonSAM/__pycache__/data_utils.cpython-312.pyc b/AllinonSAM/__pycache__/data_utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..844c09abccfc76dfc2b54d4b92fa07e72b3cf4ac Binary files /dev/null and b/AllinonSAM/__pycache__/data_utils.cpython-312.pyc differ diff --git a/AllinonSAM/__pycache__/data_utils.cpython-38.pyc b/AllinonSAM/__pycache__/data_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf53d0ea197d409b4b9653fdb9b403b8875189fd Binary files /dev/null and b/AllinonSAM/__pycache__/data_utils.cpython-38.pyc differ diff --git a/AllinonSAM/__pycache__/model.cpython-312.pyc b/AllinonSAM/__pycache__/model.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a0babbdbbe50593ae35e6845ae723b70e050b4e Binary files /dev/null and b/AllinonSAM/__pycache__/model.cpython-312.pyc differ diff --git a/AllinonSAM/__pycache__/model.cpython-38.pyc b/AllinonSAM/__pycache__/model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d10e5d1e03fe1899c4486dfa2fc5c23e46afb2d Binary files /dev/null and b/AllinonSAM/__pycache__/model.cpython-38.pyc differ diff --git a/AllinonSAM/__pycache__/test.cpython-312.pyc b/AllinonSAM/__pycache__/test.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..943b7239636c8eb1231a872767e032df8d8bfc86 Binary files /dev/null and b/AllinonSAM/__pycache__/test.cpython-312.pyc differ diff --git a/AllinonSAM/__pycache__/test.cpython-38.pyc b/AllinonSAM/__pycache__/test.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e43dd16354b451c92ebb2fd9f13c0136b366372 Binary files /dev/null and b/AllinonSAM/__pycache__/test.cpython-38.pyc differ diff --git a/AllinonSAM/__pycache__/train.cpython-312.pyc b/AllinonSAM/__pycache__/train.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79980d30b7e78bdf45bbad8289079cd5b6d921d7 Binary files /dev/null and b/AllinonSAM/__pycache__/train.cpython-312.pyc differ diff --git a/AllinonSAM/__pycache__/train.cpython-38.pyc b/AllinonSAM/__pycache__/train.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc27aebe36428ab7320d737566317f9af65f6840 Binary files /dev/null and b/AllinonSAM/__pycache__/train.cpython-38.pyc differ diff --git a/AllinonSAM/__pycache__/utils.cpython-312.pyc 
b/AllinonSAM/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93bd827a38db111d68371f77b116da4fa2ce003a Binary files /dev/null and b/AllinonSAM/__pycache__/utils.cpython-312.pyc differ diff --git a/AllinonSAM/__pycache__/utils.cpython-38.pyc b/AllinonSAM/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb67c9ee5505d46b0ed3a49194df1f06efdd14d5 Binary files /dev/null and b/AllinonSAM/__pycache__/utils.cpython-38.pyc differ diff --git a/AllinonSAM/__pycache__/vit_seg_configs.cpython-38.pyc b/AllinonSAM/__pycache__/vit_seg_configs.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9de597c8827f765d1d7ce274a9a0c981c8da832c Binary files /dev/null and b/AllinonSAM/__pycache__/vit_seg_configs.cpython-38.pyc differ diff --git a/AllinonSAM/__pycache__/vit_seg_modeling.cpython-38.pyc b/AllinonSAM/__pycache__/vit_seg_modeling.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6a6d2350566132b1078ba9893a018a867e18e06 Binary files /dev/null and b/AllinonSAM/__pycache__/vit_seg_modeling.cpython-38.pyc differ diff --git a/AllinonSAM/__pycache__/vit_seg_modeling_resnet_skip.cpython-38.pyc b/AllinonSAM/__pycache__/vit_seg_modeling_resnet_skip.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ed3e1898b1711b6c31a78bf343830b42d2b63f5 Binary files /dev/null and b/AllinonSAM/__pycache__/vit_seg_modeling_resnet_skip.cpython-38.pyc differ diff --git a/AllinonSAM/axialnet.py b/AllinonSAM/axialnet.py new file mode 100644 index 0000000000000000000000000000000000000000..dcf040763cbc7787b334dfd6414de0813da1838a --- /dev/null +++ b/AllinonSAM/axialnet.py @@ -0,0 +1,730 @@ +import pdb +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from utils import * +import pdb +import matplotlib.pyplot as plt + +import random + + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class AxialAttention(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups * 3) + + self.bn_output = nn.BatchNorm1d(out_planes * 2) + + # Position embedding + self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True) + query_index = torch.arange(kernel_size).unsqueeze(0) + key_index = torch.arange(kernel_size).unsqueeze(1) + relative_index = key_index - query_index + kernel_size - 1 + self.register_buffer('flatten_index', relative_index.view(-1)) + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + + def forward(self, x): + # pdb.set_trace() + if self.width: + x = x.permute(0, 2, 1, 3) + else: + x = x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + 
# Transformations + qkv = self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + # Calculate position embedding + all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2, self.kernel_size, self.kernel_size) + q_embedding, k_embedding, v_embedding = torch.split(all_embeddings, [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=0) + + qr = torch.einsum('bgci,cij->bgij', q, q_embedding) + kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3) + + qk = torch.einsum('bgci, bgcj->bgij', q, k) + + stacked_similarity = torch.cat([qk, qr, kr], dim=1) + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) + #stacked_similarity = self.bn_qr(qr) + self.bn_kr(kr) + self.bn_qk(qk) + # (N, groups, H, H, W) + similarity = F.softmax(stacked_similarity, dim=3) + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding) + stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H) + output = self.bn_output(stacked_output).view(N, W, self.out_planes, 2, H).sum(dim=-2) + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + nn.init.normal_(self.relative, 0., math.sqrt(1. / self.group_planes)) + +class AxialAttention_dynamic(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention_dynamic, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups * 3) + self.bn_output = nn.BatchNorm1d(out_planes * 2) + + # Priority on encoding + + ## Initial values + + self.f_qr = nn.Parameter(torch.tensor(0.1), requires_grad=False) + self.f_kr = nn.Parameter(torch.tensor(0.1), requires_grad=False) + self.f_sve = nn.Parameter(torch.tensor(0.1), requires_grad=False) + self.f_sv = nn.Parameter(torch.tensor(1.0), requires_grad=False) + + + # Position embedding + self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True) + query_index = torch.arange(kernel_size).unsqueeze(0) + key_index = torch.arange(kernel_size).unsqueeze(1) + relative_index = key_index - query_index + kernel_size - 1 + self.register_buffer('flatten_index', relative_index.view(-1)) + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + # self.print_para() + + def forward(self, x): + if self.width: + x = x.permute(0, 2, 1, 3) + else: + x = x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + # Transformations + qkv = 
self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + # Calculate position embedding + all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2, self.kernel_size, self.kernel_size) + q_embedding, k_embedding, v_embedding = torch.split(all_embeddings, [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=0) + qr = torch.einsum('bgci,cij->bgij', q, q_embedding) + kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3) + qk = torch.einsum('bgci, bgcj->bgij', q, k) + + + # multiply by factors + qr = torch.mul(qr, self.f_qr) + kr = torch.mul(kr, self.f_kr) + + stacked_similarity = torch.cat([qk, qr, kr], dim=1) + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) + #stacked_similarity = self.bn_qr(qr) + self.bn_kr(kr) + self.bn_qk(qk) + # (N, groups, H, H, W) + similarity = F.softmax(stacked_similarity, dim=3) + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding) + + # multiply by factors + sv = torch.mul(sv, self.f_sv) + sve = torch.mul(sve, self.f_sve) + + stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H) + output = self.bn_output(stacked_output).view(N, W, self.out_planes, 2, H).sum(dim=-2) + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + nn.init.normal_(self.relative, 0., math.sqrt(1. 
/ self.group_planes)) + +class AxialAttention_wopos(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention_wopos, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups ) + + self.bn_output = nn.BatchNorm1d(out_planes * 1) + + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + + def forward(self, x): + if self.width: + x = x.permute(0, 2, 1, 3) + else: + x = x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + # Transformations + qkv = self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + qk = torch.einsum('bgci, bgcj->bgij', q, k) + + stacked_similarity = self.bn_similarity(qk).reshape(N * W, 1, self.groups, H, H).sum(dim=1).contiguous() + + similarity = F.softmax(stacked_similarity, dim=3) + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + + sv = sv.reshape(N*W,self.out_planes * 1, H).contiguous() + output = self.bn_output(sv).reshape(N, W, self.out_planes, 1, H).sum(dim=-2).contiguous() + + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + # nn.init.normal_(self.relative, 0., math.sqrt(1. 
/ self.group_planes)) + +#end of attn definition + +class AxialBlock(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + # print(out.shape) + out = self.hight_block(out) + out = self.width_block(out) + out = self.relu(out) + + out = self.conv_up(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class AxialBlock_dynamic(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlock_dynamic, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention_dynamic(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention_dynamic(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.hight_block(out) + out = self.width_block(out) + out = self.relu(out) + + out = self.conv_up(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class AxialBlock_wopos(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlock_wopos, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + # print(kernel_size) + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.conv1 = nn.Conv2d(width, width, kernel_size = 1) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention_wopos(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention_wopos(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = 
nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + # pdb.set_trace() + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + # print(out.shape) + out = self.hight_block(out) + out = self.width_block(out) + + out = self.relu(out) + + out = self.conv_up(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +#end of block definition + + +class ResAxialAttentionUNet(nn.Module): + + def __init__(self, block, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(ResAxialAttentionUNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.conv2 = nn.Conv2d(self.inplanes, 128, kernel_size=3, stride=1, padding=1, bias=False) + self.conv3 = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.bn2 = norm_layer(128) + self.bn3 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + 
self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + + # AxialAttention Encoder + # pdb.set_trace() + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x1 = self.layer1(x) + + x2 = self.layer2(x1) + # print(x2.shape) + x3 = self.layer3(x2) + # print(x3.shape) + x4 = self.layer4(x3) + + x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x4) + x = F.relu(F.interpolate(self.decoder2(x) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x3) + x = F.relu(F.interpolate(self.decoder3(x) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + x = self.adjust(F.relu(x)) + # pdb.set_trace() + return x + + def forward(self, x): + return self._forward_impl(x) + +class medt_net(nn.Module): + + def __init__(self, block, block_2, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(medt_net, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.conv2 = nn.Conv2d(self.inplanes, 128, kernel_size=3, stride=1, padding=1, bias=False) + self.conv3 = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.bn2 = norm_layer(128) + self.bn3 = norm_layer(self.inplanes) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + # self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + # dilate=replace_stride_with_dilation[1]) + # self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + # dilate=replace_stride_with_dilation[2]) + + # Decoder + # self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + # self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + # self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, 
padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + + self.conv1_p = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.conv2_p = nn.Conv2d(self.inplanes,128, kernel_size=3, stride=1, padding=1, + bias=False) + self.conv3_p = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1_p = norm_layer(self.inplanes) + self.bn2_p = norm_layer(128) + self.bn3_p = norm_layer(self.inplanes) + + self.relu_p = nn.ReLU(inplace=True) + + img_size_p = img_size // 4 + + self.layer1_p = self._make_layer(block_2, int(128 * s), layers[0], kernel_size= (img_size_p//2)) + self.layer2_p = self._make_layer(block_2, int(256 * s), layers[1], stride=2, kernel_size=(img_size_p//2), + dilate=replace_stride_with_dilation[0]) + self.layer3_p = self._make_layer(block_2, int(512 * s), layers[2], stride=2, kernel_size=(img_size_p//4), + dilate=replace_stride_with_dilation[1]) + self.layer4_p = self._make_layer(block_2, int(1024 * s), layers[3], stride=2, kernel_size=(img_size_p//8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1_p = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2_p = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3_p = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4_p = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5_p = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + + self.decoderf = nn.Conv2d(int(128*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust_p = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft_p = nn.Softmax(dim=1) + + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + + xin = x.clone() + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + # x = F.max_pool2d(x,2,2) + x = self.relu(x) + + # x = self.maxpool(x) + # pdb.set_trace() + x1 = self.layer1(x) + # print(x1.shape) + x2 = self.layer2(x1) + # print(x2.shape) + # x3 = self.layer3(x2) + # # print(x3.shape) + # x4 = 
self.layer4(x3) + # # print(x4.shape) + # x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x4) + # x = F.relu(F.interpolate(self.decoder2(x4) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x3) + # x = F.relu(F.interpolate(self.decoder3(x3) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x2) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + # print(x.shape) + + # end of full image training + + # y_out = torch.ones((1,2,128,128)) + x_loc = x.clone() + # x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + #start + for i in range(0,4): + for j in range(0,4): + + x_p = xin[:,:,32*i:32*(i+1),32*j:32*(j+1)] + # begin patch wise + x_p = self.conv1_p(x_p) + x_p = self.bn1_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + + x_p = self.conv2_p(x_p) + x_p = self.bn2_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + x_p = self.conv3_p(x_p) + x_p = self.bn3_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + + # x = self.maxpool(x) + # pdb.set_trace() + x1_p = self.layer1_p(x_p) + # print(x1.shape) + x2_p = self.layer2_p(x1_p) + # print(x2.shape) + x3_p = self.layer3_p(x2_p) + # # print(x3.shape) + x4_p = self.layer4_p(x3_p) + + x_p = F.relu(F.interpolate(self.decoder1_p(x4_p), scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x4_p) + x_p = F.relu(F.interpolate(self.decoder2_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x3_p) + x_p = F.relu(F.interpolate(self.decoder3_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x2_p) + x_p = F.relu(F.interpolate(self.decoder4_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x1_p) + x_p = F.relu(F.interpolate(self.decoder5_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + + x_loc[:,:,32*i:32*(i+1),32*j:32*(j+1)] = x_p + + x = torch.add(x,x_loc) + x = F.relu(self.decoderf(x)) + + x = self.adjust(F.relu(x)) + + # pdb.set_trace() + return x + + def forward(self, x, text_dummy): + return self.soft(self._forward_impl(x)),0 + + +def axialunet(pretrained=False, **kwargs): + model = ResAxialAttentionUNet(AxialBlock, [1, 2, 4, 1], s= 0.125, **kwargs) + return model + +def gated(pretrained=False, **kwargs): + model = ResAxialAttentionUNet(AxialBlock_dynamic, [1, 2, 4, 1], s= 0.125, **kwargs) + return model + +def MedT(pretrained=False, **kwargs): + model = medt_net(AxialBlock_dynamic,AxialBlock_wopos, [1, 2, 4, 1], s= 0.125, **kwargs) + return model + +def logo(pretrained=False, **kwargs): + model = medt_net(AxialBlock,AxialBlock, [1, 2, 4, 1], s= 0.125, **kwargs) + return model + +# EOF \ No newline at end of file diff --git a/AllinonSAM/baselines.py b/AllinonSAM/baselines.py new file mode 100644 index 0000000000000000000000000000000000000000..eac534aab2735a3ed9325f7ed58f569d2f655d69 --- /dev/null +++ b/AllinonSAM/baselines.py @@ -0,0 +1,630 @@ +import torch +import torch.nn as nn +from backbones_unet.model.unet import Unet +import torch.nn.functional as F +from utils import * +__all__ = ['UNext'] + +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ +import math + +class UNet(nn.Module): + def __init__(self, in_channels = 3, out_channels = 1, init_features = 32, pretrained=True , back_bone=None): + super().__init__() + if back_bone is None: + self.model = torch.hub.load( + 
'mateuszbuda/brain-segmentation-pytorch', 'unet', in_channels=in_channels, out_channels=out_channels, + init_features=init_features, pretrained=pretrained + ) + else: + self.model = UNet( + in_channels= in_channels, + out_channels= out_channels, + backbone=back_bone + ) + + self.soft = nn.Softmax(dim =1) + def forward(self, x, text_dummy): + return self.soft(self.model(x)),0 + + +def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d: + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False) + +class shiftmlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., shift_size=5): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.dim = in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.dwconv = DWConv(hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + self.shift_size = shift_size + self.pad = shift_size // 2 + + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + + def forward(self, x, H, W): + # pdb.set_trace() + B, N, C = x.shape + + xn = x.transpose(1, 2).view(B, C, H, W).contiguous() + xn = F.pad(xn, (self.pad, self.pad, self.pad, self.pad) , "constant", 0) + xs = torch.chunk(xn, self.shift_size, 1) + x_shift = [torch.roll(x_c, shift, 2) for x_c, shift in zip(xs, range(-self.pad, self.pad+1))] + x_cat = torch.cat(x_shift, 1) + x_cat = torch.narrow(x_cat, 2, self.pad, H) + x_s = torch.narrow(x_cat, 3, self.pad, W) + + + x_s = x_s.reshape(B,C,H*W).contiguous() + x_shift_r = x_s.transpose(1,2) + + + x = self.fc1(x_shift_r) + + x = self.dwconv(x, H, W) + x = self.act(x) + x = self.drop(x) + + xn = x.transpose(1, 2).view(B, C, H, W).contiguous() + xn = F.pad(xn, (self.pad, self.pad, self.pad, self.pad) , "constant", 0) + xs = torch.chunk(xn, self.shift_size, 1) + x_shift = [torch.roll(x_c, shift, 3) for x_c, shift in zip(xs, range(-self.pad, self.pad+1))] + x_cat = torch.cat(x_shift, 1) + x_cat = torch.narrow(x_cat, 2, self.pad, H) + x_s = torch.narrow(x_cat, 3, self.pad, W) + x_s = x_s.reshape(B,C,H*W).contiguous() + x_shift_c = x_s.transpose(1,2) + + x = self.fc2(x_shift_c) + x = self.drop(x) + return x + + + +class shiftedBlock(nn.Module): + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1): + super().__init__() + + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = shiftmlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x, H, W): + + x = x + self.drop_path(self.mlp(self.norm2(x), H, W)) + return x + + +class DWConv(nn.Module): + def __init__(self, dim=768): + super(DWConv, self).__init__() + self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) + + def forward(self, x, H, W): + B, N, C = x.shape + x = x.transpose(1, 2).view(B, C, H, W) + x = self.dwconv(x) + x = x.flatten(2).transpose(1, 2) + + return x + +class OverlapPatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1] + self.num_patches = self.H * self.W + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride, + padding=(patch_size[0] // 2, patch_size[1] // 2)) + self.norm = nn.LayerNorm(embed_dim) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x): + x = self.proj(x) + _, _, H, W = x.shape + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + + return x, H, W + + +class UNext(nn.Module): + + ## Conv 3 + MLP 2 + shifted MLP + + def __init__(self, num_classes, input_channels=3, deep_supervision=False,img_size=256, patch_size=16, in_chans=3, embed_dims=[ 128, 160, 256], + num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0., + attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, + depths=[1, 1, 1], sr_ratios=[8, 4, 2, 1], **kwargs): + super().__init__() + + self.encoder1 = nn.Conv2d(3, 16, 3, stride=1, padding=1) + self.encoder2 = nn.Conv2d(16, 32, 3, stride=1, padding=1) + self.encoder3 = nn.Conv2d(32, 128, 3, stride=1, padding=1) + + self.ebn1 = nn.BatchNorm2d(16) + self.ebn2 = nn.BatchNorm2d(32) + self.ebn3 = nn.BatchNorm2d(128) + + self.norm3 = norm_layer(embed_dims[1]) + self.norm4 = norm_layer(embed_dims[2]) + + self.dnorm3 = norm_layer(160) + self.dnorm4 = norm_layer(128) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] + + self.block1 = nn.ModuleList([shiftedBlock( + dim=embed_dims[1], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, + 
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[0], norm_layer=norm_layer, + sr_ratio=sr_ratios[0])]) + + self.block2 = nn.ModuleList([shiftedBlock( + dim=embed_dims[2], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[1], norm_layer=norm_layer, + sr_ratio=sr_ratios[0])]) + + self.dblock1 = nn.ModuleList([shiftedBlock( + dim=embed_dims[1], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[0], norm_layer=norm_layer, + sr_ratio=sr_ratios[0])]) + + self.dblock2 = nn.ModuleList([shiftedBlock( + dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[1], norm_layer=norm_layer, + sr_ratio=sr_ratios[0])]) + + self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0], + embed_dim=embed_dims[1]) + self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1], + embed_dim=embed_dims[2]) + + self.decoder1 = nn.Conv2d(256, 160, 3, stride=1,padding=1) + self.decoder2 = nn.Conv2d(160, 128, 3, stride=1, padding=1) + self.decoder3 = nn.Conv2d(128, 32, 3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(32, 16, 3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(16, 16, 3, stride=1, padding=1) + + self.dbn1 = nn.BatchNorm2d(160) + self.dbn2 = nn.BatchNorm2d(128) + self.dbn3 = nn.BatchNorm2d(32) + self.dbn4 = nn.BatchNorm2d(16) + + self.final = nn.Conv2d(16, num_classes, kernel_size=1) + + self.soft = nn.Softmax(dim =1) + + def forward(self, x, text_dummy): + + B = x.shape[0] + ### Encoder + ### Conv Stage + + ### Stage 1 + out = F.relu(F.max_pool2d(self.ebn1(self.encoder1(x)),2,2)) + t1 = out + ### Stage 2 + out = F.relu(F.max_pool2d(self.ebn2(self.encoder2(out)),2,2)) + t2 = out + ### Stage 3 + out = F.relu(F.max_pool2d(self.ebn3(self.encoder3(out)),2,2)) + t3 = out + + ### Tokenized MLP Stage + ### Stage 4 + + out,H,W = self.patch_embed3(out) + for i, blk in enumerate(self.block1): + out = blk(out, H, W) + out = self.norm3(out) + out = out.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + t4 = out + + ### Bottleneck + + out ,H,W= self.patch_embed4(out) + for i, blk in enumerate(self.block2): + out = blk(out, H, W) + out = self.norm4(out) + out = out.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + + ### Stage 4 + + out = F.relu(F.interpolate(self.dbn1(self.decoder1(out)),scale_factor=(2,2),mode ='bilinear')) + + out = torch.add(out,t4) + _,_,H,W = out.shape + out = out.flatten(2).transpose(1,2) + for i, blk in enumerate(self.dblock1): + out = blk(out, H, W) + + ### Stage 3 + + out = self.dnorm3(out) + out = out.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + out = F.relu(F.interpolate(self.dbn2(self.decoder2(out)),scale_factor=(2,2),mode ='bilinear')) + out = torch.add(out,t3) + _,_,H,W = out.shape + out = out.flatten(2).transpose(1,2) + + for i, blk in enumerate(self.dblock2): + out = blk(out, H, W) + + out = self.dnorm4(out) + out = out.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + + out = F.relu(F.interpolate(self.dbn3(self.decoder3(out)),scale_factor=(2,2),mode ='bilinear')) + out = torch.add(out,t2) + out = F.relu(F.interpolate(self.dbn4(self.decoder4(out)),scale_factor=(2,2),mode ='bilinear')) + out = torch.add(out,t1) + out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear')) + 
+ return self.soft(self.final(out)),0 + + +class UNext_S(nn.Module): + + ## Conv 3 + MLP 2 + shifted MLP w less parameters + + def __init__(self, num_classes, input_channels=3, deep_supervision=False,img_size=256, patch_size=16, in_chans=3, embed_dims=[32, 64, 128, 512], + num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0., + attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, + depths=[1, 1, 1], sr_ratios=[8, 4, 2, 1], **kwargs): + super().__init__() + + self.encoder1 = nn.Conv2d(3, 8, 3, stride=1, padding=1) + self.encoder2 = nn.Conv2d(8, 16, 3, stride=1, padding=1) + self.encoder3 = nn.Conv2d(16, 32, 3, stride=1, padding=1) + + self.ebn1 = nn.BatchNorm2d(8) + self.ebn2 = nn.BatchNorm2d(16) + self.ebn3 = nn.BatchNorm2d(32) + + self.norm3 = norm_layer(embed_dims[1]) + self.norm4 = norm_layer(embed_dims[2]) + + self.dnorm3 = norm_layer(64) + self.dnorm4 = norm_layer(32) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] + + self.block1 = nn.ModuleList([shiftedBlock( + dim=embed_dims[1], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[0], norm_layer=norm_layer, + sr_ratio=sr_ratios[0])]) + + self.block2 = nn.ModuleList([shiftedBlock( + dim=embed_dims[2], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[1], norm_layer=norm_layer, + sr_ratio=sr_ratios[0])]) + + self.dblock1 = nn.ModuleList([shiftedBlock( + dim=embed_dims[1], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[0], norm_layer=norm_layer, + sr_ratio=sr_ratios[0])]) + + self.dblock2 = nn.ModuleList([shiftedBlock( + dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=1, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[1], norm_layer=norm_layer, + sr_ratio=sr_ratios[0])]) + + self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0], + embed_dim=embed_dims[1]) + self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1], + embed_dim=embed_dims[2]) + + self.decoder1 = nn.Conv2d(128, 64, 3, stride=1,padding=1) + self.decoder2 = nn.Conv2d(64, 32, 3, stride=1, padding=1) + self.decoder3 = nn.Conv2d(32, 16, 3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(16, 8, 3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(8, 8, 3, stride=1, padding=1) + + self.dbn1 = nn.BatchNorm2d(64) + self.dbn2 = nn.BatchNorm2d(32) + self.dbn3 = nn.BatchNorm2d(16) + self.dbn4 = nn.BatchNorm2d(8) + + self.final = nn.Conv2d(8, num_classes, kernel_size=1) + + self.soft = nn.Softmax(dim =1) + + def forward(self, x, text_dummy): + + B = x.shape[0] + ### Encoder + ### Conv Stage + + ### Stage 1 + out = F.relu(F.max_pool2d(self.ebn1(self.encoder1(x)),2,2)) + t1 = out + ### Stage 2 + out = F.relu(F.max_pool2d(self.ebn2(self.encoder2(out)),2,2)) + t2 = out + ### Stage 3 + out = F.relu(F.max_pool2d(self.ebn3(self.encoder3(out)),2,2)) + t3 = out + + ### Tokenized MLP Stage + ### Stage 4 + + out,H,W = self.patch_embed3(out) + for i, blk in enumerate(self.block1): + out = blk(out, H, W) + out = self.norm3(out) + out = out.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + t4 = out + + ### Bottleneck + + out ,H,W= self.patch_embed4(out) + for i, blk in enumerate(self.block2): + out = blk(out, H, 
W) + out = self.norm4(out) + out = out.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + + ### Stage 4 + + out = F.relu(F.interpolate(self.dbn1(self.decoder1(out)),scale_factor=(2,2),mode ='bilinear')) + + out = torch.add(out,t4) + _,_,H,W = out.shape + out = out.flatten(2).transpose(1,2) + for i, blk in enumerate(self.dblock1): + out = blk(out, H, W) + + ### Stage 3 + + out = self.dnorm3(out) + out = out.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + out = F.relu(F.interpolate(self.dbn2(self.decoder2(out)),scale_factor=(2,2),mode ='bilinear')) + out = torch.add(out,t3) + _,_,H,W = out.shape + out = out.flatten(2).transpose(1,2) + + for i, blk in enumerate(self.dblock2): + out = blk(out, H, W) + + out = self.dnorm4(out) + out = out.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + + out = F.relu(F.interpolate(self.dbn3(self.decoder3(out)),scale_factor=(2,2),mode ='bilinear')) + out = torch.add(out,t2) + out = F.relu(F.interpolate(self.dbn4(self.decoder4(out)),scale_factor=(2,2),mode ='bilinear')) + out = torch.add(out,t1) + out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear')) + + return self.final(out) + + +class medt_net(nn.Module): + + def __init__(self, block, block_2, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(medt_net, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.conv2 = nn.Conv2d(self.inplanes, 128, kernel_size=3, stride=1, padding=1, bias=False) + self.conv3 = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.bn2 = norm_layer(128) + self.bn3 = norm_layer(self.inplanes) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + + self.conv1_p = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.conv2_p = nn.Conv2d(self.inplanes,128, kernel_size=3, stride=1, padding=1, + bias=False) + self.conv3_p = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, + bias=False) + self.bn1_p = norm_layer(self.inplanes) + self.bn2_p = norm_layer(128) + self.bn3_p = norm_layer(self.inplanes) + + self.relu_p = nn.ReLU(inplace=True) + + img_size_p = img_size // 4 + + self.layer1_p = self._make_layer(block_2, int(128 * s), layers[0], kernel_size= (img_size_p//2)) + self.layer2_p = 
self._make_layer(block_2, int(256 * s), layers[1], stride=2, kernel_size=(img_size_p//2), + dilate=replace_stride_with_dilation[0]) + self.layer3_p = self._make_layer(block_2, int(512 * s), layers[2], stride=2, kernel_size=(img_size_p//4), + dilate=replace_stride_with_dilation[1]) + self.layer4_p = self._make_layer(block_2, int(1024 * s), layers[3], stride=2, kernel_size=(img_size_p//8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1_p = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2_p = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3_p = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4_p = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5_p = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + + self.decoderf = nn.Conv2d(int(128*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust_p = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft_p = nn.Softmax(dim=1) + + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + + xin = x.clone() + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x1 = self.layer1(x) + x2 = self.layer2(x1) + + x = F.relu(F.interpolate(self.decoder4(x2) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + + # end of full image training + + x_loc = x.clone() + #start + for i in range(0,4): + for j in range(0,4): + + x_p = xin[:,:,32*i:32*(i+1),32*j:32*(j+1)] + # begin patch wise + x_p = self.conv1_p(x_p) + x_p = self.bn1_p(x_p) + x_p = self.relu(x_p) + + x_p = self.conv2_p(x_p) + x_p = self.bn2_p(x_p) + x_p = self.relu(x_p) + x_p = self.conv3_p(x_p) + x_p = self.bn3_p(x_p) + x_p = self.relu(x_p) + + x1_p = self.layer1_p(x_p) + x2_p = self.layer2_p(x1_p) + x3_p = self.layer3_p(x2_p) + x4_p = self.layer4_p(x3_p) + + x_p = F.relu(F.interpolate(self.decoder1_p(x4_p), scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x4_p) + x_p = F.relu(F.interpolate(self.decoder2_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x3_p) + x_p = F.relu(F.interpolate(self.decoder3_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x2_p) + x_p = F.relu(F.interpolate(self.decoder4_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = 
torch.add(x_p, x1_p) + x_p = F.relu(F.interpolate(self.decoder5_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + + x_loc[:,:,32*i:32*(i+1),32*j:32*(j+1)] = x_p + + x = torch.add(x,x_loc) + x = F.relu(self.decoderf(x)) + + x = self.adjust(F.relu(x)) + + return x + + def forward(self, x, text_dummy): + return self._forward_impl(x) diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_0_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f51d76b96c469c2a2b82de5616afc97c1f7d4d3d Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_100_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_0_img_1.png 
b/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_10_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f51d76b96c469c2a2b82de5616afc97c1f7d4d3d Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_110_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_1_img_1.png new file mode 100644 index 
0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_120_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f51d76b96c469c2a2b82de5616afc97c1f7d4d3d Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_130_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_140_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac 
Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_150_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_160_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and 
b/AllinonSAM/biastuning/DIAS/labels/epoch_170_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_180_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_190_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_0_img_1.png differ diff --git 
a/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_200_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_20_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_210_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_0_img_0.png 
b/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f51d76b96c469c2a2b82de5616afc97c1f7d4d3d Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_220_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_230_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_1_img_0.png new file mode 100644 
index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_240_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_250_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_260_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_0_img_0.png new file mode 100644 index 
0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_270_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_280_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f51d76b96c469c2a2b82de5616afc97c1f7d4d3d 
Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_290_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_300_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_30_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_0_img_0.png 
differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_310_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_320_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_1_img_1.png 
b/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_330_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_340_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f51d76b96c469c2a2b82de5616afc97c1f7d4d3d Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_350_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_0_img_1.png new file mode 100644 
index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..72f73623ceeef5d75b89de5d50c44826c9a6608f Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_360_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_370_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_1_img_1.png new file mode 100644 index 
0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_380_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_390_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_400_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f51d76b96c469c2a2b82de5616afc97c1f7d4d3d Binary 
files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_40_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f51d76b96c469c2a2b82de5616afc97c1f7d4d3d Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_410_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..cacb89de7294c2a56d12d0789c10ab902c983ef7 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..304e1ed3151fed1e0fe680597aab9ad6415a87c8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_420_batch_1_img_1.png differ 
diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_430_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..03adf6e87c75bd3a1a6bc8b54bbefa2f0927b7ac Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e081a106b1e68dc4c9731a636fa4a76c8d044d80 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ca1525dce53a1e9ff3665246fe7cec8be7b77eef Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_440_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_450_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/labels/epoch_450_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..71b4eee587bd622044a39c05f6a282334e693355 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_450_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_450_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/labels/epoch_450_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..133dbe6bfc1dbfff6e6c678433f9e052c74d2915 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_450_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/labels/epoch_450_batch_1_img_0.png 
b/AllinonSAM/biastuning/DIAS/labels/epoch_450_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f79520a6943dbcbb38fb3cdef13eef45bb37a3d8 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/labels/epoch_450_batch_1_img_0.png differ
[Binary PNG additions: AllinonSAM/biastuning/DIAS/labels/epoch_{E}_batch_{B}_img_{I}.png for E in {50, 60, 70, 80, 90, 450, 460, 470, 480, 490} and AllinonSAM/biastuning/DIAS/pred_labels/epoch_{E}_batch_{B}_img_{I}.png for E in {0, 10, 20, ..., 460} plus epoch_470_batch_0_img_0, with B, I in {0, 1}. Each entry is a new file (mode 100644) recorded as "Binary files /dev/null and b/<path> differ"; the labels images map to a small set of recurring blobs, while every pred_labels image points to the same blob eb2faf2dc8ceb9668304803e04b3ae3f19c4b799.]
diff --git
a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_470_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_470_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_470_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_470_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_470_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_470_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_470_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_470_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_470_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_480_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and 
b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_490_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_50_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_60_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_0_img_0.png new file mode 100644 index 
0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_70_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_1_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_80_batch_1_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_0_img_0.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_0_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_0_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_0_img_1.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_1_img_0.png 
b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_1_img_0.png differ diff --git a/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_1_img_1.png b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb2faf2dc8ceb9668304803e04b3ae3f19c4b799 Binary files /dev/null and b/AllinonSAM/biastuning/DIAS/pred_labels/epoch_90_batch_1_img_1.png differ diff --git a/AllinonSAM/clip/__init__.py b/AllinonSAM/clip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dcc5619538c0f7c782508bdbd9587259d805e0d9 --- /dev/null +++ b/AllinonSAM/clip/__init__.py @@ -0,0 +1 @@ +from .clip import * diff --git a/AllinonSAM/clip/__pycache__/__init__.cpython-312.pyc b/AllinonSAM/clip/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a256106df1e052956aedad4aece637183186891 Binary files /dev/null and b/AllinonSAM/clip/__pycache__/__init__.cpython-312.pyc differ diff --git a/AllinonSAM/clip/__pycache__/__init__.cpython-38.pyc b/AllinonSAM/clip/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c1997cdca7c4d4fd816caea40263108fc71f953 Binary files /dev/null and b/AllinonSAM/clip/__pycache__/__init__.cpython-38.pyc differ diff --git a/AllinonSAM/clip/__pycache__/clip.cpython-312.pyc b/AllinonSAM/clip/__pycache__/clip.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4aad71a6c52c27ce87c444a48030010fd426ffe Binary files /dev/null and b/AllinonSAM/clip/__pycache__/clip.cpython-312.pyc differ diff --git a/AllinonSAM/clip/__pycache__/clip.cpython-38.pyc b/AllinonSAM/clip/__pycache__/clip.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2cbf4be82f952ce9861379050b978fb0faee954 Binary files /dev/null and b/AllinonSAM/clip/__pycache__/clip.cpython-38.pyc differ diff --git a/AllinonSAM/clip/__pycache__/model.cpython-312.pyc b/AllinonSAM/clip/__pycache__/model.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2dab4fd3c700b88c842eb372b36ee49044c435d Binary files /dev/null and b/AllinonSAM/clip/__pycache__/model.cpython-312.pyc differ diff --git a/AllinonSAM/clip/__pycache__/model.cpython-38.pyc b/AllinonSAM/clip/__pycache__/model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abf02f724e7754bf6317992340032ef28f191e92 Binary files /dev/null and b/AllinonSAM/clip/__pycache__/model.cpython-38.pyc differ diff --git a/AllinonSAM/clip/__pycache__/simple_tokenizer.cpython-312.pyc b/AllinonSAM/clip/__pycache__/simple_tokenizer.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52d94d5d9815b404d5439b7cc8e0416cc5048391 Binary files /dev/null and b/AllinonSAM/clip/__pycache__/simple_tokenizer.cpython-312.pyc differ diff --git a/AllinonSAM/clip/__pycache__/simple_tokenizer.cpython-38.pyc b/AllinonSAM/clip/__pycache__/simple_tokenizer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58b4396e32c94865261ee51b89da452db7510b6a Binary files /dev/null and b/AllinonSAM/clip/__pycache__/simple_tokenizer.cpython-38.pyc differ diff --git a/AllinonSAM/clip/bpe_simple_vocab_16e6.txt.gz 
b/AllinonSAM/clip/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000000000000000000000000000000000000..36a15856e00a06a9fbed8cdd34d2393fea4a3113 --- /dev/null +++ b/AllinonSAM/clip/bpe_simple_vocab_16e6.txt.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a +size 1356917 diff --git a/AllinonSAM/clip/clip.py b/AllinonSAM/clip/clip.py new file mode 100644 index 0000000000000000000000000000000000000000..b1e4140861a7232189999e05a9d716c0e4911f79 --- /dev/null +++ b/AllinonSAM/clip/clip.py @@ -0,0 +1,221 @@ +import hashlib +import os +import urllib +import warnings +from typing import Union, List + +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize +from tqdm import tqdm + +from .model import build_model +from .simple_tokenizer import SimpleTokenizer as _Tokenizer + +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +if torch.__version__.split(".") < ["1", "7", "1"]: + warnings.warn("PyTorch version 1.7.1 or higher is recommended") + + +__all__ = ["available_models", "load", "tokenize"] +_tokenizer = _Tokenizer() + +_MODELS = { + "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", + "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", + "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", + "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", + "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", +} + + +def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") + + return download_target + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + lambda image: image.convert("RGB"), + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 
0.26130258, 0.27577711)), + ]) + + +def available_models() -> List[str]: + """Returns the names of available CLIP models""" + return list(_MODELS.keys()) + + +def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=False): + """Load a CLIP model + + Parameters + ---------- + name : str + A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict + + device : Union[str, torch.device] + The device to put the loaded model + + jit : bool + Whether to load the optimized JIT model or more hackable non-JIT model (default). + + Returns + ------- + model : torch.nn.Module + The CLIP model + + preprocess : Callable[[PIL.Image], torch.Tensor] + A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input + """ + if name in _MODELS: + model_path = _download(_MODELS[name]) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {available_models()}") + + try: + # loading JIT archive + model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() + state_dict = None + except RuntimeError: + # loading saved state dict + if jit: + warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead") + jit = False + state_dict = torch.load(model_path, map_location="cpu") + + if not jit: + model = build_model(state_dict or model.state_dict()).to(device) + if str(device) == "cpu": + model.float() + return model, _transform(model.visual.input_resolution) + + # patch the device names + device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) + device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] + + def patch_device(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("prim::Constant"): + if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): + node.copyAttributes(device_node) + + model.apply(patch_device) + patch_device(model.encode_image) + patch_device(model.encode_text) + + # patch dtype to float32 on CPU + if str(device) == "cpu": + float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) + float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] + float_node = float_input.node() + + def patch_float(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("aten::to"): + inputs = list(node.inputs()) + for i in [1, 2]: # dtype can be the second or third argument to aten::to() + if inputs[i].node()["value"] == 5: + inputs[i].node().copyAttributes(float_node) + + model.apply(patch_float) + patch_float(model.encode_image) + patch_float(model.encode_text) + + model.float() + + return model, _transform(model.input_resolution.item()) + + +def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to 
tokenize + + context_length : int + The context length to use; all CLIP models use 77 as the context length + + truncate: bool + Whether to truncate the text in case its encoding is longer than the context length + + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] + """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder["<|startoftext|>"] + eot_token = _tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + if truncate: + tokens = tokens[:context_length] + tokens[-1] = eot_token + else: + raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") + result[i, :len(tokens)] = torch.tensor(tokens) + + return result diff --git a/AllinonSAM/clip/model.py b/AllinonSAM/clip/model.py new file mode 100644 index 0000000000000000000000000000000000000000..e73a60b4a2c1e436d117e03d6a1f2148447b5b75 --- /dev/null +++ b/AllinonSAM/clip/model.py @@ -0,0 +1,432 @@ +from collections import OrderedDict +from typing import Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1): + super().__init__() + + # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1 + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + + self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() + + self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + + self.relu = nn.ReLU(inplace=True) + self.downsample = None + self.stride = stride + + if stride > 1 or inplanes != planes * Bottleneck.expansion: + # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1 + self.downsample = nn.Sequential(OrderedDict([ + ("-1", nn.AvgPool2d(stride)), + ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)), + ("1", nn.BatchNorm2d(planes * self.expansion)) + ])) + + def forward(self, x: torch.Tensor): + identity = x + + out = self.relu(self.bn1(self.conv1(x))) + out = self.relu(self.bn2(self.conv2(out))) + out = self.avgpool(out) + out = self.bn3(self.conv3(out)) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + return out + + +class AttentionPool2d(nn.Module): + def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): + super().__init__() + self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) + self.k_proj = nn.Linear(embed_dim, embed_dim) + self.q_proj = nn.Linear(embed_dim, embed_dim) + self.v_proj = nn.Linear(embed_dim, embed_dim) + self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) + self.num_heads = num_heads + + def forward(self, x): + x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC + x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC + x = x + self.positional_embedding[:, None, :].to(x.dtype) # 
(HW+1)NC + x, _ = F.multi_head_attention_forward( + query=x, key=x, value=x, + embed_dim_to_check=x.shape[-1], + num_heads=self.num_heads, + q_proj_weight=self.q_proj.weight, + k_proj_weight=self.k_proj.weight, + v_proj_weight=self.v_proj.weight, + in_proj_weight=None, + in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), + bias_k=None, + bias_v=None, + add_zero_attn=False, + dropout_p=0, + out_proj_weight=self.c_proj.weight, + out_proj_bias=self.c_proj.bias, + use_separate_proj_weight=True, + training=self.training, + need_weights=False + ) + + return x[0] + + +class ModifiedResNet(nn.Module): + """ + A ResNet class that is similar to torchvision's but contains the following changes: + - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool. + - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1 + - The final pooling layer is a QKV attention instead of an average pool + """ + + def __init__(self, layers, output_dim, heads, input_resolution=224, width=64): + super().__init__() + self.output_dim = output_dim + self.input_resolution = input_resolution + + # the 3-layer stem + self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(width // 2) + self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(width // 2) + self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False) + self.bn3 = nn.BatchNorm2d(width) + self.avgpool = nn.AvgPool2d(2) + self.relu = nn.ReLU(inplace=True) + + # residual layers + self._inplanes = width # this is a *mutable* variable used during construction + self.layer1 = self._make_layer(width, layers[0]) + self.layer2 = self._make_layer(width * 2, layers[1], stride=2) + self.layer3 = self._make_layer(width * 4, layers[2], stride=2) + self.layer4 = self._make_layer(width * 8, layers[3], stride=2) + + embed_dim = width * 32 # the ResNet feature dimension + self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim) + + def _make_layer(self, planes, blocks, stride=1): + layers = [Bottleneck(self._inplanes, planes, stride)] + + self._inplanes = planes * Bottleneck.expansion + for _ in range(1, blocks): + layers.append(Bottleneck(self._inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + def stem(x): + for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]: + x = self.relu(bn(conv(x))) + x = self.avgpool(x) + return x + + x = x.type(self.conv1.weight.dtype) + x = stem(x) + x1 = self.layer1(x) + x2 = self.layer2(x1) + x3 = self.layer3(x2) + x4 = self.layer4(x3) + x5 = self.attnpool(x4) + + return x5 + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = 
LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x: torch.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) + + def forward(self, x: torch.Tensor): + return self.resblocks(x) + + +class VisionTransformer(nn.Module): + def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads) + + self.ln_post = LayerNorm(width) + self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) + + def forward(self, x: torch.Tensor): + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + + x = self.ln_post(x[:, 0, :]) + + if self.proj is not None: + x = x @ self.proj + + return x + + +class CLIP(nn.Module): + def __init__(self, + embed_dim: int, + # vision + image_resolution: int, + vision_layers: Union[Tuple[int, int, int, int], int], + vision_width: int, + vision_patch_size: int, + # text + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int + ): + super().__init__() + + self.context_length = context_length + + if isinstance(vision_layers, (tuple, list)): + vision_heads = vision_width * 32 // 64 + self.visual = ModifiedResNet( + layers=vision_layers, + output_dim=embed_dim, + heads=vision_heads, + input_resolution=image_resolution, + width=vision_width + ) + else: + vision_heads = vision_width // 64 + self.visual = VisionTransformer( + input_resolution=image_resolution, + patch_size=vision_patch_size, + width=vision_width, + layers=vision_layers, + heads=vision_heads, + output_dim=embed_dim + ) + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + attn_mask=self.build_attention_mask() + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = 
nn.Parameter(torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.initialize_parameters() + + def initialize_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + + if isinstance(self.visual, ModifiedResNet): + if self.visual.attnpool is not None: + std = self.visual.attnpool.c_proj.in_features ** -0.5 + nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std) + + for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]: + for name, param in resnet_block.named_parameters(): + if name.endswith("bn3.weight"): + nn.init.zeros_(param) + + proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) + attn_std = self.transformer.width ** -0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self): + return self.visual.conv1.weight.dtype + + def encode_image(self, image): + return self.visual(image.type(self.dtype)) + + def encode_text(self, text): + x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding.type(self.dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x).type(self.dtype) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection + + return x + + def forward(self, image, text): + image_features = self.encode_image(image) + text_features = self.encode_text(text) + + # normalized features + image_features = image_features / image_features.norm(dim=-1, keepdim=True) + text_features = text_features / text_features.norm(dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logit_scale * text_features @ image_features.t() + + # shape = [global_batch_size, global_batch_size] + return logits_per_image, logits_per_text + + +def convert_weights(model: nn.Module): + """Convert applicable model parameters to fp16""" + + def _convert_weights_to_fp16(l): + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + + if isinstance(l, nn.MultiheadAttention): + for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", 
"bias_k", "bias_v"]: + tensor = getattr(l, attr) + if tensor is not None: + tensor.data = tensor.data.half() + + for name in ["text_projection", "proj"]: + if hasattr(l, name): + attr = getattr(l, name) + if attr is not None: + attr.data = attr.data.half() + + model.apply(_convert_weights_to_fp16) + + +def build_model(state_dict: dict): + vit = "visual.proj" in state_dict + + if vit: + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) + + model = CLIP( + embed_dim, + image_resolution, vision_layers, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in state_dict: + del state_dict[key] + + convert_weights(model) + model.load_state_dict(state_dict) + return model.eval() diff --git a/AllinonSAM/clip/simple_tokenizer.py b/AllinonSAM/clip/simple_tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..0a66286b7d5019c6e221932a813768038f839c91 --- /dev/null +++ b/AllinonSAM/clip/simple_tokenizer.py @@ -0,0 +1,132 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. 
+ Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe()): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'</w>' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + vocab.extend(['<|startoftext|>', '<|endoftext|>']) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} + self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '</w>',) + pairs = get_pairs(word) + + if not pairs: + return token+'</w>' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ') + return text diff --git a/AllinonSAM/combined_model.py b/AllinonSAM/combined_model.py new file mode 100644 index 0000000000000000000000000000000000000000..6cf694ac10597f957735b9b4e2d491ed6711a425 --- /dev/null +++ b/AllinonSAM/combined_model.py @@ -0,0 +1,17 @@ + +import torch.nn as nn + +class CRFCombinedModel(nn.Module): + def __init__(self, base_model, crf): + super(CRFCombinedModel, self).__init__() + self.base_model = base_model + self.crf = crf + + def forward(self, x, x_text=None, spatial_spacings=None): + logits,reg_loss = self.base_model(x, x_text) + shape_img = logits.shape + if len(shape_img) == 3: + logits = logits.reshape(shape_img[0], 1, shape_img[1], shape_img[2]) + output = self.crf(logits, spatial_spacings=spatial_spacings) + print(output.shape) + return output , reg_loss diff
--git a/AllinonSAM/config_amos22.yml b/AllinonSAM/config_amos22.yml new file mode 100644 index 0000000000000000000000000000000000000000..b1da19c58b55368c5535f3f986aafb7c02c1390c --- /dev/null +++ b/AllinonSAM/config_amos22.yml @@ -0,0 +1,36 @@ +data_transforms: + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: AMOS22 + root_path: '/media/ubuntu/New Volume/jay/amos22/amos22' + label_list: [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] + label_names: ['spleen', 'right kidney', 'left kidney', 'gall bladder', 'esophagus', 'liver', 'stomach', 'aorta', 'postcava', 'pancreas', 'right adrenal gland', 'left adrenal gland', 'duodenum', 'bladder', 'prostate'] + volume_channel: 2 + sampling_deviation: 5 + samples_per_slice: 3 + negative_to_positive_ratio: -1 + label_dict: { + 'spleen' : 1, + 'right kidney' : 2, + 'left kidney' : 3, + 'gall bladder' : 4, + 'esophagus' : 5, + 'liver' : 6, + 'stomach' : 7, + 'aorta' : 8, + 'postcava' : 9, + 'pancreas' : 10, + 'right adrenal gland' : 11, + 'left adrenal gland' : 12, + 'duodenum' : 13, + 'bladder' : 14, + 'prostate' : 15 + } diff --git a/AllinonSAM/config_arcade.yml b/AllinonSAM/config_arcade.yml new file mode 100644 index 0000000000000000000000000000000000000000..f61279fa322dce5f0249671fdb3b94cef61f8872 --- /dev/null +++ b/AllinonSAM/config_arcade.yml @@ -0,0 +1,23 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 512 # This value might be different based on your actual images + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: True + use_random_scale: True + +data: + name: ArcadeDataset + root_path: '/home/abdelrahman.elsayed/sarim_dataset' + data_split_csv: '/home/abdelrahman.elsayed/sarim_dataset/data_split.csv' + fold_num: 0 + label_list: [0,1] + label_names: ["Background", "Vein"] + volume_channel: 3 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_atr1.yml b/AllinonSAM/config_atr1.yml new file mode 100644 index 0000000000000000000000000000000000000000..c80aa9838a9e6e74ebcb25c75d1e9992d1c77ef6 --- /dev/null +++ b/AllinonSAM/config_atr1.yml @@ -0,0 +1,21 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 512 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: True + use_cjitter: False + use_affine: False +data: + name: ATR + root_path: '/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr' + label_list: [1] + label_names: ['Military Vehicle'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_atr2.yml b/AllinonSAM/config_atr2.yml new file mode 100644 index 0000000000000000000000000000000000000000..e4297ded1ee37bd958ce8c098a06850896963a3d --- /dev/null +++ b/AllinonSAM/config_atr2.yml @@ -0,0 +1,21 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 1024 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: True + use_cjitter: False + use_affine: False +data: + name: ATR + root_path: '/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr' + label_list: [1] + label_names: ['Vehicle'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_atr3.yml b/AllinonSAM/config_atr3.yml 
new file mode 100644 index 0000000000000000000000000000000000000000..b9bc7f08f95d310f5843c6ae4ce84126f7ed20e9 --- /dev/null +++ b/AllinonSAM/config_atr3.yml @@ -0,0 +1,21 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 512 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: True + use_cjitter: False + use_affine: False +data: + name: ATR + root_path: '/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr' + label_list: [1,2,3,4,5,6,7,8] + label_names: ['SUV Vehicle', '2S3 Vehicle', 'ZSU23 Vehicle', 'BRDM2 Vehicle', 'BMP2 Vehicle', 'T72 Vehicle', 'Pickup Vehicle', 'BTR70 Vehicle'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_btcv.yml b/AllinonSAM/config_btcv.yml new file mode 100644 index 0000000000000000000000000000000000000000..a7950925ccb0c275847587ab4c4825412a45d39f --- /dev/null +++ b/AllinonSAM/config_btcv.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 1024 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: True +data: + name: BTCV + root_path: '/media/ubuntu/New Volume/jay/BTCV' + label_list: [1,2,3,4,5,6,7,8] + label_names: ['Spleen', 'Right Kidney', 'Left Kidney', 'Gall Bladder', 'Liver', 'Stomach', 'Aorta', 'Pancreas'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_chestxdet.yml b/AllinonSAM/config_chestxdet.yml new file mode 100644 index 0000000000000000000000000000000000000000..00ccc39e35b310e01ddf5172af564f1e433c160d --- /dev/null +++ b/AllinonSAM/config_chestxdet.yml @@ -0,0 +1,18 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: False +data: + name: CHESTXDET + root_path: '/media/ubuntu/New Volume/jay/ChestXDet/train_data' + label_list: [1,2,3,4,5,6,7,8,9,10,11,12,13] + label_names: ['Effusion', 'Nodule', 'Cardiomegaly', 'Fibrosis', 'Consolidation', 'Emphysema', 'Mass', 'Fracture', 'Calcification', 'Pleural Thickening', 'Pneumothorax', 'Atelectasis', 'Diffuse Nodule'] + volume_channel: 2 diff --git a/AllinonSAM/config_cholec8k.yml b/AllinonSAM/config_cholec8k.yml new file mode 100644 index 0000000000000000000000000000000000000000..0027206b9d6251f9952a6efefc91a84f7e00c954 --- /dev/null +++ b/AllinonSAM/config_cholec8k.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: CHOLEC 8K + root_path: '/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/archive' + label_list: [1,2,3,4,5,6,7,8,9,10,11,12] + label_names: ['Grasper', 'L Hook Electrocautery', 'Liver', 'Fat', 'Gall Bladder','Abdominal Wall','Gastrointestinal Tract','Cystic Duct','Blood','Hepatic Vein', 'Liver Ligament', 'Connective Tissue'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_endovis.yml b/AllinonSAM/config_endovis.yml new file mode 100644 index 0000000000000000000000000000000000000000..f7bb7f91a99e8acdfb2afc6b787435c9bf6f0d05 --- /dev/null +++ b/AllinonSAM/config_endovis.yml @@ -0,0 +1,20 @@ +data_transforms: + a_min: 0 + a_max: 255 + 
img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: True + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: False +data: + name: ENDOVIS + root_path: '/media/ubuntu/New Volume/jay/endovis17/all_training' + label_list: [1,2,3,4,5,6,7,8,9] + label_names: ['Left Prograsp Forceps', 'Maryland Bipolar Forceps', 'Right Prograsp Forceps', 'Left Large Needle Driver', 'Right Large Needle Driver', 'Left Grasping Retractor', 'Right Grasping Retractor', 'Vessel Sealer', 'Monopolar Curved Scissors'] + volume_channel: 2 + negative_to_positive_ratio: -1 + diff --git a/AllinonSAM/config_endovis18.yml b/AllinonSAM/config_endovis18.yml new file mode 100644 index 0000000000000000000000000000000000000000..c32a858d19912bd5c6f04160a59ec2dfef7c4cb9 --- /dev/null +++ b/AllinonSAM/config_endovis18.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: ENDOVIS 18 + root_path: '/media/ubuntu/New Volume/jay/endovis18/train' + label_list: [1,2,3,4,5,6,7,8,9,10] + label_names: ['background tissue', 'surgical instrument', 'kidney parenchyma', 'covered kidney', 'thread', 'clamps', 'suturing needle', 'suction instrument', 'small intestine','ultrasound probe'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_glas.yml b/AllinonSAM/config_glas.yml new file mode 100644 index 0000000000000000000000000000000000000000..1c0a89c1a284aef4571b0fb878ca27bae7a6b1bd --- /dev/null +++ b/AllinonSAM/config_glas.yml @@ -0,0 +1,21 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 1024 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: True + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: True + use_cjitter: False + use_affine: False +data: + name: GLAS + root_path: '/media/ubuntu/New Volume/jay/GLAS/archive' + label_list: [1] + label_names: ['Glands'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_idrid.yml b/AllinonSAM/config_idrid.yml new file mode 100644 index 0000000000000000000000000000000000000000..f3fe7284220679e3b973f9753f7f051a52c95c8a --- /dev/null +++ b/AllinonSAM/config_idrid.yml @@ -0,0 +1,18 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 600 + use_random_crop: True + use_rotation: False + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: True +data: + name: IDRID + root_path: '/home/ubuntu/Desktop/Domain_Adaptation_Project/data/IDRID/Train' + label_list: [1,2,3,4,5] + label_names: ['Microaneurysms', 'Haemorrhages', 'Hard Exudates', 'Optic Disc', 'Soft Exudates'] + volume_channel: 2 diff --git a/AllinonSAM/config_isic18.yml b/AllinonSAM/config_isic18.yml new file mode 100644 index 0000000000000000000000000000000000000000..4192456763b643efc5b57e4c4d4d6b9b7ea0034e --- /dev/null +++ b/AllinonSAM/config_isic18.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: False +data: + name: ISIC2018 + root_path: '/media/ubuntu/New Volume/jay/ISIC2018' + label_list: [1] + label_names: ['Lesion'] + volume_channel: 2 + negative_to_positive_ratio: -1 
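
Note on the dataset configs added above (config_amos22.yml through config_isic18.yml): they all share the same two-block layout, a data_transforms block with intensity bounds (a_min/a_max), the working img_size, and per-augmentation switches, and a data block with the dataset name, root_path, label_list/label_names, and sampling options such as negative_to_positive_ratio. The following is a minimal sketch of reading one of these files, assuming only that PyYAML is installed and that the relative path below points at the config in question; the repository's own training scripts may consume these configs differently.

import yaml

# Illustrative only: load one of the dataset configs shown in this diff.
# The path is an assumption; adjust it to wherever the yml files live.
with open("AllinonSAM/config_isic18.yml", "r") as f:
    config = yaml.safe_load(f)

# Keys below appear verbatim in the configs above.
img_size = config["data_transforms"]["img_size"]    # e.g. 256 for ISIC2018
label_names = config["data"]["label_names"]         # e.g. ['Lesion']
label_list = config["data"]["label_list"]           # e.g. [1]

# Map class names to integer ids in the order the config lists them.
name_to_id = dict(zip(label_names, label_list))
print(img_size, name_to_id)

Configs that omit a key (for example, config_chestxdet.yml has no negative_to_positive_ratio entry) simply will not have it in the loaded dict, so downstream code presumably guards against missing keys.
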
diff --git a/AllinonSAM/config_kvasirseg.yml b/AllinonSAM/config_kvasirseg.yml new file mode 100644 index 0000000000000000000000000000000000000000..ecde7468f0bb4392986f22f292f3f5bf8dcbb0a0 --- /dev/null +++ b/AllinonSAM/config_kvasirseg.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: False +data: + name: KVASIRSEG + root_path: '/home/ubuntu/Desktop/Domain_Adaptation_Project/data/kvasir-seg/Kvasir-SEG' + label_list: [1] + label_names: ['Polyp'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_lits.yml b/AllinonSAM/config_lits.yml new file mode 100644 index 0000000000000000000000000000000000000000..c73b06b5457508ca2e3d385d1732704e41acf418 --- /dev/null +++ b/AllinonSAM/config_lits.yml @@ -0,0 +1,21 @@ +data_transforms: + a_min: -1410 + a_max: 3024 + img_size: 400 + use_random_crop: True + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: LITS + root_path: '/media/ubuntu/New Volume/jay/LiTS' + label_list: [1,2] + label_names: ['liver', 'tumor'] + volume_channel: 2 + sampling_deviation: 5 + samples_per_slice: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_lits2.yml b/AllinonSAM/config_lits2.yml new file mode 100644 index 0000000000000000000000000000000000000000..1ec9181bfc942f406e02054db1836136bd5115ef --- /dev/null +++ b/AllinonSAM/config_lits2.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: False +data: + name: LITS2 + root_path: '/media/ubuntu/New Volume/jay/LiTS2/archive' + label_list: [1,2] + label_names: ['Liver','Tumor'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_polyp.yml b/AllinonSAM/config_polyp.yml new file mode 100644 index 0000000000000000000000000000000000000000..e593816ab256f807be6ba70f06e4e825b8854db9 --- /dev/null +++ b/AllinonSAM/config_polyp.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 1024 + use_random_crop: False + use_rotation: True + rotation_angle: 25 + use_saturation: True + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: True +data: + name: Polyp + root_path: '/media/ubuntu/New Volume/jay/Polyp2' + label_list: [1] + label_names: ['Polyp'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_refuge.yml b/AllinonSAM/config_refuge.yml new file mode 100644 index 0000000000000000000000000000000000000000..790627814e1e3571eaaa374075d7b4def3538527 --- /dev/null +++ b/AllinonSAM/config_refuge.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 512 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: True +data: + name: Refuge + root_path: '/media/ubuntu/New Volume/jay/fundus_images/archive/REFUGE' + label_list: [1,2] + label_names: ['optic cup', 'optic disk'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_rite.yml b/AllinonSAM/config_rite.yml new file mode 100644 index 
0000000000000000000000000000000000000000..e7f628395256276e4fd7de65c8916939e90919a6 --- /dev/null +++ b/AllinonSAM/config_rite.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: True +data: + name: RITE + root_path: '/media/ubuntu/New Volume/jay/RITE/archive' + label_list: [1] + label_names: ['Vessels'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/config_tmp.yml b/AllinonSAM/config_tmp.yml new file mode 100644 index 0000000000000000000000000000000000000000..4cd2e712dba8fc46acf35e9b65013934c7a2fa25 --- /dev/null +++ b/AllinonSAM/config_tmp.yml @@ -0,0 +1,12 @@ +data_transforms: + a_min: -1410 + a_max: 3024 + img_size: 256 +data: + name: LITS + root_path: '/media/ubuntu/New Volume/jay/LiTS' + label_list: [1,2] + label_names: ['liver', 'tumor'] + volume_channel: 2 + sampling_deviation: 5 + samples_per_slice: 2 diff --git a/AllinonSAM/config_ultrasound.yml b/AllinonSAM/config_ultrasound.yml new file mode 100644 index 0000000000000000000000000000000000000000..fd50ac54c82d9da100485b8061271bbf9d642075 --- /dev/null +++ b/AllinonSAM/config_ultrasound.yml @@ -0,0 +1,18 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: False +data: + name: ULTRASOUND + root_path: '/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/AUS' + label_list: [1,2,3,4,5,6,7,8] + label_names: ['Liver', 'Kidney', 'Pancreas', 'Vessels', 'Adrenals', 'Gall Bladder', 'Bones', 'Spleen'] + volume_channel: 2 diff --git a/AllinonSAM/data_transforms/ChestXDet_transform.py b/AllinonSAM/data_transforms/ChestXDet_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..58d70a35ed50825eda89ddba5ad60d0f8716a5eb --- /dev/null +++ b/AllinonSAM/data_transforms/ChestXDet_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class ChestXDet_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust 
brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - self.data_transforms['a_min']) / (self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/__pycache__/ChestXDet_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/ChestXDet_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28ff78b24d869e046a1e9f62fb5fa3afe6441c69 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/ChestXDet_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/ChestXDet_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/ChestXDet_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95807e4824406391a0f50d9395c6d11b91eaa540 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/ChestXDet_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/atr_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/atr_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11d687343da67ef68825d45c293cbcb73667ee0d Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/atr_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/atr_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/atr_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26b0b5cad57933601cd20cec24584b3675fc2c7f Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/atr_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/btcv_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/btcv_transform.cpython-312.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..89b79ad248e78be823a92ebe1584575b8ad00fac Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/btcv_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/btcv_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/btcv_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9298a068f313691a0cc9d5721bf638b43cc4142 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/btcv_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/cholec_8k_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/cholec_8k_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7114524d610bfc4e206f68808d3b3322e885ae10 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/cholec_8k_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/cholec_8k_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/cholec_8k_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e39e182871427542d2f9419a758c8d291382b9ec Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/cholec_8k_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/endovis_18_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/endovis_18_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4247eb0ec3e0acfa7d758b02bb7d53de05473bb1 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/endovis_18_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/endovis_18_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/endovis_18_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..136d07d47d47c4d764d5040a9673d67467fb185f Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/endovis_18_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/endovis_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/endovis_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82f1926d959fef12a993cd7122918fb815685785 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/endovis_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/endovis_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/endovis_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7afb348f71910640f04112babb85e33179c39ed0 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/endovis_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/glas_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/glas_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2152fd4d211c2ca46c6a8cba4338dbeb4c4627a9 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/glas_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/glas_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/glas_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..755cef4e8698d7dc6d50bd9bfdb7d317f047f740 Binary files /dev/null and 
b/AllinonSAM/data_transforms/__pycache__/glas_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/isic2018_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/isic2018_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef18026ad64d7ccac0ff6386a850a9422e4d4566 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/isic2018_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/isic2018_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/isic2018_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93706c252941d3a4bca6edd7ced3a62e3ad1af93 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/isic2018_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/kvasirSeg_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/kvasirSeg_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc3a3d92b45873ec5e25aaa7396f8b53f29ba452 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/kvasirSeg_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/kvasirSeg_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/kvasirSeg_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f98a1147e92384194984c08a62e3a5ebd981f62f Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/kvasirSeg_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/lits2_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/lits2_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdb0c0293efeb1042c0fe7447d0fb0445c93fb72 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/lits2_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/lits2_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/lits2_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c38bd1458312441ac84955d627f5d61177a8772 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/lits2_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/polyp_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/polyp_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2032495fd3485e36325dc0d23de11142b965437 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/polyp_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/polyp_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/polyp_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd1728251da19c68a73917775299de6554b2c6b6 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/polyp_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/refuge_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/refuge_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0667654b18d63f6c009229a4e43a220f7567d24 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/refuge_transform.cpython-312.pyc differ diff --git 
a/AllinonSAM/data_transforms/__pycache__/refuge_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/refuge_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbac5ec8853453efa53de8121b040e07d6943217 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/refuge_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/rite_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/rite_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9998449889c9328b3e397bcf0b109d7e0898617 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/rite_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/rite_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/rite_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13fda06e9d5f002940cc14309a2fcad4129bfb04 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/rite_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/ultrasound_transform.cpython-312.pyc b/AllinonSAM/data_transforms/__pycache__/ultrasound_transform.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67b3d213312f78ed53a072964754eab65a188b77 Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/ultrasound_transform.cpython-312.pyc differ diff --git a/AllinonSAM/data_transforms/__pycache__/ultrasound_transform.cpython-38.pyc b/AllinonSAM/data_transforms/__pycache__/ultrasound_transform.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25ee449b4e0a29d16e7364561d1c55233c2cac1d Binary files /dev/null and b/AllinonSAM/data_transforms/__pycache__/ultrasound_transform.cpython-38.pyc differ diff --git a/AllinonSAM/data_transforms/atr_transform.py b/AllinonSAM/data_transforms/atr_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..202a0136b89ba8b4122ef01242c230332ad7a01d --- /dev/null +++ b/AllinonSAM/data_transforms/atr_transform.py @@ -0,0 +1,124 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class ATR_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = 
F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #adjust color jitter with some probability + if self.data_transforms['use_cjitter']: + p = random.random() + if p<0.5: + brightness = random.uniform(0,0.2) + contrast = random.uniform(0,0.2) + saturation = random.uniform(0,0.2) + hue = random.uniform(0,0.1) + img = F.adjust_brightness(img, brightness_factor=brightness) + img = F.adjust_contrast(img, contrast_factor=contrast) + img = F.adjust_saturation(img, saturation_factor=saturation) + img = F.adjust_hue(img, hue_factor=hue) + + #affine transforms with some probability + if self.data_transforms['use_affine']: + p = random.random() + if p<0.5: + scale = random.uniform(0.9,1) + img = F.affine(img, translate=[5,5], scale=scale, angle=5, shear=0) + mask = F.affine(img, translate=[5,5], scale=scale, angle=5, shear=0) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - img.min()) / (img.max() - img.min()) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/btcv_transform.py b/AllinonSAM/data_transforms/btcv_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..f845137fde8b531ab54053b04cbac56ed7b5be1f --- /dev/null +++ b/AllinonSAM/data_transforms/btcv_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class BTCV_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = 
transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - img.min()) / (img.max() - img.min()) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/cholec_8k_transform.py b/AllinonSAM/data_transforms/cholec_8k_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9d388e6fc80138353abc179f5ec4628aa7b9f9 --- /dev/null +++ b/AllinonSAM/data_transforms/cholec_8k_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class Cholec_8k_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = 
transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*random.random()) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - self.data_transforms['a_min']) / (self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/endovis_18_transform.py b/AllinonSAM/data_transforms/endovis_18_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..f8c103535fa52077b0c96e1978402a6a2bf33571 --- /dev/null +++ b/AllinonSAM/data_transforms/endovis_18_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class ENDOVIS_18_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = 
config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*random.random()) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - self.data_transforms['a_min']) / (self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/endovis_transform.py b/AllinonSAM/data_transforms/endovis_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..b537559264576f9e95df063e9938c57928503b7e --- /dev/null +++ b/AllinonSAM/data_transforms/endovis_transform.py @@ -0,0 +1,117 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + +''' +{ + "Bipolar Forceps": 1, + "Prograsp Forceps": 2, + "Large Needle Driver": 3, + "Vessel Sealer": 4, + "Grasping Retractor": 5, + "Monopolar Curved Scissors": 6, + "Other": 7 +} +''' + +class ENDOVIS_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 
57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + #crop the image so that only the main arrea is in consideration + img = img[:, 60:-60,350:-350] + mask = mask[:, 60:-60,350:-350] + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*random.random()) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - self.data_transforms['a_min']) / (self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/glas_transform.py b/AllinonSAM/data_transforms/glas_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..baacd835ff91df38a30cb129affde3de04cc6891 --- /dev/null +++ b/AllinonSAM/data_transforms/glas_transform.py @@ -0,0 +1,124 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class GLAS_Transform(): + def __init__(self, 
config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #adjust color jitter with some probability + if self.data_transforms['use_cjitter']: + p = random.random() + if p<0.5: + brightness = random.uniform(0,0.2) + contrast = random.uniform(0,0.2) + saturation = random.uniform(0,0.2) + hue = random.uniform(0,0.1) + img = F.adjust_brightness(img, brightness_factor=brightness) + img = F.adjust_contrast(img, contrast_factor=contrast) + img = F.adjust_saturation(img, saturation_factor=saturation) + img = F.adjust_hue(img, hue_factor=hue) + + #affine transforms with some probability + if self.data_transforms['use_affine']: + p = random.random() + if p<0.5: + scale = random.uniform(0.9,1) + img = F.affine(img, translate=[5,5], scale=scale, angle=5, shear=0) + mask = F.affine(img, translate=[5,5], scale=scale, angle=5, shear=0) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - img.min()) / (img.max() - img.min()) + 
img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/isic2018_transform.py b/AllinonSAM/data_transforms/isic2018_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..c010239a87e3ec1ae85a01f719f9fa9e16435203 --- /dev/null +++ b/AllinonSAM/data_transforms/isic2018_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class ISIC_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - self.data_transforms['a_min']) / (self.data_transforms['a_max'] - 
self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/kvasirSeg_transform.py b/AllinonSAM/data_transforms/kvasirSeg_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..c3a30ef31550a707e55f6022a4cb9a1f8e4fcfa2 --- /dev/null +++ b/AllinonSAM/data_transforms/kvasirSeg_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class kvasirSeg_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - self.data_transforms['a_min']) / 
(self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/lits2_transform.py b/AllinonSAM/data_transforms/lits2_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..beac4fd2a6f5636f1d74802a4e58c46a1f8d90c3 --- /dev/null +++ b/AllinonSAM/data_transforms/lits2_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class LiTS2_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - 
self.data_transforms['a_min']) / (self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/polyp_transform.py b/AllinonSAM/data_transforms/polyp_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..42d034e746902be2ffe50b951a0f8f1511a4905c --- /dev/null +++ b/AllinonSAM/data_transforms/polyp_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class Polyp_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + 
img = (img - self.data_transforms['a_min']) / (self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/refuge_transform.py b/AllinonSAM/data_transforms/refuge_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..d3459947f916861e63175fab5b2dd62b7e036d1f --- /dev/null +++ b/AllinonSAM/data_transforms/refuge_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class Refuge_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + 
b_min,b_max = 0, 255 + img = (img - self.data_transforms['a_min']) / (self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/rite_transform.py b/AllinonSAM/data_transforms/rite_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..1a762f822c9f836dd5b66a94c8ea15fce07d9f3c --- /dev/null +++ b/AllinonSAM/data_transforms/rite_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class RITE_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 
0-255 + b_min,b_max = 0, 255 + img = (img - self.data_transforms['a_min']) / (self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_transforms/ultrasound_transform.py b/AllinonSAM/data_transforms/ultrasound_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..18b8eef203bc1a0c1943eb29783fffc25c7c7568 --- /dev/null +++ b/AllinonSAM/data_transforms/ultrasound_transform.py @@ -0,0 +1,103 @@ +import random +import numpy as np +import torch +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad + + +class Ultrasound_Transform(): + def __init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm=True, is_train=True): + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + deg = 1+random.choice(list(range(self.degree))) + img = F.rotate(img, angle = deg) + mask = F.rotate(mask, angle=deg) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*max(0.5,random.random())) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + b_min = 0 + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + 
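#a_min/a_max below come from the data_transforms section of the config; after rescaling to 0-255, pixel_mean/pixel_std are the statistics the SAM image encoder expects +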
b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - self.data_transforms['a_min']) / (self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask \ No newline at end of file diff --git a/AllinonSAM/data_utils.py b/AllinonSAM/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..738a28cc0e52a07c067d6d7741a6b972145d604e --- /dev/null +++ b/AllinonSAM/data_utils.py @@ -0,0 +1,1421 @@ +import random +import argparse +import os +import sys +import numpy as np +import pandas as pd +import torch +from matplotlib import pyplot as plt +from PIL import Image +from torch.utils.data import Dataset, TensorDataset +from torchvision import datasets, models +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad +from skimage.transform import resize +import nibabel as nib +import time +import json + +from data_transforms.endovis_transform import ENDOVIS_Transform +from data_transforms.endovis_18_transform import ENDOVIS_18_Transform +from data_transforms.cholec_8k_transform import Cholec_8k_Transform +from data_transforms.ultrasound_transform import Ultrasound_Transform +from data_transforms.kvasirSeg_transform import kvasirSeg_Transform +from data_transforms.ChestXDet_transform import ChestXDet_Transform +from data_transforms.lits2_transform import LiTS2_Transform +from data_transforms.btcv_transform import BTCV_Transform + +import os +import sys +source_path = os.path.join('/home/abdelrahman.elsayed/CVPR/AllinonSAM/datasets') +sys.path.append(source_path) +from isic2018 import ISIC2018_Dataset +from polyp import Polyp_Dataset +from rite import RITE_Dataset +from glas import GLAS_Dataset +from refuge import Refuge_Dataset +from btcv import BTCV_Dataset +from atr import ATR_Dataset +from arcade import ArcadeDataset + +def make_positive_negative_files(config, output_root, label_dict, populated_img_path_list, populated_gt_list, populated_classname_list, rgb_gt = False, name_prefix='val'): + # generates positive and negative example files for each class + #positive example file has a list of all images and labels where the class is present + #negative example file has a list of all images where the class is not present + os.makedirs(output_root, exist_ok=True) + assert(len(populated_classname_list) == len(populated_gt_list)) + assert(len(populated_classname_list) == len(populated_img_path_list)) + + main_dict = {} + #make dicts for every class + for c in np.unique(populated_classname_list): + print(c) + main_dict[c] = {} + main_dict[c]['pos_img'] = [] + main_dict[c]['pos_label'] = [] + main_dict[c]['neg_img'] = [] + + for i in range(len(populated_classname_list)): + class_name = populated_classname_list[i] + gt_path = populated_gt_list[i] + im_path = populated_img_path_list[i] + + #check if gt is all blank + if rgb_gt: + gt = np.array(Image.open(gt_path).convert("RGB")) + # if config['data']['volume_channel']==2: + # gt = gt.permute(2,0,1) + mask = np.zeros((gt.shape[0], gt.shape[1])) + else: + gt = np.array(Image.open(gt_path)) + if len(gt.shape)==3: + gt = gt[:,:,0] + if gt.max()<2: + gt = (gt*255).astype(int) + mask = np.zeros((gt.shape[0], gt.shape[1])) + + H,W = mask.shape + selected_color_list = label_dict[class_name] + temp = np.zeros((H,W)).astype('uint8') + if rgb_gt: + for c in 
selected_color_list: + temp = temp | (np.all(np.where(gt==c,1,0),axis=2)) + else: + temp = (gt==label_dict[class_name]) + mask[:,:] = temp + if mask.any(): + main_dict[class_name]['pos_img'].append(im_path) + main_dict[class_name]['pos_label'].append(gt_path) + else: + main_dict[class_name]['neg_img'].append(im_path) + + with open(os.path.join(output_root, name_prefix+"_pos_neg_dict.json"),'w') as fp: + json.dump(main_dict, fp) + + print("json file successfully created") + return + + + + +class Slice_Transforms: + def __init__(self, config=None): + #SAM encoder expects images to be centered around tehe following mean and variance, how to change it for medical datasets? + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1).unsqueeze(0) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1).unsqueeze(0) + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + # self.a_min = config['data_transforms']['a_min'] + # self.a_max = config['data_transforms']['a_max'] + + + def __call__(self, image, label, apply_mean_norm=True): + # image = torch.Tensor(image) + b_min=0 + a_min = image.min() + a_max = image.max() + # if not is_mask: + #scale intensities to 0-255 + b_min,b_max = 0, 255 + image = (image - a_min) / (a_max - a_min) + image = image * (b_max - b_min) + b_min + image = torch.clamp(image,b_min,b_max) + image = image.int() + + #center around SAM's expected mean + if apply_mean_norm: + image = (image - self.pixel_mean)/self.pixel_std + + image = self.resize(image) + label = self.resize(label) + #pad if necessary + h, w = image.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + image = pad(image, (0, padw, 0, padh), value=b_min) + label = pad(label, (0, padw, 0, padh), value=0) + return image, label + + +class Generic_Dataset_3d(Dataset): + def __init__(self, config, is_train=False, folder_start=0, folder_end=40, shuffle_list=True, apply_norm=True, use_folder_idx=True): + super().__init__() + self.root_path = config['data']['root_path'] + self.img_path_list = [] + self.label_path_list = [] + self.label_names_text = [] + self.label_names = config['data']['label_names'] + self.label_list = config['data']['label_list'] + self.label_dict = config['data']['label_dict'] + self.is_train = is_train + self.folder_start = folder_start + self.folder_end = folder_end + self.config = config + self.final_img_path_list = [] + self.final_label_path_list = [] + self.final_label_names_list = [] + self.final_position_list = [] + self.use_folder_idx = use_folder_idx + #can be one of 2d_gaussian, 2d, 3d + self.mode = "2d_gaussian" + self.apply_norm = apply_norm + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_names_text = [self.label_names_text[pi] for pi in p] + + + #define data transforms + self.transform = Slice_Transforms(config=config) + + def populate_lists(self): + # print(self.folder_start, self.folder_end, self.label_list) + if self.use_folder_idx: + for case_no in sorted(os.listdir(os.path.join(self.root_path,'images'))): + if '.DS_Store' in case_no: + continue + case_idx = int(case_no[:case_no.find('.')]) + if not((case_idx>=self.folder_start) and (case_idx=0.5)+0 + + + return im, gold, self.label_dict[label_text], label_text, s + +class IDRID_Transform(): + def 
__init__(self, config): + self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1,1,1) + self.pixel_std = torch.Tensor([53.395, 57.12, 57.375]).view(-1,1,1) + self.degree = config['data_transforms']['rotation_angle'] + self.saturation = config['data_transforms']['saturation'] + self.brightness = config['data_transforms']['brightness'] + self.img_size = config['data_transforms']['img_size'] + self.resize = transforms.Resize(self.img_size-1, max_size=self.img_size, antialias=True) + + self.data_transforms = config['data_transforms'] + + def __call__(self, img, mask, apply_norm, is_train): + #crop the image so that only the main arrea is in consideration + img = img[:,:,270:3700] + mask = mask[:,:,270:3700] + if is_train: + #flip horizontally with some probability + if self.data_transforms['use_horizontal_flip']: + p = random.random() + if p<0.5: + img = F.hflip(img) + mask = F.hflip(mask) + + #rotate with p1 probability + if self.data_transforms['use_rotation']: + p = random.random() + if p<0.5: + img = F.rotate(img, angle = self.degree) + mask = F.rotate(mask, angle=self.degree) + + #adjust saturation with some probability + if self.data_transforms['use_saturation']: + p = random.random() + if p<0.2: + img = F.adjust_saturation(img, self.saturation) + + #adjust brightness with some probability + if self.data_transforms['use_brightness']: + p = random.random() + if p<0.5: + img = F.adjust_brightness(img, self.brightness*random.random()) + + #take random crops of img size X img_size such that label is non zero + if self.data_transforms['use_random_crop']: + fallback = 20 + fall_back_ctr = 0 + repeat_flag = True + while(repeat_flag): + fall_back_ctr += 1 + t = transforms.RandomCrop((self.img_size, self.img_size)) + i,j,h,w = t.get_params(img, (self.img_size, self.img_size)) + + #if mask is all zeros, exit the loop + if not mask.any(): + repeat_flag = False + + #fallback to avoid long loops + if fall_back_ctr >= fallback: + temp1, temp2, temp3 = np.where(mask!=0) + point_of_interest = random.choice(list(range(len(temp2)))) + i = temp2[point_of_interest] - (h//2) + j = temp3[point_of_interest] - (w//2) + repeat_flag = False + + cropped_img = F.crop(img, i, j, h, w) + cropped_mask = F.crop(mask, i, j, h, w) + if cropped_mask.any(): + repeat_flag = False + img = cropped_img + mask = cropped_mask + else: + #if no random crops then perform resizing + img = self.resize(img) + mask = self.resize(mask) + #pad if necessary + h, w = img.shape[-2:] + padh = self.img_size - h + padw = self.img_size - w + img = pad(img, (0, padw, 0, padh), value=b_min) + mask = pad(mask, (0, padw, 0, padh), value=b_min) + + + #apply centering based on SAM's expected mean and variance + if apply_norm: + b_min=0 + #scale intensities to 0-255 + b_min,b_max = 0, 255 + img = (img - self.data_transforms['a_min']) / (self.data_transforms['a_max'] - self.data_transforms['a_min']) + img = img * (b_max - b_min) + b_min + img = torch.clamp(img,b_min,b_max) + + #center around SAM's expected mean + img = (img - self.pixel_mean)/self.pixel_std + + return img, mask + + +class IDRID_Dataset(Dataset): + def __init__(self, config, is_train=False, folder_start=0, folder_end=40, shuffle_list=True, apply_norm=True): + super().__init__() + self.root_path = config['data']['root_path'] + self.img_path_list = [] + self.label_path_list = [] + self.label_names_text = [] + self.label_names = config['data']['label_names'] + self.label_list = config['data']['label_list'] + self.is_train = is_train + self.folder_start = folder_start + 
self.folder_end = folder_end + self.config = config + self.apply_norm = apply_norm + self.acronym = { + 'Microaneurysms': 'MA', + 'Haemorrhages': 'HE', + 'Hard Exudates': 'EX', + 'Optic Disc': 'OD', + 'Soft Exudates': 'SE' + } + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_names_text = [self.label_names_text[pi] for pi in p] + + + #define data transforms + self.idrid_transform = IDRID_Transform(config = config) + + def populate_lists(self): + # print(self.folder_start, self.folder_end, self.label_list) + + for case_no in sorted(os.listdir(os.path.join(self.root_path,'images'))): + case_idx = int(case_no[case_no.find('_')+1:case_no.find('.')]) + if not((case_idx>=self.folder_start) and (case_idx=0.5)+0 + + # print('debug5: ', label.shape, label.any()) + + return img, label, label_segmask_no, label_text + +class Ultrasound_Dataset(Dataset): + def __init__(self, config, is_train=False, apply_norm=True, shuffle_list=True, no_text_mode=False): + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + self.data_transform = Ultrasound_Transform(config=config) + self.label_dict = { + 'Liver': [[100,0,100]], + 'Kidney': [[255,255,0]], + 'Pancreas': [[0,0,255]], + 'Vessels': [[255,0,0]], + 'Adrenals': [[0,255,255]], + 'Gall Bladder': [[0,255,0]], + 'Bones': [[255,255,255]], + 'Spleen': [[255,0,255]] + } + self.num_classes = len(list(self.label_dict.keys())) + if self.is_train: + self.ctlist = ['ct1','ct2','ct3','ct4','ct5','ct6','ct7','ct8','ct9','ct10','ct11','ct12'] + else: + self.ctlist = ['ct13','ct14','ct15'] + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + def populate_lists(self): + imgs_path = os.path.join(self.root_path, 'images/train') + labels_path = os.path.join(self.root_path, 'annotations/train') + for img in os.listdir(imgs_path): + ct = img[:img.find('-')] + if ct not in self.ctlist: + continue + if self.no_text_mode: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img)) + self.label_list.append('') + else: + for label_name in self.label_names: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img)) + self.label_list.append(label_name) + + def __len__(self): + return len(self.img_path_list) + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + try: + label = (np.array(Image.open(self.label_path_list[index]).convert("RGB"))) + except: + label = np.zeros(img.shape[0], img.shape[1], 1) + + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + if self.no_text_mode: + mask = np.zeros((self.num_classes,img.shape[1], img.shape[2])) + for i,c in 
enumerate(list(self.label_dict.keys())): + temp = np.zeros(label.shape).astype('uint8')[:,:,0] + selected_color_list = self.label_dict[c] + for c in selected_color_list: + temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + mask[i,:,:] = temp + mask = torch.Tensor(mask) + img, mask = self.data_transform(img, mask, is_train=self.is_train, apply_norm=self.apply_norm) + mask = (mask>=0.5)+0 + label_of_interest = '' + else: + temp = np.zeros(label.shape).astype('uint8')[:,:,0] + selected_color_list = self.label_dict[self.label_list[index]] + for c in selected_color_list: + temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + + mask = torch.Tensor(temp).unsqueeze(0) + label_of_interest = self.label_list[index] + img, mask = self.data_transform(img, mask, is_train=self.is_train, apply_norm=self.apply_norm) + #convert all grayscale pixels due to resizing back to 0, 1 + mask = (mask>=0.5)+0 + mask = mask[0] + + return img, mask, self.img_path_list[index], label_of_interest + + + +class Cholec_Ins_Dataset(Dataset): + def __init__(self, config, is_train=False, apply_norm=True, shuffle_list=True, no_text_mode=False) -> None: + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.config = config + self.no_text_mode = no_text_mode + self.shuffle_list = shuffle_list + self.apply_norm = apply_norm + self.data_transform = Cholec_8k_Transform(config=config) + self.label_dict = { + 'Grasper':31, + 'L Hook Electrocautery':32, + 'Liver':21, + 'Fat':12, + 'Gall Bladder':22, + 'Abdominal Wall':11, + 'Gastrointestinal Tract':13, + 'Cystic Duct':25, + 'Blood':24, + 'Hepatic Vein':33, + 'Liver Ligament':5, + 'Connective Tissue':23 + } + self.num_classes = len(list(self.label_dict.keys())) + + if is_train: + self.folder_list = ['video01','video09','video18','video20','video24','video25', 'video26','video35', 'video43', 'video55', 'video28', 'video37'] + else: + # self.folder_list = ['video17','video52'] + self.folder_list = ['video12','video27'] + #populate the above lists + self.populate_lists() + + #get positive negative lists dictionary + try: + if is_train: + fp = open(os.path.join(self.root_path,'train_pos_neg_dict.json')) + else: + fp = open(os.path.join(self.root_path,'val_pos_neg_dict.json')) + + self.pos_neg_dict = json.load(fp) + except: + print("Passing because pos neg json not found") + pass + + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + # self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + self.final_img_path_list = self.img_path_list + self.final_label_list = self.label_list + self.final_label_path_list = self.label_path_list + + def populate_lists(self): + for folder in (self.folder_list): + path1 = os.path.join(self.root_path, folder) + for sub in sorted(os.listdir(path1)): + path2 = os.path.join(path1, sub) + for im in sorted(os.listdir(path2)): + if 'endo.png' not in im: + continue + im_path = os.path.join(path2, im) + im_name = im[:-4] + label_img_path = os.path.join(path2, im_name+'_watershed_mask.png') + if self.no_text_mode: + self.img_names.append(im_name) + self.img_path_list.append(os.path.join(im_path)) + self.label_path_list.append(os.path.join(label_img_path)) + 
self.label_list.append('') + else: + for label_name in self.label_names: + self.img_names.append(im_name) + self.img_path_list.append(im_path) + self.label_path_list.append(label_img_path) + self.label_list.append(label_name) + + def one_time_generate_pos_neg_list_dicts(self, prefix): + make_positive_negative_files(self.config, self.root_path, self.label_dict, self.img_path_list, self.label_path_list, self.label_list, name_prefix=prefix) + + def generate_examples(self, neg2pos_ratio=2): + self.final_img_path_list = [] + self.final_img_names = [] + self.final_label_path_list = [] + self.final_label_list = [] + + for c in self.pos_neg_dict: + for i,pos_im in enumerate(self.pos_neg_dict[c]['pos_img']): + self.final_img_path_list.append(pos_im) + self.final_label_path_list.append(self.pos_neg_dict[c]['pos_label'][i]) + self.final_label_list.append(c) + # print(c, len(self.pos_neg_dict[c]['pos_img']), len(self.pos_neg_dict[c]['neg_img'])) + try: + selected_neg_samples = random.sample(self.pos_neg_dict[c]['neg_img'], neg2pos_ratio*len(self.pos_neg_dict[c]['pos_img'])) + except: + selected_neg_samples = self.pos_neg_dict[c]['neg_img'] + self.final_img_path_list = self.final_img_path_list + selected_neg_samples + self.final_label_path_list = self.final_label_path_list + [None]*len(selected_neg_samples) + self.final_label_list = self.final_label_list + [c]*len(selected_neg_samples) + + #shuffle if required + if self.shuffle_list: + p = [x for x in range(len(self.final_img_path_list))] + random.shuffle(p) + self.final_img_path_list = [self.final_img_path_list[pi] for pi in p] + self.final_label_path_list = [self.final_label_path_list[pi] for pi in p] + self.final_label_list = [self.final_label_list[pi] for pi in p] + return + + + def __len__(self): + return len(self.final_img_path_list) + + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.final_img_path_list[index]).convert("RGB"))) + + label_of_interest = self.final_label_list[index] + if self.final_label_path_list[index] is None: + gold = np.zeros_like(img) + else: + gold = np.array(Image.open(self.final_label_path_list[index])) + + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + if len(gold.shape)==3: + gold = gold[:,:,0] + if gold.max()<2: + gold = (gold*255).astype(int) + + + if self.no_text_mode: + mask = np.zeros((self.num_classes,img.shape[1], img.shape[2])) + for i,c in enumerate(list(self.label_dict.keys())): + mask[i,:,:] = (gold==self.label_dict[c]) + mask = torch.Tensor(mask) + img, mask = self.data_transform(img, mask, is_train=self.is_train, apply_norm=self.apply_norm) + mask = (mask>=0.5)+0 + label_of_interest = '' + else: + # plt.imshow(gold) + # plt.show() + mask = (gold==self.label_dict[label_of_interest]) + + mask = torch.Tensor(mask+0) + mask = torch.Tensor(mask).unsqueeze(0) + + + img, mask = self.data_transform(img, mask, is_train=self.is_train, apply_norm=self.apply_norm) + + # plt.imshow(mask, cmap='gray') + # plt.show() + #convert all grayscale pixels due to resizing back to 0, 1 + mask = (mask>=0.5)+0 + mask = mask[0] + # plt.imshow(mask, cmap='gray') + # plt.show() + return img, mask, self.final_img_path_list[index], label_of_interest + +class ChestXDet_Dataset(Dataset): + def __init__(self, config, start = 0, end = 69565, is_train=False, apply_norm=True, shuffle_list=True, no_text_mode=False) -> None: + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = 
[] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.config = config + self.no_text_mode = no_text_mode + self.apply_norm = apply_norm + self.start = start + self.end = end + self.data_transform = ChestXDet_Transform(config=config) + self.label_dict = { + 'Effusion': 1, + 'Nodule': 2, + 'Cardiomegaly': 3, + 'Fibrosis': 4, + 'Consolidation': 5, + 'Emphysema': 6, + 'Mass': 7, + 'Fracture': 8, + 'Calcification': 9, + 'Pleural Thickening': 10, + 'Pneumothorax': 11, + 'Atelectasis': 12, + 'Diffuse Nodule': 13 + } + self.num_classes = len(list(self.label_dict.keys())) + + #populate the above lists + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + def populate_lists(self): + im_folder_path = os.path.join(self.root_path, 'images') + mask_folder_path = os.path.join(self.root_path, 'masks') + for im in os.listdir(im_folder_path): + if (int(im[:im.find('.')]) >= self.start) and (int(im[:im.find('.')])<=self.end): + im_path = os.path.join(im_folder_path, im) + label_img_path = os.path.join(mask_folder_path, im) + if self.no_text_mode: + self.img_names.append(im) + self.img_path_list.append(im_path) + self.label_path_list.append(label_img_path) + self.label_list.append('') + else: + for label_name in self.label_names: + self.img_names.append(im) + self.img_path_list.append(im_path) + self.label_path_list.append(label_img_path) + self.label_list.append(label_name) + + def __len__(self): + return len(self.img_path_list) + + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + label_of_interest = self.label_list[index] + gold = np.array(Image.open(self.label_path_list[index])) + + if len(gold.shape)==3: + gold = gold[:,:,0] + + if self.no_text_mode: + mask = np.zeros((self.num_classes,img.shape[1], img.shape[2])) + for i,c in enumerate(list(self.label_dict.keys())): + mask[i,:,:] = (gold==self.label_dict[c]) + mask = torch.Tensor(mask) + img, mask = self.data_transform(img, mask, is_train=self.is_train, apply_norm=self.apply_norm) + mask = (mask>=0.5)+0 + label_of_interest = '' + else: + # plt.imshow(gold) + # plt.show() + mask = (gold==self.label_dict[label_of_interest]) + + mask = torch.Tensor(mask+0) + mask = torch.Tensor(mask).unsqueeze(0) + + + img, mask = self.data_transform(img, mask, is_train=self.is_train, apply_norm=self.apply_norm) + + # plt.imshow(mask, cmap='gray') + # plt.show() + #convert all grayscale pixels due to resizing back to 0, 1 + mask = (mask>=0.5)+0 + mask = mask[0] + # plt.imshow(mask, cmap='gray') + # plt.show() + return img, mask, self.img_path_list[index], label_of_interest + + +class Endovis_18(Dataset): + def __init__(self, config, start=0, end=200, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False): + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.start = start + self.end = end + self.shuffle_list = shuffle_list + self.label_names = config['data']['label_names'] + self.config = config + self.no_text_mode = no_text_mode + 
self.apply_norm = apply_norm + if self.is_train: + self.seqs = ['seq_1', 'seq_2', 'seq_3', 'seq_5', 'seq_6', 'seq_9', 'seq_10', 'seq_11', 'seq_13', 'seq_14', 'seq_15'] + else: + self.seqs = ['seq_4', 'seq_7', 'seq_12', 'seq_16'] + + self.label_dict = { + 'background tissue': [[0,0,0]], + 'surgical instrument': [[0,255,0],[0,255,255],[125,255,12]], + 'kidney parenchyma': [[255,55,0]], + 'covered kidney': [[24,55,125]], + 'thread': [[187,155,25]], + 'clamps': [[0,255,125]], + 'suturing needle': [[255,255,125]], + 'suction instrument': [[123,15,175]], + 'small intestine': [[124,155,5]], + 'ultrasound probe': [[12,255,141]] + } + self.num_classes = len(list(self.label_dict.keys())) + + + self.populate_lists() + + #get positive negative lists dictionary + if config['data']['negative_to_positive_ratio']>0: + try: + if is_train: + fp = open(os.path.join(self.root_path,'train_pos_neg_dict.json')) + else: + fp = open(os.path.join(self.root_path,'val_pos_neg_dict.json')) + + self.pos_neg_dict = json.load(fp) + except: + print("Passing because pos neg json not found") + pass + + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + # self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + self.final_img_path_list = self.img_path_list + self.final_label_list = self.label_list + self.final_label_path_list = self.label_path_list + + #define data transform + self.data_transform = ENDOVIS_18_Transform(config=config) + + def populate_lists(self): + #generate dataset for instrument 1 4 training + for dataset_num in os.listdir(self.root_path): + if 'json' in dataset_num: + continue + for seq in os.listdir(os.path.join(self.root_path, dataset_num)): + if seq not in self.seqs: + continue + lbl_folder_path = os.path.join(self.root_path, dataset_num, seq, 'labels') + frames_folder_path = os.path.join(self.root_path, dataset_num, seq, 'left_frames') + for frame_no in os.listdir(frames_folder_path): + if 'png' not in frame_no: + continue + if self.no_text_mode: + self.img_names.append(frame_no) + self.img_path_list.append(os.path.join(frames_folder_path,frame_no)) + self.label_path_list.append(os.path.join(lbl_folder_path, frame_no)) + self.label_list.append('') + else: + for label_name in self.label_names: + lbl_path = os.path.join(lbl_folder_path,frame_no) + self.img_names.append(frame_no) + self.img_path_list.append(os.path.join(frames_folder_path, frame_no)) + self.label_list.append(label_name) + self.label_path_list.append(lbl_path) + + def one_time_generate_pos_neg_list_dicts(self, prefix): + make_positive_negative_files(self.config, self.root_path, self.label_dict, self.img_path_list, self.label_path_list, self.label_list, name_prefix=prefix, rgb_gt=True) + + def generate_examples(self, neg2pos_ratio=2): + self.final_img_path_list = [] + self.final_img_names = [] + self.final_label_path_list = [] + self.final_label_list = [] + + for c in self.pos_neg_dict: + for i,pos_im in enumerate(self.pos_neg_dict[c]['pos_img']): + self.final_img_path_list.append(pos_im) + self.final_label_path_list.append(self.pos_neg_dict[c]['pos_label'][i]) + self.final_label_list.append(c) + # print(c, len(self.pos_neg_dict[c]['pos_img']), len(self.pos_neg_dict[c]['neg_img'])) + try: + selected_neg_samples = random.sample(self.pos_neg_dict[c]['neg_img'], neg2pos_ratio*len(self.pos_neg_dict[c]['pos_img'])) + except: + 
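#random.sample raises ValueError when the negative pool is smaller than the requested sample, so fall back to using every available negative image +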
selected_neg_samples = self.pos_neg_dict[c]['neg_img'] + self.final_img_path_list = self.final_img_path_list + selected_neg_samples + self.final_label_path_list = self.final_label_path_list + [None]*len(selected_neg_samples) + self.final_label_list = self.final_label_list + [c]*len(selected_neg_samples) + + #shuffle if required + if self.shuffle_list: + p = [x for x in range(len(self.final_img_path_list))] + random.shuffle(p) + self.final_img_path_list = [self.final_img_path_list[pi] for pi in p] + self.final_label_path_list = [self.final_label_path_list[pi] for pi in p] + self.final_label_list = [self.final_label_list[pi] for pi in p] + return + + def __len__(self): + return len(self.final_img_path_list) + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + try: + label = (np.array(Image.open(self.label_path_list[index]).convert("RGB"))) + except: + label = np.zeros(img.shape[0], img.shape[1], 1) + + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + if self.no_text_mode: + mask = np.zeros((self.num_classes,img.shape[1], img.shape[2])) + for i,c in enumerate(list(self.label_dict.keys())): + temp = np.zeros(label.shape).astype('uint8')[:,:,0] + selected_color_list = self.label_dict[c] + for c in selected_color_list: + temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + mask[i,:,:] = temp + mask = torch.Tensor(mask) + img, mask = self.data_transform(img, mask, is_train=self.is_train, apply_norm=self.apply_norm) + mask = (mask>=0.5)+0 + label_of_interest = '' + else: + temp = np.zeros(label.shape).astype('uint8')[:,:,0] + selected_color_list = self.label_dict[self.label_list[index]] + for c in selected_color_list: + temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + + mask = torch.Tensor(temp).unsqueeze(0) + label_of_interest = self.label_list[index] + img, mask = self.data_transform(img, mask, is_train=self.is_train, apply_norm=self.apply_norm) + #convert all grayscale pixels due to resizing back to 0, 1 + mask = (mask>=0.5)+0 + mask = mask[0] + + return img, mask, self.img_path_list[index], label_of_interest + + +class Endovis_Dataset(Dataset): + def __init__(self, config, start=0, end=200, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False): + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.start = start + self.end = end + self.label_names = config['data']['label_names'] + self.num_classes = len(self.label_names) + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + #define data transform + self.data_transform = ENDOVIS_Transform(config=config) + + def populate_lists(self): + #generate dataset for instrument 1 4 training + for dataset_num in os.listdir(self.root_path): + if 'dataset' not in dataset_num: + continue + lbl_folder_path = os.path.join(self.root_path, dataset_num, 'ground_truth') + frames_folder_path = os.path.join(self.root_path, dataset_num, 'left_frames') + for frame_no in os.listdir(frames_folder_path): + if 
int(frame_no[5:8])>=self.start and int(frame_no[5:8])0)+0 + + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + label = (label>=0.5)+0 + label_of_interest = '' + # print("img shape: ",img.shape) + # print("label shape: ", label.shape) + + else: + try: + label = torch.Tensor(np.array(Image.open(self.label_path_list[index]))) + except: + label = torch.zeros(img.shape[1], img.shape[2]) + + + label = label.unsqueeze(0) + label = (label>0)+0 + label_of_interest = self.label_list[index] + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + + #convert all grayscale pixels due to resizing back to 0, 1 + label = (label>=0.5)+0 + label = label[0] + + + return img, label, self.img_path_list[index], label_of_interest + + def __len__(self): + return len(self.img_path_list) + +class LiTS2_Dataset(Dataset): + def __init__(self, config, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False) -> None: + super().__init__() + self.root_path = config['data']['root_path'] + self.df = pd.read_csv(os.path.join(self.root_path, 'lits_train.csv')) + self.df = self.df.sample(frac=1) + self.train_df = self.df[:int(0.8*len(self.df))] + self.val_df = self.df[int(0.8*len(self.df)):] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.num_classes = len(self.label_names) + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + #define data transform + self.data_transform = LiTS2_Transform(config=config) + + def __len__(self): + return len(self.img_path_list) + + def set_is_train(self,istrain): + self.is_train = istrain + + def populate_lists(self): + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + if self.is_train: + df = self.train_df + else: + df = self.val_df + + for i in range(len(df)): + img_path = os.path.join(self.root_path,'dataset_6',df['filepath'].iloc[i][18:]) + liver_mask_path = os.path.join(self.root_path,'dataset_6',df['liver_maskpath'].iloc[i][18:]) + tumor_mask_path = os.path.join(self.root_path,'dataset_6',df['tumor_maskpath'].iloc[i][18:]) + self.img_path_list.append(img_path) + self.img_path_list.append(img_path) + self.img_names.append(df['filepath'].iloc[i][28:]) + self.img_names.append(df['filepath'].iloc[i][28:]) + self.label_path_list.append(liver_mask_path) + self.label_path_list.append(tumor_mask_path) + self.label_list.append("Liver") + self.label_list.append('Tumor') + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + try: + label = torch.Tensor(np.array(Image.open(self.label_path_list[index])))[:,:,0] + except: + label = torch.zeros(img.shape[1], img.shape[2]) + + + label = label.unsqueeze(0) + label = (label>0)+0 + label_of_interest = self.label_list[index] + + #convert all grayscale pixels due to resizing back to 0, 1 + img, label = self.data_transform(img, label, 
is_train=self.is_train, apply_norm=self.apply_norm) + label = (label>=0.5)+0 + label = label[0] + + + return img, label, self.img_path_list[index], label_of_interest + + +class KvasirSeg_Dataset(Dataset): + def __init__(self, config, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False): + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.num_classes = len(self.label_names) + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + #define data transform + self.data_transform = kvasirSeg_Transform(config=config) + + def __len__(self): + return len(self.img_path_list) + + def populate_lists(self): + + if self.is_train: + imgs_path = os.path.join(self.root_path, "train/images") + masks_path = os.path.join(self.root_path, "train/masks") + else: + imgs_path = os.path.join(self.root_path, "val/images") + masks_path = os.path.join(self.root_path, "val/masks") + + for i in os.listdir(imgs_path): + if self.no_text_mode: + self.img_names.append(i) + self.img_path_list.append(os.path.join(imgs_path,i)) + self.label_path_list.append(os.path.join(masks_path, i)) + self.label_list.append('') + else: + for label_name in self.label_names: + self.img_names.append(i) + self.img_path_list.append(os.path.join(imgs_path,i)) + self.label_path_list.append(os.path.join(masks_path, i)) + self.label_list.append(label_name) + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + try: + label = torch.Tensor(np.array(Image.open(self.label_path_list[index])))[:,:,0] + except: + label = torch.zeros(img.shape[1], img.shape[2]) + + + label = label.unsqueeze(0) + label = (label>0)+0 + label_of_interest = self.label_list[index] + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + + #convert all grayscale pixels due to resizing back to 0, 1 + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + label = (label>=0.5)+0 + label = label[0] + + + return img, label, self.img_path_list[index], label_of_interest + +def get_data(config, tr_folder_start, tr_folder_end, val_folder_start, val_folder_end, use_norm=True, no_text_mode=False): + dataset_dict = {} + dataloader_dict = {} + dataset_sizes = {} + #generate label_dict + print("hEREE") + label_dict = {} + for i,ln in enumerate(config['data']['label_names']): + label_dict[ln] = i + + if config['data']['name']=='IDRID': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = IDRID_Dataset(config, folder_start=0, folder_end=40, shuffle_list=True, is_train=True, apply_norm=use_norm) + if x=='val': + dataset_dict[x] = IDRID_Dataset(config, folder_start=40, folder_end=60, shuffle_list=False, apply_norm=use_norm) + dataset_sizes[x] = len(dataset_dict[x]) + elif config['data']['name'] == 'AMOS22': + for x in ['train','val']: + if x=='train': + 
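#AMOS22 reuses Generic_Dataset_3d with use_folder_idx=False; cases 0-40 form the training split and 40-60 the validation split +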
dataset_dict[x] = Generic_Dataset_3d(config, folder_start=0, folder_end=40, shuffle_list=True, is_train=True, apply_norm=use_norm, use_folder_idx=False) + if x=='val': + dataset_dict[x] = Generic_Dataset_3d(config, folder_start=40, folder_end=60, shuffle_list=False, apply_norm=use_norm, use_folder_idx=False) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='ENDOVIS': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = Endovis_Dataset(config, start=0, end=180, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = Endovis_Dataset(config, start=180, end=330, shuffle_list=False, apply_norm=use_norm, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='ENDOVIS 18': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = Endovis_18(config, start=0, end=18000, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = Endovis_18(config, start=0, end=33000, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='CHESTXDET': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = ChestXDet_Dataset(config, start=0, end=69565, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = ChestXDet_Dataset(config, start=69566, end=83000, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='CHOLEC 8K': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = Cholec_Ins_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = Cholec_Ins_Dataset(config, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='ULTRASOUND': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = Ultrasound_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = Ultrasound_Dataset(config, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='KVASIRSEG': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = KvasirSeg_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = KvasirSeg_Dataset(config, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='LITS2': + dataset_lits = LiTS2_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + for x in ['train','val']: + if x=='train': + dataset_lits.set_is_train = True + if x=='val': + dataset_lits.set_is_train = False + dataset_lits.populate_lists() + dataset_dict[x] = dataset_lits + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=="ISIC2018": + for x in ['train','val']: + if x=='train': + dataset_dict[x] = ISIC2018_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = 
ISIC2018_Dataset(config, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=="Polyp": + for x in ['train','val']: + if x=='train': + dataset_dict[x] = Polyp_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = Polyp_Dataset(config, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='RITE': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = RITE_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = RITE_Dataset(config, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='GLAS': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = GLAS_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = GLAS_Dataset(config, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='Refuge': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = Refuge_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = Refuge_Dataset(config, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='BTCV': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = BTCV_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = BTCV_Dataset(config, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='ATR': + for x in ['train','val']: + if x=='train': + dataset_dict[x] = ATR_Dataset(config, shuffle_list=True, is_train=True, apply_norm=use_norm, no_text_mode=no_text_mode) + if x=='val': + dataset_dict[x] = ATR_Dataset(config, shuffle_list=False, apply_norm=use_norm, is_train=False, no_text_mode=no_text_mode) + dataset_sizes[x] = len(dataset_dict[x]) + + elif config['data']['name']=='ArcadeDataset': + print("HEREEEEEE") + for x in ['train', 'val']: # Changed 'test' to 'val' + is_train = x == 'train' + dataset_dict[x] = ArcadeDataset(config, is_train=is_train, shuffle_list=True, apply_norm=use_norm) + dataset_sizes[x] = len(dataset_dict[x]) + + print(f"{x.capitalize()} dataset size: {dataset_sizes[x]}") + + + else: + + for x in ['train','val']: + if x=='train': + dataset_dict[x] = Generic_Dataset_3d(config, is_train=True, folder_start=tr_folder_start, folder_end=tr_folder_end) + elif x=='val': + dataset_dict[x] = Generic_Dataset_3d(config, is_train=False, folder_start=val_folder_start, folder_end=val_folder_end) + + dataset_sizes[x] = len(dataset_dict[x]) + return dataset_dict, dataset_sizes, label_dict \ No newline at end of file diff --git a/AllinonSAM/datasets/__pycache__/arcade.cpython-312.pyc b/AllinonSAM/datasets/__pycache__/arcade.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6b32e2baa7dad188d721b2d9a317e8c35d02e0f Binary files 
/dev/null and b/AllinonSAM/datasets/__pycache__/arcade.cpython-312.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/arcade.cpython-38.pyc b/AllinonSAM/datasets/__pycache__/arcade.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16af74f1024278a86a33c1950354e1ae9a85a71f Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/arcade.cpython-38.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/atr.cpython-312.pyc b/AllinonSAM/datasets/__pycache__/atr.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11fc132e1223ecbcc2b72b68ff3b6fdc4e4b0cbf Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/atr.cpython-312.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/atr.cpython-38.pyc b/AllinonSAM/datasets/__pycache__/atr.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8b9dc2e94443523f0cc65ba38195879308367c3 Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/atr.cpython-38.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/btcv.cpython-312.pyc b/AllinonSAM/datasets/__pycache__/btcv.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e40c6992b27602a77975921fd60052a133c5b479 Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/btcv.cpython-312.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/btcv.cpython-38.pyc b/AllinonSAM/datasets/__pycache__/btcv.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2449b02236b052ee097dd88175d46ff2e827587e Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/btcv.cpython-38.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/glas.cpython-312.pyc b/AllinonSAM/datasets/__pycache__/glas.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7711ef664ad6a3f6e220e2979939df5ce481579 Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/glas.cpython-312.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/glas.cpython-38.pyc b/AllinonSAM/datasets/__pycache__/glas.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00db36f101f2f8c773d558af3cd4ce41f6cad687 Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/glas.cpython-38.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/isic2018.cpython-312.pyc b/AllinonSAM/datasets/__pycache__/isic2018.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08c133a9304aeacdf7a34d479c52ea688ecaf2de Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/isic2018.cpython-312.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/isic2018.cpython-38.pyc b/AllinonSAM/datasets/__pycache__/isic2018.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f579393cd814d1877f7614625f66f8e808360002 Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/isic2018.cpython-38.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/polyp.cpython-312.pyc b/AllinonSAM/datasets/__pycache__/polyp.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2198ed78cc06ed6b64e2accdd4c0ff6b0627dd51 Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/polyp.cpython-312.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/polyp.cpython-38.pyc b/AllinonSAM/datasets/__pycache__/polyp.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8aa0c0660cbffce26b1e2698300d7d0cb4eec1f6 Binary files /dev/null and 
b/AllinonSAM/datasets/__pycache__/polyp.cpython-38.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/refuge.cpython-312.pyc b/AllinonSAM/datasets/__pycache__/refuge.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ffce261ca02ab4f3f0d94ae4d841b38df3b5e53 Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/refuge.cpython-312.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/refuge.cpython-38.pyc b/AllinonSAM/datasets/__pycache__/refuge.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc5841392a05058423fc7261b4f09ce748656090 Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/refuge.cpython-38.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/rite.cpython-312.pyc b/AllinonSAM/datasets/__pycache__/rite.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5557cc89693e08282da4cce6cab1d248a6fae3c6 Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/rite.cpython-312.pyc differ diff --git a/AllinonSAM/datasets/__pycache__/rite.cpython-38.pyc b/AllinonSAM/datasets/__pycache__/rite.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97dacfbd3646f915707fe60e65a1690b0c8cb936 Binary files /dev/null and b/AllinonSAM/datasets/__pycache__/rite.cpython-38.pyc differ diff --git a/AllinonSAM/datasets/arcade.py b/AllinonSAM/datasets/arcade.py new file mode 100644 index 0000000000000000000000000000000000000000..2fb3bed21077a03a829d8805b53ed66b856e473c --- /dev/null +++ b/AllinonSAM/datasets/arcade.py @@ -0,0 +1,88 @@ +import os +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset +from data_transforms.btcv_transform import BTCV_Transform + +class ArcadeDataset(Dataset): + def __init__(self, config, file_list=None, is_train=False, shuffle_list=True, apply_norm=True, no_text_mode=False) -> None: + super().__init__() + self.root_dir = config['data']['root_path'] + self.is_train = is_train + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + self.label_names = config['data']['label_names'] + self.label_list = config['data']['label_list'] + + self.image_dir = os.path.join(self.root_dir, 'images') + self.mask_dir = os.path.join(self.root_dir, 'masks') + + if not os.path.exists(self.image_dir) or not os.path.exists(self.mask_dir): + raise ValueError(f"Image or mask directory not found in {self.root_dir}") + + # If a file list is provided, use it. Otherwise, load all images in the directory. 
+ if file_list is not None: + self.images = file_list + else: + self.images = sorted([f for f in os.listdir(self.image_dir) if f.endswith('.png') or f.endswith('.jpg')]) + + if shuffle_list: + np.random.shuffle(self.images) + + self.data_transform = BTCV_Transform(config=config) + + def __len__(self): + return len(self.images) + + def __getitem__(self, index): + img_name = self.images[index] + image_name = os.path.splitext(os.path.basename(img_name))[0] + img_path = os.path.join(self.image_dir, image_name+'.png') + mask_name = os.path.splitext(image_name)[0] + '_mask.png' + # print(self.mask_dir) + # print(mask_name) + mask_path = os.path.join(self.mask_dir, mask_name) + + # print("Data point") + # print("Train: " , self.is_train) + # print("Img:" , img_path) + # print("Mask: ", mask_path) + + # Load and process image + img = Image.open(img_path).convert("RGB") + img = torch.as_tensor(np.array(img)).permute(2, 0, 1) # Change to CHW format + + # Load and process mask + if os.path.exists(mask_path): + mask = Image.open(mask_path).convert("L") + mask = torch.as_tensor(np.array(mask)) + else: + print(f"Mask not found for {mask_name}, using blank mask") + mask = torch.zeros((img.shape[1], img.shape[2]), dtype=torch.uint8) + + # Resize mask to match image size if necessary + if mask.shape != (img.shape[1], img.shape[2]): + mask = torch.as_tensor(np.array(Image.fromarray(mask.numpy()).resize((img.shape[2], img.shape[1])))) + + # Convert mask to binary + mask = (mask > 0).float() + + # Apply data transformations + img, mask = self.data_transform(img, mask.unsqueeze(0), is_train=self.is_train, apply_norm=self.apply_norm) + + if self.no_text_mode: + return img, mask, img_path, "" + else: + return img, mask[0], img_path, self.label_names[1] # Assuming "Vein" is the label of interest + + def get_category_ids(self, image_id): + img_name = self.images[image_id] + mask_name = os.path.splitext(img_name)[0] + '_mask.png' + mask_path = os.path.join(self.mask_dir, mask_name) + mask = Image.open(mask_path).convert("L") + mask = np.array(mask) + unique_values = np.unique(mask) + category_ids = [self.label_list[v] for v in unique_values if v in self.label_list] + return category_ids diff --git a/AllinonSAM/datasets/atr.py b/AllinonSAM/datasets/atr.py new file mode 100644 index 0000000000000000000000000000000000000000..3637ba901bc2272127eadeec7b71211f18e6c7c9 --- /dev/null +++ b/AllinonSAM/datasets/atr.py @@ -0,0 +1,104 @@ +import random +import os +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset +import pandas as pd +from data_transforms.atr_transform import ATR_Transform + + +class ATR_Dataset(Dataset): + def __init__(self, config, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False) -> None: + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.class_in_image = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.num_classes = len(self.label_names) + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + if self.is_train: + self.df = pd.read_csv(os.path.join(self.root_path, 'folds_masks', 'train0.csv')) + else: + self.df = pd.read_csv(os.path.join(self.root_path, 'folds_masks', 'val0.csv')) + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi 
in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + self.class_in_image = [self.class_in_image[pi] for pi in p] + + #define data transform + self.data_transform = ATR_Transform(config=config) + + def __len__(self): + return len(self.img_path_list) + + def populate_lists(self): + for i in range(len(self.df)): + img = self.df['mask_path'][i][6:] + img_path = os.path.join(self.root_path, 'imgs', img) + mask_path = os.path.join(self.root_path,self.df['mask_path'][i]) + # print(img) + if (('jpg' not in img) and ('jpeg not in img') and ('png' not in img) and ('bmp' not in img)): + continue + if self.no_text_mode: + self.img_names.append(img) + self.img_path_list.append(img_path) + self.label_path_list.append(mask_path) + self.label_list.append('') + self.class_in_image.append(self.df['tgt'][i]) + else: + for label_name in self.label_names: + self.img_names.append(img) + self.img_path_list.append(img_path) + self.label_path_list.append(mask_path) + self.label_list.append(label_name) + self.class_in_image.append(self.df['tgt'][i]) + + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + # print(img.shape) + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + try: + if self.num_classes>1: + # print("classs in image: ", self.class_in_image[index]) + # print("label list: ", self.label_list[index]) + if self.class_in_image[index]+' Vehicle'==self.label_list[index]: + label = torch.Tensor(np.array(Image.open(self.label_path_list[index]))) + else: + label = torch.zeros(img.shape[1], img.shape[2]) + else: + label = torch.Tensor(np.array(Image.open(self.label_path_list[index]))) + + if len(label.shape)==3: + label = label[:,:,0] + # print(label.shape) + except: + 1/0 + label = torch.zeros(img.shape[1], img.shape[2]) + + label = label.unsqueeze(0) + label = (label>0)+0 + label_of_interest = self.label_list[index] + + #convert all grayscale pixels due to resizing back to 0, 1 + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + label = (label>=0.5)+0 + label = label[0] + + + return img, label, self.img_path_list[index], label_of_interest diff --git a/AllinonSAM/datasets/btcv.py b/AllinonSAM/datasets/btcv.py new file mode 100644 index 0000000000000000000000000000000000000000..f83842086647b37a2b095856b22a42e806e8d2f6 --- /dev/null +++ b/AllinonSAM/datasets/btcv.py @@ -0,0 +1,99 @@ +import random +import os +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset + +from data_transforms.btcv_transform import BTCV_Transform + + +class BTCV_Dataset(Dataset): + def __init__(self, config, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False) -> None: + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.num_classes = len(self.label_names) + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + self.label_dict = { + "Spleen":1, + "Right Kidney": 2, + "Left Kidney": 3, + "Gall Bladder": 4, + "Liver": 5, + "Stomach": 6, + "Aorta": 7, + "Pancreas": 8 + } + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + 
self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + #define data transform + self.data_transform = BTCV_Transform(config=config) + + def __len__(self): + return len(self.img_path_list) + + def populate_lists(self): + imgs_labels_path = os.path.join(self.root_path, 'train_npz') + + for npz in os.listdir(imgs_labels_path): + case_no = int(npz[4:npz.find("_")]) + if self.is_train: + if case_no>=34: + continue + else: + if case_no<34: + continue + # print(img) + if self.no_text_mode: + self.img_names.append(npz) + self.img_path_list.append(os.path.join(imgs_labels_path,npz)) + self.label_path_list.append(os.path.join(imgs_labels_path, npz)) + self.label_list.append('') + else: + for label_name in self.label_names: + self.img_names.append(npz) + self.img_path_list.append(os.path.join(imgs_labels_path,npz)) + self.label_path_list.append(os.path.join(imgs_labels_path, npz)) + self.label_list.append(label_name) + + + def __getitem__(self, index): + data = np.load(self.img_path_list[index]) + img, all_class_labels = data['image'], data['label'] + # print("img max min: ", np.max(img), np.min(img)) + img = torch.Tensor(img).unsqueeze(0).repeat(3,1,1) + + try: + label = torch.Tensor(all_class_labels)==self.label_dict[self.label_list[index]]+0 + if len(label.shape)==3: + label = label[:,:,0] + except: + 1/0 + label = torch.zeros(img.shape[1], img.shape[2]) + + label = label.unsqueeze(0) + label_of_interest = self.label_list[index] + + #convert all grayscale pixels due to resizing back to 0, 1 + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + label = (label>=0.5)+0 + label = label[0] + + + return img, label, self.img_path_list[index], label_of_interest diff --git a/AllinonSAM/datasets/glas.py b/AllinonSAM/datasets/glas.py new file mode 100644 index 0000000000000000000000000000000000000000..2f509d49e2e61eddfab41f9586173d4da22c5c8c --- /dev/null +++ b/AllinonSAM/datasets/glas.py @@ -0,0 +1,92 @@ +import random +import os +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset + +from data_transforms.glas_transform import GLAS_Transform + + +class GLAS_Dataset(Dataset): + def __init__(self, config, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False) -> None: + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.num_classes = len(self.label_names) + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + #define data transform + self.data_transform = GLAS_Transform(config=config) + + def __len__(self): + return len(self.img_path_list) + + def populate_lists(self): + if self.is_train: + imgs_path = os.path.join(self.root_path, 'train') + labels_path = os.path.join(self.root_path, 'train') + else: + # imgs_path = os.path.join(self.root_path, 'validation') 
+ # labels_path = os.path.join(self.root_path, 'validation') + imgs_path = os.path.join(self.root_path, 'test') + labels_path = os.path.join(self.root_path, 'test') + + for img in os.listdir(imgs_path): + # print(img) + if (('jpg' not in img) and ('jpeg not in img') and ('png' not in img) and ('bmp' not in img)): + continue + if 'anno' in img: + continue + if self.no_text_mode: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img[:-4]+'_anno.bmp')) + self.label_list.append('') + else: + for label_name in self.label_names: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img[:-4]+'_anno.bmp')) + self.label_list.append(label_name) + + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + try: + label = torch.Tensor(np.array(Image.open(self.label_path_list[index]))) + if len(label.shape)==3: + label = label[:,:,0] + except: + label = torch.zeros(img.shape[1], img.shape[2]) + + label = label.unsqueeze(0) + label = (label>0)+0 + label_of_interest = self.label_list[index] + + #convert all grayscale pixels due to resizing back to 0, 1 + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + label = (label>=0.5)+0 + label = label[0] + + + return img, label, self.img_path_list[index], label_of_interest diff --git a/AllinonSAM/datasets/isic2018.py b/AllinonSAM/datasets/isic2018.py new file mode 100644 index 0000000000000000000000000000000000000000..b1bcf5e9951f4c6e0af6bbfcf4737791c8f1f4c5 --- /dev/null +++ b/AllinonSAM/datasets/isic2018.py @@ -0,0 +1,90 @@ +import random +import os +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset + +from data_transforms.isic2018_transform import ISIC_Transform + + +class ISIC2018_Dataset(Dataset): + def __init__(self, config, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False) -> None: + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.num_classes = len(self.label_names) + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + #define data transform + self.data_transform = ISIC_Transform(config=config) + + def __len__(self): + return len(self.img_path_list) + + def populate_lists(self): + # if self.is_train: + # imgs_path = os.path.join(self.root_path, 'ISIC2018_Task1-2_Training_Input') + # labels_path = os.path.join(self.root_path, 'ISIC2018_Task1_Training_GroundTruth') + if self.is_train: + imgs_path = os.path.join(self.root_path, 'ISIC2018_Task1-2_TrainVal_Input') + labels_path = os.path.join(self.root_path, 'ISIC2018_Task1_TrainVal_GroundTruth') + else: + imgs_path = os.path.join(self.root_path, 'ISIC2018_Task1-2_Validation_Input') + labels_path = 
os.path.join(self.root_path, 'ISIC2018_Task1_Validation_GroundTruth') + + for img in os.listdir(imgs_path): + if (('jpg' not in img) and ('jpeg not in img') and ('png' not in img)): + continue + if self.no_text_mode: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img[:-4]+'_segmentation.png')) + self.label_list.append('') + else: + for label_name in self.label_names: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img[:-4]+'_segmentation.png')) + self.label_list.append(label_name) + + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + try: + label = torch.Tensor(np.array(Image.open(self.label_path_list[index]))) + if len(label.shape)==3: + label = label[:,:,0] + except: + label = torch.zeros(img.shape[1], img.shape[2]) + + label = label.unsqueeze(0) + label = (label>0)+0 + label_of_interest = self.label_list[index] + + #convert all grayscale pixels due to resizing back to 0, 1 + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + label = (label>=0.5)+0 + label = label[0] + + + return img, label, self.img_path_list[index], label_of_interest diff --git a/AllinonSAM/datasets/polyp.py b/AllinonSAM/datasets/polyp.py new file mode 100644 index 0000000000000000000000000000000000000000..9178bd1b1ed87582111612bbb0e6b44999e432e5 --- /dev/null +++ b/AllinonSAM/datasets/polyp.py @@ -0,0 +1,113 @@ +import random +import argparse +import os +import sys +import numpy as np +import pandas as pd +import torch +from matplotlib import pyplot as plt +from PIL import Image +from torch.utils.data import Dataset, TensorDataset +from torchvision import datasets, models +from torchvision import transforms +from torchvision.transforms import functional as F +from torch.nn.functional import pad +from skimage.transform import resize +import nibabel as nib +import time +import json + +from data_transforms.polyp_transform import Polyp_Transform + + +class Polyp_Dataset(Dataset): + def __init__(self, config, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False) -> None: + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.num_classes = len(self.label_names) + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + self.train_df = os.path.join(self.root_path, 'train.csv') + self.val_df = os.path.join(self.root_path, 'val.csv') + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + #define data transform + self.data_transform = Polyp_Transform(config=config) + print("Length of dataset: ", len(self.img_path_list)) + + def __len__(self): + return len(self.img_path_list) + + def populate_lists(self): + # imgs_path = os.path.join(self.root_path, 'CVC_clinicTRimage') + # labels_path = os.path.join(self.root_path, 
'CVC_clinicTRmask') + # imgs_path = os.path.join(self.root_path, 'kvasirsegTRimage') + # labels_path = os.path.join(self.root_path, 'kvasirsegTRmask') + if self.is_train: + imgs_path = os.path.join(self.root_path, 'TrainDataset/image') + labels_path = os.path.join(self.root_path, 'TrainDataset/masks') + else: + imgs_path = os.path.join(self.root_path, 'TestDataset/CVC-ColonDB/images') + labels_path = os.path.join(self.root_path, 'TestDataset/CVC-ColonDB/masks') + # imgs_path = os.path.join(self.root_path, 'NewTRimage') + # labels_path = os.path.join(self.root_path, 'NewTRmask') + # if self.is_train: + # df = pd.read_csv(self.train_df) + # else: + # df = pd.read_csv(self.val_df) + # for i in range(len(df)): + # img = df['image_path'].iloc[i] + # lbl = df['mask_path'].iloc[i] + for img in os.listdir(imgs_path): + if self.no_text_mode: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img)) + self.label_list.append('') + else: + for label_name in self.label_names: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img)) + self.label_list.append(label_name) + + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + try: + label = torch.Tensor(np.array(Image.open(self.label_path_list[index]))) + if len(label.shape)==3: + label = label[:,:,0] + except: + label = torch.zeros(img.shape[1], img.shape[2]) + + + label = label.unsqueeze(0) + label = (label>0)+0 + label_of_interest = self.label_list[index] + + #convert all grayscale pixels due to resizing back to 0, 1 + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + label = (label>=0.5)+0 + label = label[0] + + + return img, label, self.img_path_list[index], label_of_interest diff --git a/AllinonSAM/datasets/refuge.py b/AllinonSAM/datasets/refuge.py new file mode 100644 index 0000000000000000000000000000000000000000..8183acc2483d8d1cd12cc76ac86b2affbdab30ef --- /dev/null +++ b/AllinonSAM/datasets/refuge.py @@ -0,0 +1,92 @@ +import random +import os +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset + +from data_transforms.refuge_transform import Refuge_Transform + + +class Refuge_Dataset(Dataset): + def __init__(self, config, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False) -> None: + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.num_classes = len(self.label_names) + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + self.label_dict = { + 'optic cup': 2, + 'optic disk': 1 + } + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + #define data transform + self.data_transform = Refuge_Transform(config=config) + + def __len__(self): + return len(self.img_path_list) + + def 
populate_lists(self): + if self.is_train: + imgs_path = os.path.join(self.root_path, 'train/Images_Cropped') + labels_path = os.path.join(self.root_path, 'train/Masks_Cropped') + else: + imgs_path = os.path.join(self.root_path, 'val/Images_Cropped') + labels_path = os.path.join(self.root_path, 'val/Masks_Cropped') + + for img in os.listdir(imgs_path): + if (('jpg' not in img) and ('jpeg not in img') and ('png' not in img)): + continue + if self.no_text_mode: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img[:-4]+'.png')) + self.label_list.append('') + else: + for label_name in self.label_names: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img[:-4]+'.png')) + self.label_list.append(label_name) + + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + try: + label = torch.Tensor(np.array(Image.open(self.label_path_list[index]))) + if len(label.shape)==3: + label = label[:,:,0] + label = (label==self.label_dict[self.label_list[index]]) + except: + label = torch.zeros(img.shape[1], img.shape[2]) + + label = label.unsqueeze(0) + label = (label>0)+0 + label_of_interest = self.label_list[index] + + #convert all grayscale pixels due to resizing back to 0, 1 + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + label = (label>=0.5)+0 + label = label[0] + + + return img, label, self.img_path_list[index], label_of_interest diff --git a/AllinonSAM/datasets/rite.py b/AllinonSAM/datasets/rite.py new file mode 100644 index 0000000000000000000000000000000000000000..70630146ac814b3a8c96c3be2f991fbdea51c1d8 --- /dev/null +++ b/AllinonSAM/datasets/rite.py @@ -0,0 +1,87 @@ +import random +import os +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset + +from data_transforms.rite_transform import RITE_Transform + + +class RITE_Dataset(Dataset): + def __init__(self, config, is_train=False, shuffle_list = True, apply_norm=True, no_text_mode=False) -> None: + super().__init__() + self.root_path = config['data']['root_path'] + self.img_names = [] + self.img_path_list = [] + self.label_path_list = [] + self.label_list = [] + self.is_train = is_train + self.label_names = config['data']['label_names'] + self.num_classes = len(self.label_names) + self.config = config + self.apply_norm = apply_norm + self.no_text_mode = no_text_mode + + self.populate_lists() + if shuffle_list: + p = [x for x in range(len(self.img_path_list))] + random.shuffle(p) + self.img_path_list = [self.img_path_list[pi] for pi in p] + self.img_names = [self.img_names[pi] for pi in p] + self.label_path_list = [self.label_path_list[pi] for pi in p] + self.label_list = [self.label_list[pi] for pi in p] + + #define data transform + self.data_transform = RITE_Transform(config=config) + + def __len__(self): + return len(self.img_path_list) + + def populate_lists(self): + if self.is_train: + imgs_path = os.path.join(self.root_path, 'train/images') + labels_path = os.path.join(self.root_path, 'train/masks') + else: + imgs_path = os.path.join(self.root_path, 'validation/images') + labels_path = os.path.join(self.root_path, 'validation/masks') + + for img in os.listdir(imgs_path): + if (('jpg' not in img) and ('jpeg not in img') and ('png' 
not in img)): + continue + if self.no_text_mode: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img)) + self.label_list.append('') + else: + for label_name in self.label_names: + self.img_names.append(img) + self.img_path_list.append(os.path.join(imgs_path,img)) + self.label_path_list.append(os.path.join(labels_path, img)) + self.label_list.append(label_name) + + + def __getitem__(self, index): + img = torch.as_tensor(np.array(Image.open(self.img_path_list[index]).convert("RGB"))) + if self.config['data']['volume_channel']==2: + img = img.permute(2,0,1) + + try: + label = torch.Tensor(np.array(Image.open(self.label_path_list[index]))) + if len(label.shape)==3: + label = label[:,:,0] + except: + label = torch.zeros(img.shape[1], img.shape[2]) + + label = label.unsqueeze(0) + label = (label>0)+0 + label_of_interest = self.label_list[index] + + #convert all grayscale pixels due to resizing back to 0, 1 + img, label = self.data_transform(img, label, is_train=self.is_train, apply_norm=self.apply_norm) + label = (label>=0.5)+0 + label = label[0] + + + return img, label, self.img_path_list[index], label_of_interest diff --git a/AllinonSAM/driver_scratchpad.py b/AllinonSAM/driver_scratchpad.py new file mode 100644 index 0000000000000000000000000000000000000000..6400a404ef753451dd3ddfa28aecae3f7ac1a04b --- /dev/null +++ b/AllinonSAM/driver_scratchpad.py @@ -0,0 +1,988 @@ +import argparse +import yaml +import torch.nn as nn +import torch.optim as optim +from torch.optim import lr_scheduler +from data_utils import * +from model import * +from test import * +import pandas as pd +from train import * +import sys +import torch +import os + +source_path = os.path.join("/home/abdelrahman.elsayed/CVPR/AllinonSAM/datasets") +sys.path.append(source_path) +from arcade import ArcadeDataset +from crfseg import CRF +import itertools +from utils import CosineAnnealingWarmupScheduler + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--data_config", + default="/home/abdelrahman.elsayed/CVPR/AllinonSAM/config_arcade.yml", + help="data config file path", + ) + + parser.add_argument( + "--model_config", + default="/home/abdelrahman.elsayed/CVPR/AllinonSAM/model_svdtuning.yml", + help="model config file path", + ) + + parser.add_argument("--pretrained_path", default=None, help="pretrained model path") + + parser.add_argument( + "--save_path", default="checkpoints/temp.pth", help="pretrained model path" + ) + parser.add_argument( + "--training_strategy", default="svdtuning", help="how to train the model" + ) + + parser.add_argument("--device", default="cuda:0", help="device to train on") + + args = parser.parse_args() + + return args + + +def main_onetime_functions(config): + dataset_dict, dataset_sizes, label_dict = get_data( + config, + tr_folder_start=0, + tr_folder_end=78000, + val_folder_start=0, + val_folder_end=104000, + use_norm=False, + ) + for x in dataset_dict: + dataset_dict[x].one_time_generate_pos_neg_list_dicts(x) + + +def main_datautils(config, use_norm=True): + selected_idxs = [0, 12, 42, 79, 100] + print(config) + dataset_dict, dataset_sizes, label_dict = get_data( + config, + tr_folder_start=0, + tr_folder_end=78000, + val_folder_start=0, + val_folder_end=104000, + use_norm=use_norm, + ) + + # test without generating examples for legacy + # print(len(dataset_dict['train'])) + # for i in selected_idxs: + # temp = (dataset_dict['train'][i]) + # print(temp[-1]) + # 
print(temp[-2]) + # print(temp[0].shape) + # print(temp[1].shape) + # plt.imshow(temp[0].permute(1,2,0), cmap='gray') + # plt.show() + # plt.imshow(temp[1], cmap='gray') + # plt.show() + + # test generate examples function + print("testing generate examples\n") + try: + dataset_dict["train"].generate_examples() + except: + pass + print(len(dataset_dict["train"])) + for i in selected_idxs: + temp = dataset_dict["train"][i] + print(temp[-1]) + print(temp[-2]) + print(temp[0].shape) + print(temp[1].shape) + try: + plt.imshow(temp[1], cmap="gray") + plt.show() + print(temp[0].min(), temp[0].max()) + plt.imshow(temp[0].permute(1, 2, 0), cmap="gray") + plt.show() + + except: + print("temp range: ", temp[0][0].min(), temp[0][0].max()) + plt.imshow(temp[0][0].permute(1, 2, 0), cmap="gray") + plt.show() + print("temp label range: ", temp[1][0].min(), temp[1][0].max()) + plt.imshow(temp[1][0], cmap="gray") + plt.show() + + +def main_model(config): + print(config) + training_strategy = "svdtuning" + label_dict = {"liver": 0, "tumor": 1} + model = Prompt_Adapted_SAM(config, label_dict) + + # freeze correct weights + for p in model.parameters(): + p.requires_grad = True + + # unfreeze according to strategy: + for name, p in model.named_parameters(): + # if training_strategy=='svdtuning': + # if 'trainable' in name.lower(): + # p.requires_grad = True + # elif training_strategy=='biastuning': + # if ('bias' in name.lower()) and ('clip' not in name.lower()): + # p.requires_grad = True + # elif training_strategy=='svdbiastuning': + # if 'trainable' in name.lower(): + # p.requires_grad = True + # if ('bias' in name.lower()) and ('clip' not in name.lower()): + # p.requires_grad = True + + if model_config["prompts"]["USE_TEXT_PROMPT"]: + if "Text_Embedding_Affine" in name: + p.requires_grad = True + if "clip" in name: + p.requires_grad = False + + # for name, p in model.named_parameters(): + # if p.requires_grad: + # print(name) + print( + "number of trainable parameters: ", + sum(p.numel() for p in model.parameters() if p.requires_grad), + ) + + return + + +def main_test(data_config, model_config, pretrained_path): + test_start = 104 + test_end = 131 + test( + data_config, + model_config, + pretrained_path, + test_start, + test_end, + device="cuda:0", + ) + + +def lr_lambda(step): + if step < model_config["training"]["warmup_steps"]: + return step / model_config["training"]["warmup_steps"] # Linear warm-up + elif step < model_config["training"]["steps"][0]: + return 1.0 # Maintain initial learning rate + elif step < model_config["training"]["steps"][1]: + return 1 / model_config["training"]["decay_factor"] # First decay + else: + return 1 / (model_config["training"]["decay_factor"] ** 2) # Second decay + + +def main_train( + data_config, + model_config, + pretrained_path, + save_path, + training_strategy="biastuning", + device="cuda:0", +): + print(data_config) + print(model_config) + + # load data + if data_config["data"]["name"] == "LITS": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=78, + val_folder_start=78, + val_folder_end=104, + ) + elif data_config["data"]["name"] == "AMOS22": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=78, + val_folder_start=78, + val_folder_end=104, + ) + elif data_config["data"]["name"] == "IDRID": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=40, + val_folder_start=40, + val_folder_end=104, + ) + 
dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "ENDOVIS": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=180, + val_folder_start=180, + val_folder_end=304, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "ENDOVIS 18": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "CHESTXDET": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "CHOLEC 8K": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "ULTRASOUND": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "KVASIRSEG": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "LITS2": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "ISIC2018": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + 
batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "Polyp": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "RITE": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "GLAS": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "Refuge": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "BTCV": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "ATR": + dataset_dict, dataset_sizes, label_dict = get_data( + data_config, + tr_folder_start=0, + tr_folder_end=18000, + val_folder_start=0, + val_folder_end=34444, + ) + dataloader_dict = {} + for x in ["train", "val"]: + dataloader_dict[x] = torch.utils.data.DataLoader( + dataset_dict[x], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + elif data_config["data"]["name"] == "ArcadeDataset": + print("HERE") + data_split_csv_path = data_config["data"]["data_split_csv"] + data_split = pd.read_csv(data_split_csv_path) + + dataset_dict = {} + dataloader_dict = {} + + use_norm = True + no_text_mode = False + + for split in ["train", "val"]: + # Filter the CSV for the current split + split_data = data_split[data_split["split"] == split]["imgs"].tolist() + + # Pass the filtered data to the dataset class (ArcadeDataset) + dataset_dict[split] = ArcadeDataset( + config=data_config, + file_list=split_data, # Pass file_list as (image_path, mask_path) tuples + shuffle_list=True, + is_train=(split == "train"), + apply_norm=use_norm, + no_text_mode=no_text_mode, + ) + + # Create DataLoader for each dataset + dataloader_dict[split] = torch.utils.data.DataLoader( + dataset_dict[split], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + 
num_workers=4, + ) + + # Get dataset sizes + dataset_sizes = {split: len(dataset_dict[split]) for split in ["train", "val"]} + + # Create label dictionary + label_dict = { + name: i for i, name in enumerate(data_config["data"]["label_names"]) + } + + # Print dataset sizes + print(f"Train dataset size: {dataset_sizes['train']}") + print(f"Val dataset size: {dataset_sizes['val']}") + + # Get dataset sizes + dataset_sizes = {split: len(dataset_dict[split]) for split in ["train", "val"]} + + # Create label dictionary + label_dict = { + name: i for i, name in enumerate(data_config["data"]["label_names"]) + } + + # Print dataset sizes + print(f"Train dataset size: {dataset_sizes['train']}") + print(f"Val dataset size: {dataset_sizes['val']}") + # load model + # change the img size in model config according to data config + model_config["sam"]["img_size"] = data_config["data_transforms"]["img_size"] + model_config["sam"]["num_classes"] = len(data_config["data"]["label_list"]) + if training_strategy == "lora": + model_config["use_lora"] = True + else: + model_config["use_lora"] = False + + if training_strategy == "biastuning": + model_config["decoder_training"] = "full" + + if model_config["arch"] == "Prompt Adapted SAM": + model = Prompt_Adapted_SAM( + model_config, label_dict, device, training_strategy=training_strategy + ) + + # load model weights + if pretrained_path is not None: + model.load_state_dict(torch.load(pretrained_path)) + + # freeze correct weights + for p in model.parameters(): + # p.requires_grad=True + p.requires_grad = False + + # unfreeze according to strategy: + for name, p in model.named_parameters(): + if training_strategy == "svdtuning": + if "trainable" in name.lower(): + p.requires_grad = True + elif training_strategy == "biastuning": + if ("bias" in name.lower()) and ("clip" not in name.lower()): + p.requires_grad = True + elif training_strategy == "svdbiastuning": + if "trainable" in name.lower(): + p.requires_grad = True + if ("bias" in name.lower()) and ("clip" not in name.lower()): + p.requires_grad = True + elif training_strategy == "lora": + if "trainable_lora" in name.lower(): + p.requires_grad = True + + if model_config["prompts"]["USE_TEXT_PROMPT"]: + if "Text_Embedding_Affine" in name: + p.requires_grad = True + if model_config["prompts"]["USE_SLICE_NUM"]: + if "slice" in name: + p.requires_grad = True + + if model_config["decoder_training"] == "full": + if ("decoder" in name.lower()) and ("clip" not in name.lower()): + p.requires_grad = True + elif model_config["decoder_training"] == "svdtuning": + if "trainable" in name.lower(): + p.requires_grad = True + elif model_config["decoder_training"] == "none": + if "decoder" in name.lower(): + p.requires_grad = False + + if "prompt_encoder" in name.lower(): + p.requires_grad = False + # p.requires_grad = True + + # common parameters + if "norm" in name.lower(): + p.requires_grad = True + if "pos_embed" in name.lower(): + p.requires_grad = True + if "clip" in name.lower(): + p.requires_grad = False + + # training parameters + training_params = model_config["training"] + if training_params["optimizer"] == "adamw": + optimizer = optim.AdamW( + model.parameters(), + lr=float(training_params["lr"]), + weight_decay=float(training_params["weight_decay"]), + ) + elif training_params["optimizer"] == "sgd": + optimizer = optim.SGD( + model.parameters(), + lr=float(training_params["lr"]), + weight_decay=float(training_params["weight_decay"]), + momentum=0.9, + ) + + # USED LAMBDALR or CosineAnnealing instead of STEPLR + if 
training_params["schedular"] == "cosine_warmup": + return CosineAnnealingWarmupScheduler( + optimizer, + warmup_epochs=training_params["warmup_epochs"],#TODO: Add it the config file (organize it in more good way), + total_epochs=training_params["num_epochs"], + min_lr=training_params["min_lr"] , #TODO: Add it the config file (organize it in more good way) + warmup_start_lr=training_params["lr"] + ) + # I STILL Use this for some of my experiments thats why I am keeping it + if training_params["schedular"] == "step": + exp_lr_scheduler = lr_scheduler.StepLR( + optimizer, + step_size=training_params["schedule_step"], + gamma=training_params["schedule_step_factor"], + ) + else: + exp_lr_scheduler = lr_scheduler.LambdaLR( + optimizer, + lr_lambda, + ) + criterion = [] + if "dice" in training_params["loss"]: + criterion.append(dice_loss) + if "focal" in training_params["loss"]: + criterion.append(focal_loss) + if "CE" in training_params["loss"]: + criterion.append(nn.BCELoss()) + if "weighted CE" in training_params["loss"]: + criterion.append(weighted_ce_loss) + if criterion == []: + criterion = [nn.BCELoss()] + + # retain_graph = False if model_config['decoder_training']=='none' else True + retain_graph = False + + # train the model + if data_config["data"]["name"] == "LITS": + model = train( + model, + dataset_dict["train"], + dataset_dict["val"], + criterion, + optimizer, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + ) + elif data_config["data"]["name"] == "AMOS22": + model = train( + model, + dataset_dict["train"], + dataset_dict["val"], + criterion, + optimizer, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + ) + # model = train_dl(model, dataset_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device, retain_graph=retain_graph, neg2pos_ratio=data_config['data']['negative_to_positive_ratio'], reg_multiplier=model_config['training']['reg_multiplier']) + + elif data_config["data"]["name"] == "IDRID": + model = train_dl( + model, + dataloader_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "ENDOVIS": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "ENDOVIS 18": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "CHOLEC 8K": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + 
exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "ULTRASOUND": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "KVASIRSEG": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "CHESTXDET": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "LITS2": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "ISIC2018": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "Polyp": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "RITE": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "GLAS": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + 
retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "Refuge": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "BTCV": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "ATR": + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + num_epochs=training_params["num_epochs"], + bs=training_params["batch_size"], + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + elif data_config["data"]["name"] == "ArcadeDataset": + save_path = "./models" + data_config["data"]["root_path"].split("/")[-1] + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + optimizer, + exp_lr_scheduler, + save_path, + save_dir=f"./{args.training_strategy}/{data_config['data']['root_path'].split('/')[-1]}", + num_epochs=training_params["num_epochs"], + bs=5, + device=device, + retain_graph=retain_graph, + neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + reg_multiplier=model_config["training"]["reg_multiplier"], + ) + # print("Starting RLHF fine-tuning...") + # model.train() + # # get the training dataloader + # train_datatloader = dataloader_dict["train"] + # val_dataloader = dataloader_dict["val"] + # rewardmodel = RewardModel(save_dir="DIAS_rhlf_30") + # rewardmodel = rewardmodel.to(device) + # rlhf_model = train_rlhf( + # model, + # model_config, + # label_dict, + # rewardmodel, + # train_datatloader, + # val_dataloader, + # 40, + # ) + # # more tuning + # optimizer = optim.AdamW( + # rlhf_model.parameters(), + # lr=float(training_params["lr"]), + # weight_decay=float(training_params["weight_decay"]), + # ) + # exp_lr_scheduler = lr_scheduler.StepLR( + # optimizer, + # step_size=training_params["schedule_step"], + # gamma=training_params["schedule_step_factor"], + # ) + # final_model = train_dl( + # rlhf_model, + # dataset_dict, + # dataset_sizes, + # criterion, + # optimizer, + # exp_lr_scheduler, + # save_path, + # save_dir=f"./{args.training_strategy}/{data_config['data']['root_path'].split('/')[-1]}", + # num_epochs=50, + # bs=5, + # device=device, + # retain_graph=retain_graph, + # neg2pos_ratio=data_config["data"]["negative_to_positive_ratio"], + # reg_multiplier=model_config["training"]["reg_multiplier"], + # ) + + +if __name__ == "__main__": + args = parse_args() + with open(args.data_config, "r") as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, "r") as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + + # main_onetime_functions(data_config) + # #for checking data_utils + 
# main_datautils(data_config, use_norm=False) + + # #for checking model + # main_model(config=model_config) + + # #for testing on the test dataset + # main_test(data_config, model_config, args.pretrained_path) + + # # for training the model + main_train( + data_config, + model_config, + args.pretrained_path, + args.save_path, + args.training_strategy, + device=args.device, + ) diff --git a/AllinonSAM/eval/atr1/config_atr1.yml b/AllinonSAM/eval/atr1/config_atr1.yml new file mode 100644 index 0000000000000000000000000000000000000000..2cf0e6c9ec2854525cb3d8a97df4f47b05e5b40d --- /dev/null +++ b/AllinonSAM/eval/atr1/config_atr1.yml @@ -0,0 +1,21 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 1024 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: True + use_cjitter: False + use_affine: False +data: + name: ATR + root_path: '/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr' + label_list: [1] + label_names: ['Military Vehicle'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/eval/atr1/generate_all_results.sh b/AllinonSAM/eval/atr1/generate_all_results.sh new file mode 100644 index 0000000000000000000000000000000000000000..ab0ab79d600ae3ef275f3486576fb462307f0462 --- /dev/null +++ b/AllinonSAM/eval/atr1/generate_all_results.sh @@ -0,0 +1,28 @@ + +echo "Testing Accuracy 1000: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr/test_csvs/test_1000.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --pretrained_path "biastuning_atr1_thermal_512_bs8.pth" --save_path biastuning_atr1_thermal_512_bs8/1000 --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset_old/range/thermal" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." + +echo "Testing Accuracy 2000: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr/test_csvs/test_2000.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --pretrained_path "biastuning_atr1_thermal_512_bs8.pth" --save_path biastuning_atr1_thermal_512_bs8/2000 --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset_old/range/thermal" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." + +echo "Testing Accuracy 3000: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr/test_csvs/test_3000.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --pretrained_path "biastuning_atr1_thermal_512_bs8.pth" --save_path biastuning_atr1_thermal_512_bs8/3000 --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset_old/range/thermal" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." 
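For orientation, a minimal sketch (not part of the patch) of how a data config such as config_atr1.yml above is consumed: the driver loads it with PyYAML and hands it to the matching dataset class, which builds the transform and file lists shown earlier in this diff. The paths, batch size, and worker count below are illustrative placeholders, not values taken from the repository.

import sys, yaml, torch

# Illustrative paths; the real ones are machine-specific (see driver_scratchpad.py).
sys.path.append("AllinonSAM")            # so data_transforms.* resolves
sys.path.append("AllinonSAM/datasets")   # so the dataset module resolves, as in the driver
from atr import ATR_Dataset

with open("AllinonSAM/eval/atr1/config_atr1.yml", "r") as f:
    data_config = yaml.load(f, Loader=yaml.FullLoader)

# Validation split: reads <root_path>/folds_masks/val0.csv and the 'imgs' folder.
val_set = ATR_Dataset(data_config, is_train=False, shuffle_list=False, apply_norm=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=8, shuffle=False, num_workers=4)

img, mask, img_path, prompt = val_set[0]  # (3, H, W) image, (H, W) binary mask, file path, label text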
+ +echo "Testing Accuracy 4000: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr/test_csvs/test_4000.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --pretrained_path "biastuning_atr1_thermal_512_bs8.pth" --save_path biastuning_atr1_thermal_512_bs8/4000 --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset_old/range/thermal" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." + +echo "Testing Accuracy 5000: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr/test_csvs/test_5000.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --pretrained_path "biastuning_atr1_thermal_512_bs8.pth" --save_path biastuning_atr1_thermal_512_bs8/5000 --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset_old/range/thermal" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." + +echo "Testing Accuracy day: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr/test_csvs/test_day.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --pretrained_path "biastuning_atr1_thermal_512_bs8.pth" --save_path biastuning_atr1_thermal_512_bs8/day --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr/test_imgs" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." + +echo "Testing Accuracy night: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr/test_csvs/test_night.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --pretrained_path "biastuning_atr1_thermal_512_bs8.pth" --save_path biastuning_atr1_thermal_512_bs8/night --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/cegr/test_imgs" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." \ No newline at end of file diff --git a/AllinonSAM/eval/atr1/generate_all_results_zeroshot.sh b/AllinonSAM/eval/atr1/generate_all_results_zeroshot.sh new file mode 100644 index 0000000000000000000000000000000000000000..109656de41c07cfd54a7d6d60db7318f5b0e032b --- /dev/null +++ b/AllinonSAM/eval/atr1/generate_all_results_zeroshot.sh @@ -0,0 +1,28 @@ + +echo "Testing Accuracy 1000: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co/test_csvs/test_1000.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --save_path sam-zs_i1co/1000 --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset_old/range/visible" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." + +echo "Testing Accuracy 2000: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co/test_csvs/test_2000.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --save_path sam-zs_i1co/2000 --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset_old/range/visible" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." 
+ +echo "Testing Accuracy 3000: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co/test_csvs/test_3000.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --save_path sam-zs_i1co/3000 --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset_old/range/visible" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." + +echo "Testing Accuracy 4000: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co/test_csvs/test_4000.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --save_path sam-zs_i1co/4000 --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset_old/range/visible" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." + +echo "Testing Accuracy 5000: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co/test_csvs/test_5000.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --save_path sam-zs_i1co/5000 --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset_old/range/visible" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." + +echo "Testing Accuracy day: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co/test_csvs/test_day.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --save_path sam-zs_i1co/day --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co/test_imgs" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." + +echo "Testing Accuracy night: " +python generate_predictions.py --csv_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co/test_csvs/test_night.csv" --data_config config_atr1.yml --model_config model_svdtuning.yml --save_path sam-zs_i1co/night --root_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co" --img_folder_path "/media/ubuntu/New Volume/jay/ATR/atr_dataset/i1co/test_imgs" --device "cuda:0" --labels_of_interest "Military Vehicle" +echo "......................." 
\ No newline at end of file diff --git a/AllinonSAM/eval/atr1/generate_predictions.py b/AllinonSAM/eval/atr1/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..fb4db6a9df16cb07974a63af7b1e084146783345 --- /dev/null +++ b/AllinonSAM/eval/atr1/generate_predictions.py @@ -0,0 +1,155 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * +from data_transforms.atr_transform import ATR_Transform + +label_names = ['Military Vehicle'] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--csv_path', default='config_tmp.yml', + help='data csv file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--root_path', default='.', + help='root path to the groundtruth') + + parser.add_argument('--img_folder_path', default='.', + help='path to the image folder') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + if args.pretrained_path: + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = ATR_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + df_test = 
pd.read_csv(args.csv_path) + for i in range(len(df_test)): + gt_path = os.path.join(args.root_path,df_test['mask_path'][i]) + img_path = os.path.join(args.img_folder_path, df_test['mask_path'][i][11:]) + img_name = df_test['mask_path'][i][11:] + + # print("img_path: ",img_path) + # print("gt_path: ",gt_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + label = torch.Tensor(np.array(Image.open(gt_path))) + if len(label.shape)==3: + label = label[:,:,0] + label = label.unsqueeze(0) + mask = (label>0)+0 + # plt.imshow(gold) + # plt.show() + + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + + plt.imshow((masks[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name[:-4]+'.png')) + plt.close() + + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/atr1/model_svdtuning.yml b/AllinonSAM/eval/atr1/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..28bebdd2ca329181ffaa204aa423c5e39b43ba3c --- /dev/null +++ b/AllinonSAM/eval/atr1/model_svdtuning.yml @@ -0,0 +1,32 @@ +sam: + img_size: 1024 + num_classes: 13 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + NUM_TEXT_REPEAT: 1 + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 9 + num_epochs: 1000 + schedule_step: 200 + schedule_step_factor: 0.2 + weight_decay: 1e-2 + loss: 'focal+dice' + reg_multiplier: 0 + +use_lora: False \ No newline at end of file diff --git a/AllinonSAM/eval/btcv/config_btcv.yml b/AllinonSAM/eval/btcv/config_btcv.yml new file mode 100644 index 0000000000000000000000000000000000000000..36ee3895d70746fcadbdc8b1eae0095703723552 --- /dev/null +++ b/AllinonSAM/eval/btcv/config_btcv.yml @@ -0,0 +1,21 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 1024 + use_random_crop: False + use_rotation: False + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: BTCV + root_path: '/media/ubuntu/New Volume/jay/BTCV' + label_list: [1,2,3,4,5,6,7,8] + label_names: ['Spleen', 'Right Kidney', 'Left Kidney', 'Gall Bladder', 'Liver', 'Stomach', 'Aorta', 'Pancreas'] + volume_channel: 2 + negative_to_positive_ratio: -1 + +use_lora: False \ No newline at end of file diff --git a/AllinonSAM/eval/btcv/generate_predictions.py b/AllinonSAM/eval/btcv/generate_predictions.py new file mode 100644 index 
0000000000000000000000000000000000000000..4fdb4010f0ceb1d71773391963da807c633bd6c3 --- /dev/null +++ b/AllinonSAM/eval/btcv/generate_predictions.py @@ -0,0 +1,183 @@ +import torch +import yaml +import sys +import copy +import os +import h5py +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Spleen', 'Right Kidney', 'Left Kidney', 'Gall Bladder', 'Liver', 'Stomach', 'Aorta', 'Pancreas'] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + "Spleen":1, + "Right Kidney": 2, + "Left Kidney": 3, + "Gall Bladder": 4, + "Liver": 5, + "Stomach": 6, + "Aorta": 7, + "Pancreas": 8 + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = BTCV_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,h5_name in enumerate(sorted(os.listdir(args.data_folder))): + # if i%5!=0: + # continue + h5_path = (os.path.join(args.data_folder,h5_name)) + data = h5py.File(h5_path) + all_img, 
all_label = data['image'], data['label'] + + for i in range(all_img.shape[0]): + if i%5!=0: + continue + img = torch.as_tensor(all_img[i]).unsqueeze(0).repeat(3,1,1) + label = torch.as_tensor(all_label[i]) + # print("image shape", img.shape) + # print("label shape: ", label.shape) + # 1/0 + # img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + selected_color = label_dict[args.labels_of_interest] + temp = (label==selected_color)+0 + + # plt.imshow(gold) + # plt.show() + mask = torch.Tensor(temp).unsqueeze(0) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + argmax_masks = torch.argmax(masks, dim=0) + final_mask = torch.zeros(masks[0].shape) + final_mask_rescaled = torch.zeros(masks[0].shape).unsqueeze(-1).repeat(1,1,3) + #save masks + for i in range(final_mask.shape[0]): + for j in range(final_mask.shape[1]): + final_mask[i,j] = codes[argmax_masks[i,j]] if masks[argmax_masks[i,j],i,j]>=0.5 else 0 + # final_mask_rescaled[i,j] = torch.Tensor(visualize_dict[(labels_of_interest[argmax_masks[i,j]])] if masks[argmax_masks[i,j],i,j]>=0.5 else [0,0,0]) + + # save_im = Image.fromarray(final_mask.numpy()) + # save_im.save(os.path.join(args.save_path,'preds', img_name)) + + # plt.imshow(final_mask_rescaled,cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + # plt.close() + + # print("label shape: ", label.shape) + # plt.imshow(label[0], cmap='gray') + # plt.show() + + plt.imshow((masks[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', h5_name[:h5_name.find('.')]+"_"+str(i)+".png")) + plt.close() + + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', h5_name[:h5_name.find('.')]+"_"+str(i)+".png")) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/btcv/generate_predictions.sh b/AllinonSAM/eval/btcv/generate_predictions.sh new file mode 100644 index 0000000000000000000000000000000000000000..ab47e607208807d2459d98522779f226554bf18c --- /dev/null +++ b/AllinonSAM/eval/btcv/generate_predictions.sh @@ -0,0 +1,15 @@ +python generate_predictions.py --model_config model_svdtuning.yml --data_config config_btcv.yml --data_folder "/media/ubuntu/New Volume/jay/BTCV/test_vol_h5" --save_path "./svdsam_btcv_1024" --pretrained_path "svdsam_btcv_1024.pth" --labels_of_interest "Spleen" --device "cuda:1" + +python generate_predictions.py --model_config model_svdtuning.yml --data_config config_btcv.yml --data_folder "/media/ubuntu/New Volume/jay/BTCV/test_vol_h5" --save_path "./svdsam_btcv_1024" --pretrained_path "svdsam_btcv_1024.pth" --labels_of_interest "Right Kidney" --device "cuda:1" + +python generate_predictions.py --model_config model_svdtuning.yml --data_config config_btcv.yml --data_folder "/media/ubuntu/New Volume/jay/BTCV/test_vol_h5" --save_path "./svdsam_btcv_1024" --pretrained_path 
"svdsam_btcv_1024.pth" --labels_of_interest "Left Kidney" --device "cuda:1" + +python generate_predictions.py --model_config model_svdtuning.yml --data_config config_btcv.yml --data_folder "/media/ubuntu/New Volume/jay/BTCV/test_vol_h5" --save_path "./svdsam_btcv_1024" --pretrained_path "svdsam_btcv_1024.pth" --labels_of_interest "Gall Bladder" --device "cuda:1" + +python generate_predictions.py --model_config model_svdtuning.yml --data_config config_btcv.yml --data_folder "/media/ubuntu/New Volume/jay/BTCV/test_vol_h5" --save_path "./svdsam_btcv_1024" --pretrained_path "svdsam_btcv_1024.pth" --labels_of_interest "Liver" --device "cuda:1" + +python generate_predictions.py --model_config model_svdtuning.yml --data_config config_btcv.yml --data_folder "/media/ubuntu/New Volume/jay/BTCV/test_vol_h5" --save_path "./svdsam_btcv_1024" --pretrained_path "svdsam_btcv_1024.pth" --labels_of_interest "Stomach" --device "cuda:1" + +python generate_predictions.py --model_config model_svdtuning.yml --data_config config_btcv.yml --data_folder "/media/ubuntu/New Volume/jay/BTCV/test_vol_h5" --save_path "./svdsam_btcv_1024" --pretrained_path "svdsam_btcv_1024.pth" --labels_of_interest "Aorta" --device "cuda:1" + +python generate_predictions.py --model_config model_svdtuning.yml --data_config config_btcv.yml --data_folder "/media/ubuntu/New Volume/jay/BTCV/test_vol_h5" --save_path "./svdsam_btcv_1024" --pretrained_path "svdsam_btcv_1024.pth" --labels_of_interest "Pancreas" --device "cuda:1" \ No newline at end of file diff --git a/AllinonSAM/eval/btcv/model_svdtuning.yml b/AllinonSAM/eval/btcv/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..8a78b78ff28a32d540df1ae933343cec81c490c8 --- /dev/null +++ b/AllinonSAM/eval/btcv/model_svdtuning.yml @@ -0,0 +1,31 @@ +sam: + img_size: 1024 + num_classes: 8 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + NUM_TEXT_REPEAT: 1 + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 2 + num_epochs: 1000 + schedule_step: 200 + schedule_step_factor: 0.2 + weight_decay: 1e-2 + loss: 'focal+dice' + reg_multiplier: 0 +use_lora: False \ No newline at end of file diff --git a/AllinonSAM/eval/chestXDet/config_chestxdet.yml b/AllinonSAM/eval/chestXDet/config_chestxdet.yml new file mode 100644 index 0000000000000000000000000000000000000000..f3050702e14832d9fc8f6133b968a973c8a22d6c --- /dev/null +++ b/AllinonSAM/eval/chestXDet/config_chestxdet.yml @@ -0,0 +1,18 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: False + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: CHESTXDET + root_path: '/media/ubuntu/New Volume/jay/ChestXDet/train_data' + label_list: [1,2,3,4,5,6,7,8,9,10,11,12,13] + label_names: ['Effusion', 'Nodule', 'Cardiomegaly', 'Fibrosis', 'Consolidation', 'Emphysema', 'Mass', 'Fracture', 'Calcification', 'Pleural Thickening', 'Pneumothorax', 'Atelectasis', 'Diffuse Nodule'] + volume_channel: 2 diff --git a/AllinonSAM/eval/chestXDet/generate_all_predictions.sh b/AllinonSAM/eval/chestXDet/generate_all_predictions.sh new file mode 100644 index 0000000000000000000000000000000000000000..96326b3791f9f3e47ce0634d5ef42585c0d10024 --- /dev/null +++ 
b/AllinonSAM/eval/chestXDet/generate_all_predictions.sh @@ -0,0 +1,25 @@ +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Effusion" --labels_of_interest "Effusion" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Nodule" --labels_of_interest "Nodule" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Cardiomegaly" --labels_of_interest "Cardiomegaly" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Fibrosis" --labels_of_interest "Fibrosis" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Consolidation" --labels_of_interest "Consolidation" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Emphysema" --labels_of_interest "Emphysema" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Mass" --labels_of_interest "Mass" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Calcification" --labels_of_interest "Calcification" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New 
Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/PleuralThickening" --labels_of_interest "Pleural Thickening" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Pneumothorax" --labels_of_interest "Pneumothorax" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Fracture" --labels_of_interest "Fracture" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/Atelectasis" --labels_of_interest "Atelectasis" + +python generate_predictions.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --pretrained_path "samed_chestxdet_final_256_bs32_focaldice.pth" --save_path "samed_chestxdet_final_256_bs32_focaldice/DiffuseNodule" --labels_of_interest "Diffuse Nodule" diff --git a/AllinonSAM/eval/chestXDet/generate_all_predictions_pointsam.sh b/AllinonSAM/eval/chestXDet/generate_all_predictions_pointsam.sh new file mode 100644 index 0000000000000000000000000000000000000000..60378759dead647fb5560c8a903516c5035fe1a6 --- /dev/null +++ b/AllinonSAM/eval/chestXDet/generate_all_predictions_pointsam.sh @@ -0,0 +1,25 @@ +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Effusion" --labels_of_interest "Effusion" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Nodule" --labels_of_interest "Nodule" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Cardiomegaly" --labels_of_interest "Cardiomegaly" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Fibrosis" --labels_of_interest "Fibrosis" + +python predictions_pointsam.py --data_config 
config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Consolidation" --labels_of_interest "Consolidation" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Emphysema" --labels_of_interest "Emphysema" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Mass" --labels_of_interest "Mass" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Calcification" --labels_of_interest "Calcification" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/PleuralThickening" --labels_of_interest "Pleural Thickening" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Pneumothorax" --labels_of_interest "Pneumothorax" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Fracture" --labels_of_interest "Fracture" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/Atelectasis" --labels_of_interest "Atelectasis" + +python predictions_pointsam.py --data_config config_chestxdet.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ChestXDet/test_data/images" --gt_path "/media/ubuntu/New Volume/jay/ChestXDet/test_data/masks" --save_path "medsam_point_chestxdet/DiffuseNodule" --labels_of_interest "Diffuse Nodule" diff --git a/AllinonSAM/eval/chestXDet/generate_predictions.py b/AllinonSAM/eval/chestXDet/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..4569468b95ce9fb89aceaca77c983f0ac66d8d07 --- /dev/null +++ b/AllinonSAM/eval/chestXDet/generate_predictions.py @@ -0,0 +1,202 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Effusion', 'Nodule', 'Cardiomegaly', 'Fibrosis', 'Consolidation', 'Emphysema', 'Mass', 'Fracture', 
'Calcification', 'Pleural Thickening', 'Pneumothorax', 'Atelectasis', 'Diffuse Nodule'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + 'Effusion': 1, + 'Nodule': 2, + 'Cardiomegaly': 3, + 'Fibrosis': 4, + 'Consolidation': 5, + 'Emphysema': 6, + 'Mass': 7, + 'Fracture': 8, + 'Calcification': 9, + 'Pleural Thickening': 10, + 'Pneumothorax': 11, + 'Atelectasis': 12, + 'Diffuse Nodule': 13 + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + # print(model) + # model.load_state_dict(torch.load(args.pretrained_path, map_location=args.device), strict=False) + # temp = torch.load(args.pretrained_path, map_location=args.device) + # print(list(temp.keys())) + #legacy model support + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = ChestXDet_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in 
enumerate(sorted(os.listdir(args.data_folder))): + if i%5!=0: + continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name)) + if not os.path.exists(gt_path): + gt_path = (os.path.join(args.gt_path,img_name[:-4]+'.png')) + if not os.path.exists(gt_path): + continue + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = np.array(Image.open(gt_path)) + c = label_dict[args.labels_of_interest] + temp = (label==c) + + # plt.imshow(gold) + # plt.show() + mask = torch.Tensor(temp).unsqueeze(0) + mask = mask+0 + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + argmax_masks = torch.argmax(masks, dim=0) + final_mask = torch.zeros(masks[0].shape) + final_mask_rescaled = torch.zeros(masks[0].shape).unsqueeze(-1).repeat(1,1,3) + #save masks + for i in range(final_mask.shape[0]): + for j in range(final_mask.shape[1]): + final_mask[i,j] = codes[argmax_masks[i,j]] if masks[argmax_masks[i,j],i,j]>=0.5 else 0 + # final_mask_rescaled[i,j] = torch.Tensor(visualize_dict[(labels_of_interest[argmax_masks[i,j]])] if masks[argmax_masks[i,j],i,j]>=0.5 else [0,0,0]) + + # save_im = Image.fromarray(final_mask.numpy()) + # save_im.save(os.path.join(args.save_path,'preds', img_name)) + + # plt.imshow(final_mask_rescaled,cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + # plt.close() + + # print("label shape: ", label.shape) + # plt.imshow(label[0], cmap='gray') + # plt.show() + + plt.imshow((masks[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/chestXDet/generate_predictions_baselines.py b/AllinonSAM/eval/chestXDet/generate_predictions_baselines.py new file mode 100644 index 0000000000000000000000000000000000000000..f67e08ebf7f85cbb2b198fe45617b443a8eefefd --- /dev/null +++ b/AllinonSAM/eval/chestXDet/generate_predictions_baselines.py @@ -0,0 +1,194 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * +from baselines import UNet, UNext, medt_net +from vit_seg_modeling import VisionTransformer +from vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg +from axialnet import MedT + +label_names = ['Effusion', 'Nodule', 'Cardiomegaly', 'Fibrosis', 'Consolidation', 'Emphysema', 'Mass', 'Fracture', 'Calcification', 'Pleural Thickening', 'Pneumothorax', 
'Atelectasis', 'Diffuse Nodule'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + 'Effusion': 1, + 'Nodule': 2, + 'Cardiomegaly': 3, + 'Fibrosis': 4, + 'Consolidation': 5, + 'Emphysema': 6, + 'Mass': 7, + 'Fracture': 8, + 'Calcification': 9, + 'Pleural Thickening': 10, + 'Pneumothorax': 11, + 'Atelectasis': 12, + 'Diffuse Nodule': 13 + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + + #load model + #change the img size in model config according to data config + in_channels = model_config['in_channels'] + out_channels = model_config['num_classes'] + img_size = model_config['img_size'] + if model_config['arch']=='Prompt Adapted SAM': + model = Prompt_Adapted_SAM(model_config, label_dict, args.device, training_strategy='biastuning') + elif model_config['arch']=='UNet': + model = UNet(in_channels=in_channels, out_channels=out_channels) + elif model_config['arch']=='UNext': + model = UNext(num_classes=out_channels, input_channels=in_channels, img_size=img_size) + elif model_config['arch']=='MedT': + #TODO + model = MedT(img_size=img_size, num_classes=out_channels) + elif model_config['arch']=='TransUNet': + config_vit = CONFIGS_ViT_seg['R50-ViT-B_16'] + config_vit.n_classes = out_channels + config_vit.n_skip = 3 + # if args.vit_name.find('R50') != -1: + # config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size)) + model = VisionTransformer(config_vit, img_size=img_size, num_classes=config_vit.n_classes) + + model.load_state_dict(torch.load(args.pretrained_path, map_location=args.device)) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = ChestXDet_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + # if i>20: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name)) + if not 
os.path.exists(gt_path): + gt_path = (os.path.join(args.gt_path,img_name[:-4]+'.png')) + if not os.path.exists(gt_path): + continue + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + label = np.array(Image.open(gt_path)) + + if args.gt_path: + + mask = np.zeros((len(label_dict),img.shape[1], img.shape[2])) + for i,c in enumerate(list(label_dict.keys())): + temp = (label==label_dict[c]) + mask[i,:,:] = temp + mask = torch.Tensor(mask+0) + + else: + mask = torch.zeros((len(label_dict),H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + img = img.unsqueeze(0).to(args.device) #1XCXHXW + masks = model(img,'') + # print("masks shape: ",masks.shape) + + argmax_masks = torch.argmax(masks, dim=1).cpu().numpy() + # print("argmax masks shape: ",argmax_masks.shape) + + classwise_dices = [] + classwise_ious = [] + for j,c1 in enumerate(label_dict): + res = np.where(argmax_masks==j,1,0) + # print("res shape: ",res.shape) + plt.imshow(res[0], cmap='gray') + save_dir = os.path.join(args.save_path, c1, 'rescaled_preds') + os.makedirs(save_dir, exist_ok=True) + plt.savefig(os.path.join(args.save_path, c1, 'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[j]), cmap='gray') + save_dir = os.path.join(args.save_path, c1, 'rescaled_gt') + os.makedirs(save_dir, exist_ok=True) + plt.savefig(os.path.join(args.save_path, c1, 'rescaled_gt', img_name)) + plt.close() + + classwise_dices.append(dice_coef(mask[j], torch.Tensor(res[0]))) + classwise_ious.append(iou_coef(mask[j], torch.Tensor(res[0]))) + + # break + dices.append(classwise_dices) + ious.append(classwise_ious) + # print("classwise_dices: ", classwise_dices) + # print("classwise ious: ", classwise_ious) + + print(torch.mean(torch.Tensor(dices),dim=0)) + print(torch.mean(torch.Tensor(ious),dim=0)) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/chestXDet/model_baseline.yml b/AllinonSAM/eval/chestXDet/model_baseline.yml new file mode 100644 index 0000000000000000000000000000000000000000..79bfff67f5af82eeb0adf3efb1c2ebb6ebab3cc4 --- /dev/null +++ b/AllinonSAM/eval/chestXDet/model_baseline.yml @@ -0,0 +1,17 @@ + +img_size: 256 +num_classes: 13 +in_channels: 3 +img_type: 'image' +arch: "MedT" +use_fdn: False + +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 16 + num_epochs: 500 + schedule_step: 2100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' \ No newline at end of file diff --git a/AllinonSAM/eval/chestXDet/model_svdtuning.yml b/AllinonSAM/eval/chestXDet/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..144529fd8fc7035d3d2ac31d82b4c38218d2cd02 --- /dev/null +++ b/AllinonSAM/eval/chestXDet/model_svdtuning.yml @@ -0,0 +1,31 @@ +sam: + img_size: 256 + num_classes: 13 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: False + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 32 + num_epochs: 1000 + schedule_step: 100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 + +use_lora: True \ No newline at end of file diff --git a/AllinonSAM/eval/chestXDet/predictions_pointsam.py 
b/AllinonSAM/eval/chestXDet/predictions_pointsam.py new file mode 100644 index 0000000000000000000000000000000000000000..03c8db59c3b545cc17ce507f453a78a19e5deb5c --- /dev/null +++ b/AllinonSAM/eval/chestXDet/predictions_pointsam.py @@ -0,0 +1,228 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Effusion', 'Nodule', 'Cardiomegaly', 'Fibrosis', 'Consolidation', 'Emphysema', 'Mass', 'Fracture', 'Calcification', 'Pleural Thickening', 'Pneumothorax', 'Atelectasis', 'Diffuse Nodule'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + 'Effusion': 1, + 'Nodule': 2, + 'Cardiomegaly': 3, + 'Fibrosis': 4, + 'Consolidation': 5, + 'Emphysema': 6, + 'Mass': 7, + 'Fracture': 8, + 'Calcification': 9, + 'Pleural Thickening': 10, + 'Pneumothorax': 11, + 'Atelectasis': 12, + 'Diffuse Nodule': 13 + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + # print(model) + # model.load_state_dict(torch.load(args.pretrained_path, map_location=args.device), strict=False) + # temp = torch.load(args.pretrained_path, map_location=args.device) + # print(list(temp.keys())) + #legacy model support + if args.pretrained_path: + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = 
key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = ChestXDet_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name)) + if not os.path.exists(gt_path): + gt_path = (os.path.join(args.gt_path,img_name[:-4]+'.png')) + if not os.path.exists(gt_path): + continue + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = np.array(Image.open(gt_path)) + c = label_dict[args.labels_of_interest] + temp = (label==c) + + # plt.imshow(gold) + # plt.show() + mask = torch.Tensor(temp).unsqueeze(0) + mask = mask+0 + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get positive point prompts + _,y,x = torch.where(mask==1) + pos_prompts = torch.cat([x.unsqueeze(1),y.unsqueeze(1)],dim=1) + + #get negative point prompts + _,y_neg,x_neg = torch.where(mask==0) + neg_prompts = (torch.cat([x_neg.unsqueeze(1),y_neg.unsqueeze(1)],dim=1)) + + if len(y)>0: + pos_point_idx = random.randint(0,y.shape[0]-1) + neg_point_idx = random.randint(0,y_neg.shape[0]-1) + # points = (torch.cat([pos_prompts[pos_point_idx].unsqueeze(0), neg_prompts[neg_point_idx].unsqueeze(0)],dim=0).unsqueeze(0).to(args.device), torch.Tensor([1,-1]).unsqueeze(0).to(args.device)) + points = (pos_prompts[pos_point_idx].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([1]).unsqueeze(0).to(args.device)) + + else: + neg_point_idx1 = random.randint(0,y_neg.shape[0]-1) + neg_point_idx2 = random.randint(0,y_neg.shape[0]-1) + # points = (torch.cat([neg_prompts[neg_point_idx1].unsqueeze(0), neg_prompts[neg_point_idx2].unsqueeze(0)],dim=0).unsqueeze(0).to(args.device), torch.Tensor([-1,-1]).unsqueeze(0).to(args.device)) + points = (neg_prompts[neg_point_idx1].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([-1]).unsqueeze(0).to(args.device)) + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + masks = model.get_masks_with_manual_prompts(img_embeds_repeated, points=points).cpu() + argmax_masks = torch.argmax(masks, dim=0) + final_mask = torch.zeros(masks[0].shape) + final_mask_rescaled = torch.zeros(masks[0].shape).unsqueeze(-1).repeat(1,1,3) + #save masks + for i in range(final_mask.shape[0]): + for j in range(final_mask.shape[1]): + final_mask[i,j] = codes[argmax_masks[i,j]] if masks[argmax_masks[i,j],i,j]>=0.5 else 0 + # final_mask_rescaled[i,j] = torch.Tensor(visualize_dict[(labels_of_interest[argmax_masks[i,j]])] if masks[argmax_masks[i,j],i,j]>=0.5 else [0,0,0]) + + # save_im = Image.fromarray(final_mask.numpy()) + # save_im.save(os.path.join(args.save_path,'preds', 
img_name)) + + # plt.imshow(final_mask_rescaled,cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + # plt.close() + + # print("label shape: ", label.shape) + # plt.imshow(label[0], cmap='gray') + # plt.show() + + plt.imshow((masks[0]>=0.5), cmap='gray') + if len(y)>0: + plt.scatter(x[pos_point_idx], y[pos_point_idx], c='green') + # plt.scatter(x_neg[neg_point_idx], y_neg[neg_point_idx], c='red') + else: + plt.scatter(x_neg[neg_point_idx1], y_neg[neg_point_idx1], c='red') + # plt.scatter(x_neg[neg_point_idx2], y_neg[neg_point_idx2], c='red') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/cholec8k/config_cholec8k_test.yml b/AllinonSAM/eval/cholec8k/config_cholec8k_test.yml new file mode 100644 index 0000000000000000000000000000000000000000..e537325584a91214dda85694915c5d4e3342c633 --- /dev/null +++ b/AllinonSAM/eval/cholec8k/config_cholec8k_test.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: False + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: CHOLEC 8K + root_path: '/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/archive' + label_list: [1,2,3,4,5,6,7,8,9,10,11,12] + label_names: ['Grasper', 'L Hook Electrocautery', 'Liver', 'Fat', 'Gall Bladder','Abdominal Wall','Gastrointestinal Tract','Cystic Duct','Blood','Hepatic Vein', 'Liver Ligament', 'Connective Tissue'] + # label_names: ['Abdominal Wall', 'Blood', 'Connective Tissue', 'Cystic Duct', 'Fat', 'Gall Bladder', 'Gastrointestinal Tract', Grasper', 'Hepatic Vein', 'L Hook Electrocautery', 'Liver', 'Liver Ligament'] + volume_channel: 2 diff --git a/AllinonSAM/eval/cholec8k/config_model_test.yml b/AllinonSAM/eval/cholec8k/config_model_test.yml new file mode 100644 index 0000000000000000000000000000000000000000..144529fd8fc7035d3d2ac31d82b4c38218d2cd02 --- /dev/null +++ b/AllinonSAM/eval/cholec8k/config_model_test.yml @@ -0,0 +1,31 @@ +sam: + img_size: 256 + num_classes: 13 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: False + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 32 + num_epochs: 1000 + schedule_step: 100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 + +use_lora: True \ No newline at end of file diff --git a/AllinonSAM/eval/cholec8k/generate_predictions_baselines.py b/AllinonSAM/eval/cholec8k/generate_predictions_baselines.py new file mode 100644 index 0000000000000000000000000000000000000000..aa15e84d0084f919495f31078c04a4c96dedd90c --- /dev/null +++ b/AllinonSAM/eval/cholec8k/generate_predictions_baselines.py @@ -0,0 +1,191 @@ +import torch +import yaml +import sys +import copy +import os 
+sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/biastuning/") + +from data_utils import * +from model import * +from utils import * +from baselines import UNet, UNext, medt_net +from vit_seg_modeling import VisionTransformer +from vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg +from axialnet import MedT + +label_names = ['Grasper', 'L Hook Electrocautery', 'Liver', 'Fat', 'Gall Bladder','Abdominal Wall','Gastrointestinal Tract','Cystic Duct','Blood','Hepatic Vein', 'Liver Ligament', 'Connective Tissue'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict2 = { + 'Grasper':31, + 'L Hook Electrocautery':32, + 'Liver':21, + 'Fat':12, + 'Gall Bladder':22, + 'Abdominal Wall':11, + 'Gastrointestinal Tract':13, + 'Cystic Duct':25, + 'Blood':24, + 'Hepatic Vein':33, + 'Liver Ligament':5, + 'Connective Tissue':23 + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + #change the img size in model config according to data config + in_channels = model_config['in_channels'] + out_channels = model_config['num_classes'] + img_size = model_config['img_size'] + if model_config['arch']=='Prompt Adapted SAM': + model = Prompt_Adapted_SAM(model_config, label_dict, args.device, training_strategy='biastuning') + elif model_config['arch']=='UNet': + model = UNet(in_channels=in_channels, out_channels=out_channels) + elif model_config['arch']=='UNext': + model = UNext(num_classes=out_channels, input_channels=in_channels, img_size=img_size) + elif model_config['arch']=='MedT': + #TODO + model = MedT(img_size=img_size, num_classes=out_channels) + elif model_config['arch']=='TransUNet': + config_vit = CONFIGS_ViT_seg['R50-ViT-B_16'] + config_vit.n_classes = out_channels + config_vit.n_skip = 3 + # if args.vit_name.find('R50') != -1: + # config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size)) + model = VisionTransformer(config_vit, img_size=img_size, num_classes=config_vit.n_classes) + + 
model.load_state_dict(torch.load(args.pretrained_path, map_location=args.device)) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = Cholec_8k_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name[:img_name.find('.')]+'_watershed_mask.png')) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + gold = np.array(Image.open(gt_path)) + if len(gold.shape)==3: + gold = gold[:,:,0] + if gold.max()<2: + gold = (gold*255).astype(int) + + mask = np.zeros((len(label_dict2),img.shape[1], img.shape[2])) + for i,c in enumerate(list(label_dict2.keys())): + mask[i,:,:] = (gold==label_dict2[c]) + + mask = torch.Tensor(mask+0) + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + masks = model(img,'') + + argmax_masks = torch.argmax(masks, dim=1).cpu().numpy() + # print("argmax masks shape: ",argmax_masks.shape) + + classwise_dices = [] + classwise_ious = [] + for j,c1 in enumerate(label_dict): + res = np.where(argmax_masks==j,1,0) + # print("res shape: ",res.shape) + plt.imshow(res[0], cmap='gray') + save_dir = os.path.join(args.save_path, c1, 'rescaled_preds') + os.makedirs(save_dir, exist_ok=True) + plt.savefig(os.path.join(args.save_path, c1, 'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[j]), cmap='gray') + save_dir = os.path.join(args.save_path, c1, 'rescaled_gt') + os.makedirs(save_dir, exist_ok=True) + plt.savefig(os.path.join(args.save_path, c1, 'rescaled_gt', img_name)) + plt.close() + + classwise_dices.append(dice_coef(mask[j], torch.Tensor(res[0]))) + classwise_ious.append(iou_coef(mask[j], torch.Tensor(res[0]))) + + # break + dices.append(classwise_dices) + ious.append(classwise_ious) + # print("classwise_dices: ", classwise_dices) + # print("classwise ious: ", classwise_ious) + + print(torch.mean(torch.Tensor(dices),dim=0)) + print(torch.mean(torch.Tensor(ious),dim=0)) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/cholec8k/generate_predictions_cholec.py b/AllinonSAM/eval/cholec8k/generate_predictions_cholec.py new file mode 100644 index 0000000000000000000000000000000000000000..9e3fde0b133b0cbe3cd029d93215a50e6df5b44f --- /dev/null +++ b/AllinonSAM/eval/cholec8k/generate_predictions_cholec.py @@ -0,0 +1,203 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Grasper', 'L Hook Electrocautery', 'Liver', 'Fat', 'Gall Bladder','Abdominal Wall','Gastrointestinal Tract','Cystic Duct','Blood','Hepatic Vein', 'Liver Ligament', 'Connective Tissue'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + 
help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict2 = { + 'Grasper':31, + 'L Hook Electrocautery':32, + 'Liver':21, + 'Fat':12, + 'Gall Bladder':22, + 'Abdominal Wall':11, + 'Gastrointestinal Tract':13, + 'Cystic Duct':25, + 'Blood':24, + 'Hepatic Vein':33, + 'Liver Ligament':5, + 'Connective Tissue':23 + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device,training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device,training_strategy='lora') + + #legacy model support + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + + + + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = Cholec_8k_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + if i%10!=0: + continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name[:img_name.find('.')]+'_watershed_mask.png')) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label_of_interest = args.labels_of_interest + gold = np.array(Image.open(gt_path)) + + if len(gold.shape)==3: + gold = gold[:,:,0] + if gold.max()<2: + gold = (gold*255).astype(int) + + # plt.imshow(gold) + # plt.show() + 
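+            # binarize the watershed ground truth: 1 where the pixel code matches the requested label of interest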
mask = (gold==label_dict2[label_of_interest]) + + mask = torch.Tensor(mask+0) + mask = torch.Tensor(mask).unsqueeze(0) + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + argmax_masks = torch.argmax(masks, dim=0) + final_mask = torch.zeros(masks[0].shape) + final_mask_rescaled = torch.zeros(masks[0].shape).unsqueeze(-1).repeat(1,1,3) + #save masks + for i in range(final_mask.shape[0]): + for j in range(final_mask.shape[1]): + final_mask[i,j] = codes[argmax_masks[i,j]] if masks[argmax_masks[i,j],i,j]>=0.5 else 0 + # final_mask_rescaled[i,j] = torch.Tensor(visualize_dict[(labels_of_interest[argmax_masks[i,j]])] if masks[argmax_masks[i,j],i,j]>=0.5 else [0,0,0]) + + # save_im = Image.fromarray(final_mask.numpy()) + # save_im.save(os.path.join(args.save_path,'preds', img_name)) + + # plt.imshow(final_mask_rescaled,cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + # plt.close() + + # print("label shape: ", label.shape) + # plt.imshow(label[0], cmap='gray') + # plt.show() + + plt.imshow((masks[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/cholec8k/generate_predictions_cholec.sh b/AllinonSAM/eval/cholec8k/generate_predictions_cholec.sh new file mode 100644 index 0000000000000000000000000000000000000000..6f911ebb78794bda9764cc92d0e6a5df90d50b0b --- /dev/null +++ b/AllinonSAM/eval/cholec8k/generate_predictions_cholec.sh @@ -0,0 +1,23 @@ +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/Grasper" --device "cuda:1" --labels_of_interest "Grasper" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/LHookElectrocautery" --device "cuda:1" --labels_of_interest "L Hook Electrocautery" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" 
--pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/Liver" --device "cuda:1" --labels_of_interest "Liver" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/Fat" --device "cuda:1" --labels_of_interest "Fat" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/Gallbladder" --device "cuda:1" --labels_of_interest "Gall Bladder" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/Abdominalwall" --device "cuda:1" --labels_of_interest "Abdominal Wall" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/GITract" --device "cuda:1" --labels_of_interest "Gastrointestinal Tract" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/CysticDuct" --device "cuda:1" --labels_of_interest "Cystic Duct" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/Blood" --device "cuda:1" --labels_of_interest "Blood" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/Hepaticvein" --device "cuda:1" 
--labels_of_interest "Hepatic Vein" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/LiverLigament" --device "cuda:1" --labels_of_interest "Liver Ligament" + +python generate_predictions_cholec.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --pretrained_path "samed_cholec_final_256_bs32_focaldice.pth" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "samed_cholec_final_256_bs32_focaldice/ConnectiveTissue" --device "cuda:1" --labels_of_interest "Connective Tissue" \ No newline at end of file diff --git a/AllinonSAM/eval/cholec8k/model_baseline.yml b/AllinonSAM/eval/cholec8k/model_baseline.yml new file mode 100644 index 0000000000000000000000000000000000000000..d5c50496276d02eca94b41e996eeb355f8af358d --- /dev/null +++ b/AllinonSAM/eval/cholec8k/model_baseline.yml @@ -0,0 +1,17 @@ + +img_size: 256 +num_classes: 12 +in_channels: 3 +img_type: 'image' +arch: "UNet" +use_fdn: False + +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 16 + num_epochs: 500 + schedule_step: 2100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' \ No newline at end of file diff --git a/AllinonSAM/eval/cholec8k/predictions_pointsam.py b/AllinonSAM/eval/cholec8k/predictions_pointsam.py new file mode 100644 index 0000000000000000000000000000000000000000..cc39a7d257bef55aea7070a7ffa6324baec5fa85 --- /dev/null +++ b/AllinonSAM/eval/cholec8k/predictions_pointsam.py @@ -0,0 +1,229 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Grasper', 'L Hook Electrocautery', 'Liver', 'Fat', 'Gall Bladder','Abdominal Wall','Gastrointestinal Tract','Cystic Duct','Blood','Hepatic Vein', 'Liver Ligament', 'Connective Tissue'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to 
save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict2 = { + 'Grasper':31, + 'L Hook Electrocautery':32, + 'Liver':21, + 'Fat':12, + 'Gall Bladder':22, + 'Abdominal Wall':11, + 'Gastrointestinal Tract':13, + 'Cystic Duct':25, + 'Blood':24, + 'Hepatic Vein':33, + 'Liver Ligament':5, + 'Connective Tissue':23 + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device,training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device,training_strategy='lora') + + #legacy model support + if args.pretrained_path: + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + + + + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = Cholec_8k_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + if i%10!=0: + continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name[:img_name.find('.')]+'_watershed_mask.png')) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label_of_interest = args.labels_of_interest + gold = np.array(Image.open(gt_path)) + + if len(gold.shape)==3: + gold = gold[:,:,0] + if gold.max()<2: + gold = (gold*255).astype(int) + + # plt.imshow(gold) + # plt.show() + mask = (gold==label_dict2[label_of_interest]) + + mask = torch.Tensor(mask+0) + mask = torch.Tensor(mask).unsqueeze(0) + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get positive point prompts + _,y,x = torch.where(mask==1) + pos_prompts = torch.cat([x.unsqueeze(1),y.unsqueeze(1)],dim=1) + + #get negative point prompts + _,y_neg,x_neg = torch.where(mask==0) + neg_prompts = (torch.cat([x_neg.unsqueeze(1),y_neg.unsqueeze(1)],dim=1)) + + if len(y)>0: + pos_point_idx = random.randint(0,y.shape[0]-1) + neg_point_idx = random.randint(0,y_neg.shape[0]-1) + # points = (torch.cat([pos_prompts[pos_point_idx].unsqueeze(0), neg_prompts[neg_point_idx].unsqueeze(0)],dim=0).unsqueeze(0).to(args.device), torch.Tensor([1,-1]).unsqueeze(0).to(args.device)) + 
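+            # prompt with a single randomly sampled foreground click (point label 1)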
points = (pos_prompts[pos_point_idx].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([1]).unsqueeze(0).to(args.device)) + + else: + neg_point_idx1 = random.randint(0,y_neg.shape[0]-1) + neg_point_idx2 = random.randint(0,y_neg.shape[0]-1) + # points = (torch.cat([neg_prompts[neg_point_idx1].unsqueeze(0), neg_prompts[neg_point_idx2].unsqueeze(0)],dim=0).unsqueeze(0).to(args.device), torch.Tensor([-1,-1]).unsqueeze(0).to(args.device)) + points = (neg_prompts[neg_point_idx1].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([-1]).unsqueeze(0).to(args.device)) + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + masks = model.get_masks_with_manual_prompts(img_embeds_repeated, points=points).cpu() + argmax_masks = torch.argmax(masks, dim=0) + final_mask = torch.zeros(masks[0].shape) + final_mask_rescaled = torch.zeros(masks[0].shape).unsqueeze(-1).repeat(1,1,3) + #save masks + for i in range(final_mask.shape[0]): + for j in range(final_mask.shape[1]): + final_mask[i,j] = codes[argmax_masks[i,j]] if masks[argmax_masks[i,j],i,j]>=0.5 else 0 + # final_mask_rescaled[i,j] = torch.Tensor(visualize_dict[(labels_of_interest[argmax_masks[i,j]])] if masks[argmax_masks[i,j],i,j]>=0.5 else [0,0,0]) + + # save_im = Image.fromarray(final_mask.numpy()) + # save_im.save(os.path.join(args.save_path,'preds', img_name)) + + # plt.imshow(final_mask_rescaled,cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + # plt.close() + + # print("label shape: ", label.shape) + # plt.imshow(label[0], cmap='gray') + # plt.show() + + plt.imshow((masks[0]>=0.5), cmap='gray') + if len(y)>0: + plt.scatter(x[pos_point_idx], y[pos_point_idx], c='green') + # plt.scatter(x_neg[neg_point_idx], y_neg[neg_point_idx], c='red') + else: + plt.scatter(x_neg[neg_point_idx1], y_neg[neg_point_idx1], c='red') + # plt.scatter(x_neg[neg_point_idx2], y_neg[neg_point_idx2], c='red') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/cholec8k/predictions_pointsam.sh b/AllinonSAM/eval/cholec8k/predictions_pointsam.sh new file mode 100644 index 0000000000000000000000000000000000000000..7a9487d31e6af177b40b70f53973dd8e1cf0c0c2 --- /dev/null +++ b/AllinonSAM/eval/cholec8k/predictions_pointsam.sh @@ -0,0 +1,23 @@ +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "sam_point_cholec/Grasper" --device "cuda:1" --labels_of_interest "Grasper" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml 
--data_config config_cholec8k_test.yml --save_path "sam_point_cholec/LHookElectrocautery" --device "cuda:1" --labels_of_interest "L Hook Electrocautery" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "sam_point_cholec/Liver" --device "cuda:1" --labels_of_interest "Liver" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "sam_point_cholec/Fat" --device "cuda:1" --labels_of_interest "Fat" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "sam_point_cholec/Gallbladder" --device "cuda:1" --labels_of_interest "Gall Bladder" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "sam_point_cholec/Abdominalwall" --device "cuda:1" --labels_of_interest "Abdominal Wall" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "sam_point_cholec/GITract" --device "cuda:1" --labels_of_interest "Gastrointestinal Tract" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "sam_point_cholec/CysticDuct" --device "cuda:1" --labels_of_interest "Cystic Duct" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "sam_point_cholec/Blood" --device "cuda:1" --labels_of_interest "Blood" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "sam_point_cholec/Hepaticvein" --device "cuda:1" --labels_of_interest "Hepatic Vein" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path 
"sam_point_cholec/LiverLigament" --device "cuda:1" --labels_of_interest "Liver Ligament" + +python predictions_pointsam.py --data_folder "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_images2" --gt_path "/home/ubuntu/Desktop/Domain_Adaptation_Project/data/cholecSeg8k/test_labels2" --model_config config_model_test.yml --data_config config_cholec8k_test.yml --save_path "sam_point_cholec/ConnectiveTissue" --device "cuda:1" --labels_of_interest "Connective Tissue" \ No newline at end of file diff --git a/AllinonSAM/eval/endovis/config_endovis_test.yml b/AllinonSAM/eval/endovis/config_endovis_test.yml new file mode 100644 index 0000000000000000000000000000000000000000..8f7ccfc551f06efa072606e61b859e200c64abd1 --- /dev/null +++ b/AllinonSAM/eval/endovis/config_endovis_test.yml @@ -0,0 +1,20 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: False + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: ENDOVIS + root_path: '/media/ubuntu/New Volume/jay/endovis17/instrument_1_4_testing/' + label_list: [1,2,3,4,5,6,7,8,9] + label_names: ['Left Prograsp Forceps', 'Maryland Bipolar Forceps', 'Right Prograsp Forceps', 'Left Large Needle Driver', 'Right Large Needle Driver', 'Left Grasping Retractor', 'Right Grasping Retractor', 'Vessel Sealer', 'Monopolar Curved Scissors'] + volume_channel: 2 + + diff --git a/AllinonSAM/eval/endovis/config_model_test.yml b/AllinonSAM/eval/endovis/config_model_test.yml new file mode 100644 index 0000000000000000000000000000000000000000..3b343417983aad1b6ac867c1375b5329806421df --- /dev/null +++ b/AllinonSAM/eval/endovis/config_model_test.yml @@ -0,0 +1,31 @@ +sam: + img_size: 256 + num_classes: 9 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 32 + num_epochs: 1000 + schedule_step: 100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 + +use_lora: True \ No newline at end of file diff --git a/AllinonSAM/eval/endovis/generate_all_baseline_results.sh b/AllinonSAM/eval/endovis/generate_all_baseline_results.sh new file mode 100644 index 0000000000000000000000000000000000000000..a540d77bdd27835139c5dcfa2a0f658838412ed2 --- /dev/null +++ b/AllinonSAM/eval/endovis/generate_all_baseline_results.sh @@ -0,0 +1,39 @@ +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_1_4_testing/instrument_dataset_1/left_frames" --data_config config_endovis_test.yml --model_config model_baseline.yml --save_path "./unet_results/dataset1" --pretrained_path ../../unet_ev17.pth --gt_path "/media/ubuntu/New Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_1/TypeSegmentation" + +echo "......................." 
+ +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_1_4_testing/instrument_dataset_2/left_frames" --data_config config_endovis_test.yml --model_config model_baseline.yml --save_path "./unet_results/dataset2" --pretrained_path ../../unet_ev17.pth --gt_path "/media/ubuntu/New Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_2/TypeSegmentation" + + +echo "......................." + + +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_1_4_testing/instrument_dataset_3/left_frames" --data_config config_endovis_test.yml --model_config model_baseline.yml --save_path "./unet_results/dataset3" --pretrained_path ../../unet_ev17.pth --gt_path "/media/ubuntu/New Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_3/TypeSegmentation" + +echo "......................." + +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_1_4_testing/instrument_dataset_4/left_frames" --data_config config_endovis_test.yml --model_config model_baseline.yml --save_path "./unet_results/dataset4" --pretrained_path ../../unet_ev17.pth --gt_path "/media/ubuntu/New Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_4/TypeSegmentation" + +echo "......................." + +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_5_8_testing/instrument_dataset_5/left_frames" --data_config config_endovis_test.yml --model_config model_baseline.yml --save_path "./unet_results/dataset5" --pretrained_path ../../unet_ev17.pth --gt_path "/media/ubuntu/New Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_5/TypeSegmentation" + +echo "......................." + +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_5_8_testing/instrument_dataset_6/left_frames" --data_config config_endovis_test.yml --model_config model_baseline.yml --save_path "./unet_results/dataset6" --pretrained_path ../../unet_ev17.pth --gt_path "/media/ubuntu/New Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_6/TypeSegmentation" + +echo "......................." + +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_5_8_testing/instrument_dataset_7/left_frames" --data_config config_endovis_test.yml --model_config model_baseline.yml --save_path "./unet_results/dataset7" --pretrained_path ../../unet_ev17.pth --gt_path "/media/ubuntu/New Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_7/TypeSegmentation" + +echo "......................." + +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_5_8_testing/instrument_dataset_8/left_frames" --data_config config_endovis_test.yml --model_config model_baseline.yml --save_path "./unet_results/dataset8" --pretrained_path ../../unet_ev17.pth --gt_path "/media/ubuntu/New Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_8/TypeSegmentation" + +echo "......................." 
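+# datasets 9 and 10 are stored under the instrument_9_10_testing split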
+ +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_9_10_testing/instrument_dataset_9/left_frames" --data_config config_endovis_test.yml --model_config model_baseline.yml --save_path "./unet_results/dataset9" --pretrained_path ../../unet_ev17.pth --gt_path "/media/ubuntu/New Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_9/TypeSegmentation" + +echo "......................." + +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_9_10_testing/instrument_dataset_10/left_frames" --data_config config_endovis_test.yml --model_config model_baseline.yml --save_path "./unet_results/dataset10" --pretrained_path ../../unet_ev17.pth --gt_path "/media/ubuntu/New Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_10/TypeSegmentation" \ No newline at end of file diff --git a/AllinonSAM/eval/endovis/generate_all_predictions.sh b/AllinonSAM/eval/endovis/generate_all_predictions.sh new file mode 100644 index 0000000000000000000000000000000000000000..637870e4f5dec3688719d4f6ac6185ac51a0388e --- /dev/null +++ b/AllinonSAM/eval/endovis/generate_all_predictions.sh @@ -0,0 +1,27 @@ +echo "endovis17_lora16" + +for i in "Left Prograsp Forceps",2 "Right Prograsp Forceps",2 "Maryland Bipolar Forceps",1 "Left Large Needle Driver",3 "Right Large Needle Driver",3 "Vessel Sealer",4 "Left Grasping Retractor",5 "Right Grasping Retractor",5 "Monopolar Curved Scissors",6; +do IFS=","; set $i; +echo "$1 and $2"; + +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_1_4_testing/instrument_dataset_1/left_frames" --data_config config_endovis_test.yml --model_config config_model_test.yml --save_path "endovis17_lora16/instrument_1/$1" --gt_path /media/ubuntu/New\ Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_1/TypeSegmentation --pretrained_path endovis17_lora16.pth --device "cuda:0" --labels_of_interest "$1" --codes "$2" + +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_1_4_testing/instrument_dataset_2/left_frames" --data_config config_endovis_test.yml --model_config config_model_test.yml --save_path "endovis17_lora16/instrument_2/$1" --gt_path /media/ubuntu/New\ Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_2/TypeSegmentation --pretrained_path endovis17_lora16.pth --device "cuda:0" --labels_of_interest "$1" --codes "$2" + +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_1_4_testing/instrument_dataset_3/left_frames" --data_config config_endovis_test.yml --model_config config_model_test.yml --save_path "endovis17_lora16/instrument_3/$1" --gt_path /media/ubuntu/New\ Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_3/TypeSegmentation --pretrained_path endovis17_lora16.pth --device "cuda:0" --labels_of_interest "$1" --codes "$2" + +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_1_4_testing/instrument_dataset_4/left_frames" --data_config config_endovis_test.yml --model_config config_model_test.yml --save_path "endovis17_lora16/instrument_4/$1" --gt_path /media/ubuntu/New\ Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_4/TypeSegmentation --pretrained_path endovis17_lora16.pth --device "cuda:0" --labels_of_interest "$1" --codes "$2" + 
+python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_5_8_testing/instrument_dataset_5/left_frames" --data_config config_endovis_test.yml --model_config config_model_test.yml --save_path "endovis17_lora16/instrument_5/$1" --gt_path /media/ubuntu/New\ Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_5/TypeSegmentation --pretrained_path endovis17_lora16.pth --device "cuda:0" --labels_of_interest "$1" --codes "$2" + +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_5_8_testing/instrument_dataset_6/left_frames" --data_config config_endovis_test.yml --model_config config_model_test.yml --save_path "endovis17_lora16/instrument_6/$1" --gt_path /media/ubuntu/New\ Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_6/TypeSegmentation --pretrained_path endovis17_lora16.pth --device "cuda:0" --labels_of_interest "$1" --codes "$2" + +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_5_8_testing/instrument_dataset_7/left_frames" --data_config config_endovis_test.yml --model_config config_model_test.yml --save_path "endovis17_lora16/instrument_7/$1" --gt_path /media/ubuntu/New\ Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_7/TypeSegmentation --pretrained_path endovis17_lora16.pth --device "cuda:0" --labels_of_interest "$1" --codes "$2" + +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_5_8_testing/instrument_dataset_8/left_frames" --data_config config_endovis_test.yml --model_config config_model_test.yml --save_path "endovis17_lora16/instrument_8/$1" --gt_path /media/ubuntu/New\ Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_8/TypeSegmentation --pretrained_path endovis17_lora16.pth --device "cuda:0" --labels_of_interest "$1" --codes "$2" + +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_9_10_testing/instrument_dataset_9/left_frames" --data_config config_endovis_test.yml --model_config config_model_test.yml --save_path "endovis17_lora16/instrument_9/$1" --gt_path /media/ubuntu/New\ Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_9/TypeSegmentation --pretrained_path endovis17_lora16.pth --device "cuda:0" --labels_of_interest "$1" --codes "$2" + +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis17/instrument_9_10_testing/instrument_dataset_10/left_frames" --data_config config_endovis_test.yml --model_config config_model_test.yml --save_path "endovis17_lora16/instrument_10/$1" --gt_path /media/ubuntu/New\ Volume/jay/endovis17/instrument_2017_test/instrument_2017_test/instrument_dataset_10/TypeSegmentation --pretrained_path endovis17_lora16.pth --device "cuda:0" --labels_of_interest "$1" --codes "$2" + +done \ No newline at end of file diff --git a/AllinonSAM/eval/endovis/generate_predictions.py b/AllinonSAM/eval/endovis/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..765be11758a4fdc0b1f45ba1f4746caa6edf5f60 --- /dev/null +++ b/AllinonSAM/eval/endovis/generate_predictions.py @@ -0,0 +1,171 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Left Prograsp 
Forceps', 'Maryland Bipolar Forceps', 'Right Prograsp Forceps', 'Left Large Needle Driver', 'Right Large Needle Driver', 'Left Grasping Retractor', 'Right Grasping Retractor', 'Vessel Sealer', 'Monopolar Curved Scissors'] +visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device) + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + if args.pretrained_path: + model.load_state_dict(torch.load(args.pretrained_path, map_location=args.device)) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = ENDOVIS_Transform(config=data_config) + + #dice + dices = [] + ious = [] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + if i%5!=0: + continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + label_name = labels_of_interest[0].replace(' ','_')+'_labels' + #for test data, the labels are arranged differently so uncomment the line below + gt_path = (os.path.join(args.gt_path,img_name)) + # gt_path = (os.path.join(args.gt_path,label_name,img_name)) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = torch.as_tensor(np.array(Image.open(gt_path))).unsqueeze(0) + + #for test data, the labels are arranged differently so uncomment th line below + label = (label==codes[0])+0 + + label = (label>0)+0 + else: + label = torch.zeros((1,H,W)) + img, label = data_transform(img, 
label, is_train=False, apply_norm=True) + label = (label>0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + argmax_masks = torch.argmax(masks, dim=0) + final_mask = torch.zeros(masks[0].shape) + final_mask_rescaled = torch.zeros(masks[0].shape).unsqueeze(-1).repeat(1,1,3) + #save masks + for i in range(final_mask.shape[0]): + for j in range(final_mask.shape[1]): + final_mask[i,j] = codes[argmax_masks[i,j]] if masks[argmax_masks[i,j],i,j]>=0.5 else 0 + # final_mask_rescaled[i,j] = torch.Tensor(visualize_dict[(labels_of_interest[argmax_masks[i,j]])] if masks[argmax_masks[i,j],i,j]>=0.5 else [0,0,0]) + + # save_im = Image.fromarray(final_mask.numpy()) + # save_im.save(os.path.join(args.save_path,'preds', img_name)) + + # plt.imshow(final_mask_rescaled,cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + # plt.close() + + # print("label shape: ", label.shape) + # plt.imshow(label[0], cmap='gray') + # plt.show() + + plt.imshow((masks[0]>0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((label[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(label, (masks>0.5)+0)) + ious.append(iou_coef(label, (masks>0.5)+0)) + # break + print("Dice: ",torch.mean(torch.Tensor(dices))) + print("IoU: ",torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + +# { +# "Bipolar Forceps": 1, +# "Prograsp Forceps": 2, +# "Large Needle Driver": 3, +# "Vessel Sealer": 4, +# "Grasping Retractor": 5, +# "Monopolar Curved Scissors": 6, +# "Other": 7 +# } + + + diff --git a/AllinonSAM/eval/endovis/generate_predictions_baselines.py b/AllinonSAM/eval/endovis/generate_predictions_baselines.py new file mode 100644 index 0000000000000000000000000000000000000000..82b8cf2e47672d98dde907f10ac1434e64661e0f --- /dev/null +++ b/AllinonSAM/eval/endovis/generate_predictions_baselines.py @@ -0,0 +1,190 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/biastuning/") + +from data_utils import * +from model import * +from utils import * +from baselines import UNet, UNext, medt_net +from vit_seg_modeling import VisionTransformer +from vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg +from axialnet import MedT + +label_names = ['Left Prograsp Forceps', 'Maryland Bipolar Forceps', 'Right Prograsp Forceps', 'Left Large Needle Driver', 'Right Large Needle Driver', 'Left Grasping Retractor', 'Right Grasping Retractor', 'Vessel Sealer', 'Monopolar Curved Scissors'] +label_dict = {} +visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + 
parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + codes = args.codes.split(',') + codes = [int(c) for c in codes] + label_dict = { + 'Left Prograsp Forceps': 2, + 'Maryland Bipolar Forceps': 1, + 'Right Prograsp Forceps': 2, + 'Left Large Needle Driver': 3, + 'Right Large Needle Driver': 3, + 'Left Grasping Retractor': 5, + 'Right Grasping Retractor': 5, + 'Vessel Sealer': 4, + 'Monopolar Curved Scissors': 6 + } + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + #change the img size in model config according to data config + in_channels = model_config['in_channels'] + out_channels = model_config['num_classes'] + img_size = model_config['img_size'] + if model_config['arch']=='Prompt Adapted SAM': + model = Prompt_Adapted_SAM(model_config, label_dict, args.device, training_strategy='biastuning') + elif model_config['arch']=='UNet': + model = UNet(in_channels=in_channels, out_channels=out_channels) + elif model_config['arch']=='UNext': + model = UNext(num_classes=out_channels, input_channels=in_channels, img_size=img_size) + elif model_config['arch']=='MedT': + #TODO + model = MedT(img_size=img_size, num_classes=out_channels) + elif model_config['arch']=='TransUNet': + config_vit = CONFIGS_ViT_seg['R50-ViT-B_16'] + config_vit.n_classes = out_channels + config_vit.n_skip = 3 + # if args.vit_name.find('R50') != -1: + # config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size)) + model = VisionTransformer(config_vit, img_size=img_size, num_classes=config_vit.n_classes) + + model.load_state_dict(torch.load(args.pretrained_path, map_location=args.device)) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = ENDOVIS_Transform(config=data_config) + + #dice + dices = [] + ious = [] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + #for test data, the labels are arranged differently so uncomment the line below + gt_path = (os.path.join(args.gt_path,img_name)) + # gt_path = (os.path.join(args.gt_path,label_name,img_name)) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = torch.as_tensor(np.array(Image.open(gt_path))) + mask = np.zeros((len(label_dict),img.shape[1], img.shape[2])) + for i,c in enumerate(list(label_dict.keys())): + mask[i,:,:] = ((label==label_dict[c])+0) + mask = torch.as_tensor(mask) + + else: + mask = 
torch.zeros((len(label_dict),H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + masks = model(img,'') + + argmax_masks = torch.argmax(masks, dim=1).cpu().numpy() + # print("argmax masks shape: ",argmax_masks.shape) + + classwise_dices = [] + classwise_ious = [] + for j,c1 in enumerate(label_dict): + res = np.where(argmax_masks==j,1,0) + # print("res shape: ",res.shape) + plt.imshow(res[0], cmap='gray') + save_dir = os.path.join(args.save_path, c1, 'rescaled_preds') + os.makedirs(save_dir, exist_ok=True) + plt.savefig(os.path.join(args.save_path, c1, 'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[j]), cmap='gray') + save_dir = os.path.join(args.save_path, c1, 'rescaled_gt') + os.makedirs(save_dir, exist_ok=True) + plt.savefig(os.path.join(args.save_path, c1, 'rescaled_gt', img_name)) + plt.close() + + classwise_dices.append(dice_coef(mask[j], torch.Tensor(res[0]))) + classwise_ious.append(iou_coef(mask[j], torch.Tensor(res[0]))) + + # break + dices.append(classwise_dices) + ious.append(classwise_ious) + # print("classwise_dices: ", classwise_dices) + # print("classwise ious: ", classwise_ious) + + print(torch.mean(torch.Tensor(dices),dim=0)) + print(torch.mean(torch.Tensor(ious),dim=0)) + +if __name__ == '__main__': + main() + + +# { +# "Bipolar Forceps": 1, +# "Prograsp Forceps": 2, +# "Large Needle Driver": 3, +# "Vessel Sealer": 4, +# "Grasping Retractor": 5, +# "Monopolar Curved Scissors": 6, +# "Other": 7 +# } + + + diff --git a/AllinonSAM/eval/endovis/model_baseline.yml b/AllinonSAM/eval/endovis/model_baseline.yml new file mode 100644 index 0000000000000000000000000000000000000000..a73f1bef1dcb0f960801226d389eea6e9acb2f9f --- /dev/null +++ b/AllinonSAM/eval/endovis/model_baseline.yml @@ -0,0 +1,17 @@ + +img_size: 256 +num_classes: 9 +in_channels: 3 +img_type: 'image' +arch: "UNet" +use_fdn: False + +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 16 + num_epochs: 500 + schedule_step: 2100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' \ No newline at end of file diff --git a/AllinonSAM/eval/endovis/random_patch_preds.py b/AllinonSAM/eval/endovis/random_patch_preds.py new file mode 100644 index 0000000000000000000000000000000000000000..662f6bbb1c265c6ca4f77ae186dce7b693eb9bda --- /dev/null +++ b/AllinonSAM/eval/endovis/random_patch_preds.py @@ -0,0 +1,163 @@ +import numpy as np + +PATCH_SIZE = 256 # Size of the patches +OVERLAP = 32 # Amount of overlap between patches + +def split_image_into_patches(image): + height, width, _ = image.shape + patches = [] + + for y in range(0, height-PATCH_SIZE+1, PATCH_SIZE-OVERLAP): + for x in range(0, width-PATCH_SIZE+1, PATCH_SIZE-OVERLAP): + patch = (y,x,image[y:y+PATCH_SIZE, x:x+PATCH_SIZE]) + patches.append(patch) + + return patches + +def stitch_patches_to_image(patches, image_shape): + stitched_image = np.zeros(image_shape) + overlap_mask = np.zeros(image_shape[:2])+1e-10 + + for patch in patches: + y, x, p = patch + try: + # Add the patch to the stitched image + stitched_image[y:y+PATCH_SIZE, x:x+PATCH_SIZE] += p + overlap_mask[y:y+PATCH_SIZE, x:x+PATCH_SIZE] += 1 + except: + print(p.shape) + print(y,x) + print(image_shape) + 1/0 + + # Normalize the stitched image by dividing with the overlap count + stitched_image = ((stitched_image/overlap_mask)>0.5)+0 + + return stitched_image.astype(np.uint8) + +import torch +import yaml +import sys +import copy 
+import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/biastuning/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Left Prograsp Forceps', 'Maryland Bipolar Forceps', 'Right Prograsp Forceps', 'Left Large Needle Driver', 'Right Large Needle Driver'] +visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device) + model.load_state_dict(torch.load(args.pretrained_path, map_location=args.device)) + model = model.eval() + model = model.to(args.device) + + #load data transform + data_transform = ENDOVIS_Transform(config=data_config) + + #load data + for img_name in sorted(os.listdir(args.data_folder)): + img_path = (os.path.join(args.data_folder,img_name)) + # print(img_path) + original_img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + patches = split_image_into_patches(original_img) + patch_masks = [] + + for y,x,p in patches: + img = p.permute(2,0,1) + #make a dummy mask of shape 1XHXW + label = torch.zeros(img.shape)[0].unsqueeze(0) + img, _ = data_transform(img, label, is_train=False, apply_norm=True, crop=False, resize=False) + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + + #for now, only handle one class at a time + masks, max_idxs = torch.max(masks,dim=0) + patch_masks.append((y,x,masks.numpy())) + + # argmax_masks = torch.argmax(masks, dim=0) + # final_mask = torch.zeros(masks[0].shape) + # final_mask_rescaled = 
torch.zeros(masks[0].shape).unsqueeze(-1).repeat(1,1,3) + #save masks + # for i in range(final_mask.shape[0]): + # for j in range(final_mask.shape[1]): + # final_mask[i,j] = codes[argmax_masks[i,j]] if masks[argmax_masks[i,j],i,j]>=0.5 else 0 + # final_mask_rescaled[i,j] = torch.Tensor(visualize_dict[(labels_of_interest[argmax_masks[i,j]])] if masks[argmax_masks[i,j],i,j]>=0.5 else [0,0,0]) + + #stitch masks + print("original shape: ", original_img.shape) + final_mask = stitch_patches_to_image(patch_masks, original_img.shape[:2]) + print("final mask shape: ",final_mask.shape) + save_im = Image.fromarray(final_mask) + save_im.save(os.path.join(args.save_path,'preds', img_name)) + + # plt.imshow(final_mask_rescaled,cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + # plt.close() + break + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/endovis/testing_results_dice.py b/AllinonSAM/eval/endovis/testing_results_dice.py new file mode 100644 index 0000000000000000000000000000000000000000..8ef35293a510229a6106712de84e77c1cd0f8dab --- /dev/null +++ b/AllinonSAM/eval/endovis/testing_results_dice.py @@ -0,0 +1,61 @@ +import os +from PIL import Image +import sys +from matplotlib import pyplot as plt +import torch + +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") +from utils import * + +# test_path = "/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/eval/endovis/svdshiftscale_ev17_tal_focal075_alpha2_1e-3" +test_path = "endovis17_lora16" + +#when not differentiating between the forceps, add mbp to the first tuple +# instruments = [('lgr','rgr'),('llnd','rlnd'),('lpf','rpf')] +instruments = [('Left Grasping Retractor','Right Grasping Retractor'),('Left Large Needle Driver','Right Large Needle Driver'),('Left Prograsp Forceps','Right Prograsp Forceps')] + +for dataset in sorted(os.listdir(test_path)): + for instrument in instruments: + dices = [] + ious = [] + if len(instrument)==3: + gt_path1 = os.path.join(test_path, dataset,instrument[0],'rescaled_gt') + gt_path2 = os.path.join(test_path, dataset,instrument[2],'rescaled_gt') + extra_preds_path = os.path.join(test_path, dataset,instrument[2],'rescaled_preds') + else: + gt_path = os.path.join(test_path, dataset,instrument[0],'rescaled_gt') + left_preds_path = os.path.join(test_path, dataset,instrument[0],'rescaled_preds') + right_preds_path = os.path.join(test_path, dataset,instrument[1],'rescaled_preds') + for frame in sorted(os.listdir(left_preds_path)): + if len(instrument)==3: + gold1 = ((plt.imread(os.path.join(gt_path1,frame))[:,:,0][58:-52,143:-126])>=0.5)+0 + gold2 = ((plt.imread(os.path.join(gt_path2,frame))[:,:,0][58:-52,143:-126])>=0.5)+0 + extra_pred = ((plt.imread(os.path.join(extra_preds_path, frame))[:,:,0][58:-52,143:-126])>=0.5) + gold = (gold1 | gold2)+0 + else: + gold = ((plt.imread(os.path.join(gt_path,frame))[:,:,0][58:-52,143:-126])>=0.5)+0 + left_pred = ((plt.imread(os.path.join(left_preds_path, frame))[:,:,0][58:-52,143:-126])>=0.5) + right_pred = ((plt.imread(os.path.join(right_preds_path, frame))[:,:,0][58:-52,143:-126])>=0.5) + + pred = (left_pred | right_pred) + if len(instrument)==3: + pred = (pred | extra_pred) + pred = pred + 0 + gold = torch.Tensor(gold).unsqueeze(0) + pred = torch.Tensor(pred).unsqueeze(0) + dices.append(dice_coef(gold, pred)) + ious.append(iou_coef(gold, pred)) + + + # if instrument==('lpf','rpf') and dataset=='instrument_2': + # print(dices) + # print(os.path.join(left_preds_path, frame)) + # 
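Note: dice_coef and iou_coef used by the evaluation scripts are imported from the repository's utils module, which is not part of this diff. As a reference only, a typical formulation consistent with how the scripts call them (binary masks in, one score per image out) might look like:

import torch

def dice_coef(gt, pred, eps=1e-7):
    # Dice = 2 * |intersection| / (|gt| + |pred|) on binary masks.
    inter = (gt * pred).sum()
    return (2 * inter + eps) / (gt.sum() + pred.sum() + eps)

def iou_coef(gt, pred, eps=1e-7):
    # IoU = |intersection| / |union| on binary masks.
    inter = (gt * pred).sum()
    return (inter + eps) / (gt.sum() + pred.sum() - inter + eps)

gold = torch.tensor([[0., 1., 1.], [0., 1., 0.]]).unsqueeze(0)
pred = torch.tensor([[0., 1., 0.], [0., 1., 1.]]).unsqueeze(0)
print(dice_coef(gold, pred).item(), iou_coef(gold, pred).item())  # ~0.667, ~0.5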
plt.imshow(plt.imread(os.path.join(left_preds_path, frame)),cmap='gray') + # plt.imshow(pred[0],'gray') + # plt.show() + # plt.imshow(gold[0],cmap='gray') + # plt.show() + # 1/0 + + print(f"Dataset: {dataset}, instrument: {instrument}, dice: {torch.mean(torch.Tensor(dices))}, iou: {torch.mean(torch.Tensor(ious))}") + print('\n') \ No newline at end of file diff --git a/AllinonSAM/eval/endovis18/calculate_ious.py b/AllinonSAM/eval/endovis18/calculate_ious.py new file mode 100644 index 0000000000000000000000000000000000000000..e78f4f11b8842bbce9772573aa2b8918e46afcda --- /dev/null +++ b/AllinonSAM/eval/endovis18/calculate_ious.py @@ -0,0 +1,33 @@ +import os +import torch +import PIL.Image as Image +import matplotlib.pyplot as plt +import sys + +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/biastuning/") + +from utils import * + +results_folder_name = 'endovis18_10label_textaffine_decdertuning_4e-4_adamw_focal_alpha75e-2_gamma_2_256_bs64_rsz_manyaug_blanklables' + +ious_all = {} +for object in os.listdir(results_folder_name): + ious = [] + print("Starting object: ", object) + preds_path = os.path.join(results_folder_name, object, 'rescaled_preds') + gt_path = os.path.join(results_folder_name, object, 'rescaled_gt') + for i,im in enumerate(os.listdir(gt_path)): + if i<13: + continue + label = np.array(Image.open(os.path.join(gt_path,im)))[60:306,150:400] + label = (label>127)+0 + pred = np.array(Image.open(os.path.join(preds_path,im)))[60:306, 150:400] + pred = (pred>127) + 0 + plt.imshow(label) + plt.show() + plt.imshow(label) + plt.show() + print(label.shape) + print(pred.shape) + print(np.unique(pred)) + 1/0 \ No newline at end of file diff --git a/AllinonSAM/eval/endovis18/config_endovis18_test.yml b/AllinonSAM/eval/endovis18/config_endovis18_test.yml new file mode 100644 index 0000000000000000000000000000000000000000..fe4bb476abee9c158c3aaac4c5158cb0d45d45a7 --- /dev/null +++ b/AllinonSAM/eval/endovis18/config_endovis18_test.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: False + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: ENDOVIS + root_path: '/media/ubuntu/New Volume/jay/endovis17/instrument_1_4_testing/' + label_list: [1,2,3,4,5,6,7,8,9,10] + label_names: ['background tissue', 'surgical instrument', 'kidney parenchyma', 'covered kidney', 'thread', 'clamps', 'suturing needle', 'suction instrument', 'small intestine','ultrasound probe'] + volume_channel: 2 + diff --git a/AllinonSAM/eval/endovis18/generate_all_baseline_results.sh b/AllinonSAM/eval/endovis18/generate_all_baseline_results.sh new file mode 100644 index 0000000000000000000000000000000000000000..bb474971428407ad506704617eec8dd36b131577 --- /dev/null +++ b/AllinonSAM/eval/endovis18/generate_all_baseline_results.sh @@ -0,0 +1,18 @@ +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis18/seq_1-20230624T000458Z-001/seq_1/left_frames" --data_config config_endovis18_test.yml --model_config model_baseline.yml --save_path "./unet_results/seq1" --pretrained_path ../../unet_ev18.pth --gt_path "/media/ubuntu/New Volume/jay/endovis18/seq_1-20230624T000458Z-001/seq_1/labels" + +echo "......................." 
+ +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis18/seq_2-20230624T000507Z-001/seq_2/left_frames" --data_config config_endovis18_test.yml --model_config model_baseline.yml --save_path "./unet_results/seq2" --pretrained_path ../../unet_ev18.pth --gt_path "/media/ubuntu/New Volume/jay/endovis18/seq_2-20230624T000507Z-001/seq_2/labels" + + +echo "......................." + +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis18/seq_3-20230624T000508Z-001/seq_3/left_frames" --data_config config_endovis18_test.yml --model_config model_baseline.yml --save_path "./unet_results/seq3" --pretrained_path ../../unet_ev18.pth --gt_path "/media/ubuntu/New Volume/jay/endovis18/seq_3-20230624T000508Z-001/seq_3/labels" + + +echo "......................." + +python generate_predictions_baselines.py --data_folder "/media/ubuntu/New Volume/jay/endovis18/seq_4-20230624T000509Z-001/seq_4/left_frames" --data_config config_endovis18_test.yml --model_config model_baseline.yml --save_path "./unet_results/seq4" --pretrained_path ../../unet_ev18.pth --gt_path "/media/ubuntu/New Volume/jay/endovis18/seq_4-20230624T000509Z-001/seq_4/labels" + + +echo "......................." \ No newline at end of file diff --git a/AllinonSAM/eval/endovis18/generate_all_results.sh b/AllinonSAM/eval/endovis18/generate_all_results.sh new file mode 100644 index 0000000000000000000000000000000000000000..d4567c47c2a2d81784ede89e0822e29e2f2a3290 --- /dev/null +++ b/AllinonSAM/eval/endovis18/generate_all_results.sh @@ -0,0 +1,24 @@ +declare -a StringArray=("background tissue" "surgical instrument" "kidney parenchyma" "covered kidney" "thread" "clamps" "suturing needle" "suction instrument" "small intestine" "ultrasound probe") + +for label in "${StringArray[@]}"; do + echo "${label}" + + python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis18/seq_1-20230624T000458Z-001/seq_1/left_frames" --data_config config_endovis18_test.yml --model_config model_svdtuning.yml --save_path "./svdshiftscale_ev18_bs16_tal_focal075alpha2_1e-4_epoch150/seq1/${label}" --gt_path "/media/ubuntu/New Volume/jay/endovis18/seq_1-20230624T000458Z-001/seq_1/labels" --labels_of_interest "${label}" --pretrained_path "svdshiftscale_ev18_bs16_tal_focal075alpha2_1e-4_epoch150.pth" + + echo "......................." + + python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis18/seq_2-20230624T000507Z-001/seq_2/left_frames" --data_config config_endovis18_test.yml --model_config model_svdtuning.yml --save_path "./svdshiftscale_ev18_bs16_tal_focal075alpha2_1e-4_epoch150/seq2/${label}" --gt_path "/media/ubuntu/New Volume/jay/endovis18/seq_2-20230624T000507Z-001/seq_2/labels" --labels_of_interest "${label}" --pretrained_path "svdshiftscale_ev18_bs16_tal_focal075alpha2_1e-4_epoch150.pth" + + + echo "......................." + + python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis18/seq_3-20230624T000508Z-001/seq_3/left_frames" --data_config config_endovis18_test.yml --model_config model_svdtuning.yml --save_path "./svdshiftscale_ev18_bs16_tal_focal075alpha2_1e-4_epoch150/seq3/${label}" --gt_path "/media/ubuntu/New Volume/jay/endovis18/seq_3-20230624T000508Z-001/seq_3/labels" --labels_of_interest "${label}" --pretrained_path "svdshiftscale_ev18_bs16_tal_focal075alpha2_1e-4_epoch150.pth" + + + echo "......................." 
+ + python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/endovis18/seq_4-20230624T000509Z-001/seq_4/left_frames" --data_config config_endovis18_test.yml --model_config model_svdtuning.yml --save_path "./svdshiftscale_ev18_bs16_tal_focal075alpha2_1e-4_epoch150/seq4/${label}" --gt_path "/media/ubuntu/New Volume/jay/endovis18/seq_4-20230624T000509Z-001/seq_4/labels" --labels_of_interest "${label}" --pretrained_path "svdshiftscale_ev18_bs16_tal_focal075alpha2_1e-4_epoch150.pth" + + + echo "......................." +done \ No newline at end of file diff --git a/AllinonSAM/eval/endovis18/generate_predictions.py b/AllinonSAM/eval/endovis18/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..a73a559ba95bfccdcf2e2e6b5802736d87ca925d --- /dev/null +++ b/AllinonSAM/eval/endovis18/generate_predictions.py @@ -0,0 +1,193 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['background tissue', 'surgical instrument', 'kidney parenchyma', 'covered kidney', 'thread', 'clamps', 'suturing needle', 'suction instrument', 'small intestine','ultrasound probe'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + 'background tissue': [[0,0,0]], + 'surgical instrument': [[0,255,0],[0,255,255],[125,255,12]], + 'kidney parenchyma': [[255,55,0]], + 'covered kidney': [[24,55,125]], + 'thread': [[187,155,25]], + 'clamps': [[0,255,125]], + 'suturing needle': [[255,255,125]], + 'suction instrument': [[123,15,175]], + 'small intestine': [[124,155,5]], + 'ultrasound probe': [[12,255,141]] + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + 
os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = ENDOVIS_18_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name)) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = np.array(Image.open(gt_path).convert("RGB")) + temp = np.zeros((H,W)).astype('uint8') + selected_color_list = label_dict[args.labels_of_interest] + for c in selected_color_list: + temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + + # plt.imshow(gold) + # plt.show() + mask = torch.Tensor(temp).unsqueeze(0) + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + argmax_masks = torch.argmax(masks, dim=0) + final_mask = torch.zeros(masks[0].shape) + final_mask_rescaled = torch.zeros(masks[0].shape).unsqueeze(-1).repeat(1,1,3) + #save masks + for i in range(final_mask.shape[0]): + for j in range(final_mask.shape[1]): + final_mask[i,j] = codes[argmax_masks[i,j]] if masks[argmax_masks[i,j],i,j]>=0.5 else 0 + # final_mask_rescaled[i,j] = torch.Tensor(visualize_dict[(labels_of_interest[argmax_masks[i,j]])] if masks[argmax_masks[i,j],i,j]>=0.5 else [0,0,0]) + + # save_im = Image.fromarray(final_mask.numpy()) + # save_im.save(os.path.join(args.save_path,'preds', img_name)) + + # plt.imshow(final_mask_rescaled,cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + # plt.close() + + # print("label shape: ", label.shape) + # plt.imshow(label[0], cmap='gray') + # plt.show() + + plt.imshow((masks[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, 
(masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/endovis18/generate_predictions_baselines.py b/AllinonSAM/eval/endovis18/generate_predictions_baselines.py new file mode 100644 index 0000000000000000000000000000000000000000..97b8c8db8f1782ed1e1f2513a88a27859eabb6c6 --- /dev/null +++ b/AllinonSAM/eval/endovis18/generate_predictions_baselines.py @@ -0,0 +1,187 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/biastuning/") + +from data_utils import * +from model import * +from utils import * +from baselines import UNet, UNext, medt_net +from vit_seg_modeling import VisionTransformer +from vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg +from axialnet import MedT + +label_names = ['background tissue', 'surgical instrument', 'kidney parenchyma', 'covered kidney', 'thread', 'clamps', 'suturing needle', 'suction instrument', 'small intestine','ultrasound probe'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + 'background tissue': [[0,0,0]], + 'surgical instrument': [[0,255,0],[0,255,255],[125,255,12]], + 'kidney parenchyma': [[255,55,0]], + 'covered kidney': [[24,55,125]], + 'thread': [[187,155,25]], + 'clamps': [[0,255,125]], + 'suturing needle': [[255,255,125]], + 'suction instrument': [[123,15,175]], + 'small intestine': [[124,155,5]], + 'ultrasound probe': [[12,255,141]] + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + #change the img size in model config according to data config + in_channels = model_config['in_channels'] + out_channels = model_config['num_classes'] + img_size = model_config['img_size'] + if model_config['arch']=='Prompt Adapted SAM': + model = Prompt_Adapted_SAM(model_config, label_dict, args.device, training_strategy='biastuning') + elif 
model_config['arch']=='UNet': + model = UNet(in_channels=in_channels, out_channels=out_channels) + elif model_config['arch']=='UNext': + model = UNext(num_classes=out_channels, input_channels=in_channels, img_size=img_size) + elif model_config['arch']=='MedT': + #TODO + model = MedT(img_size=img_size, num_classes=out_channels) + elif model_config['arch']=='TransUNet': + config_vit = CONFIGS_ViT_seg['R50-ViT-B_16'] + config_vit.n_classes = out_channels + config_vit.n_skip = 3 + # if args.vit_name.find('R50') != -1: + # config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size)) + model = VisionTransformer(config_vit, img_size=img_size, num_classes=config_vit.n_classes) + + model.load_state_dict(torch.load(args.pretrained_path, map_location=args.device)) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = Cholec_8k_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name)) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = np.array(Image.open(gt_path).convert("RGB")) + mask = np.zeros((len(label_dict),img.shape[1], img.shape[2])) + for i,c in enumerate(list(label_dict.keys())): + temp = np.zeros((H,W)).astype('uint8') + selected_color_list = label_dict[c] + for c in selected_color_list: + temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + mask[i,:,:] = temp + mask = torch.Tensor(mask) + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + img = img.unsqueeze(0).to(args.device) #1XCXHXW + masks = model(img,'') + + argmax_masks = torch.argmax(masks, dim=1).cpu().numpy() + + classwise_dices = [] + classwise_ious = [] + for j,c1 in enumerate(label_dict): + res = np.where(argmax_masks==j,1,0) + # print("res shape: ",res.shape) + plt.imshow(res[0], cmap='gray') + save_dir = os.path.join(args.save_path, c1, 'rescaled_preds') + os.makedirs(save_dir, exist_ok=True) + plt.savefig(os.path.join(args.save_path, c1, 'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[j]), cmap='gray') + save_dir = os.path.join(args.save_path, c1, 'rescaled_gt') + os.makedirs(save_dir, exist_ok=True) + plt.savefig(os.path.join(args.save_path, c1, 'rescaled_gt', img_name)) + plt.close() + + classwise_dices.append(dice_coef(mask[j], torch.Tensor(res[0]))) + classwise_ious.append(iou_coef(mask[j], torch.Tensor(res[0]))) + + # break + dices.append(classwise_dices) + ious.append(classwise_ious) + # print("classwise_dices: ", classwise_dices) + # print("classwise ious: ", classwise_ious) + + print(torch.mean(torch.Tensor(dices),dim=0)) + print(torch.mean(torch.Tensor(ious),dim=0)) + + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/endovis18/model_baseline.yml b/AllinonSAM/eval/endovis18/model_baseline.yml new file mode 100644 index 0000000000000000000000000000000000000000..5d1e5b7b83e615c2d89d5185cd737ad138f7f507 --- /dev/null +++ b/AllinonSAM/eval/endovis18/model_baseline.yml @@ -0,0 +1,17 @@ + +img_size: 256 +num_classes: 10 +in_channels: 3 +img_type: 'image' +arch: "UNet" +use_fdn: False + 
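Note: generate_predictions_baselines.py above builds a one-hot ground-truth stack by matching each class's RGB colour(s) in the label image, then scores every class against the argmax of the model logits. A compact sketch of that evaluation pattern, using a hypothetical two-colour palette rather than the EndoVis one, is:

import numpy as np
import torch

# Hypothetical palette (class name -> list of RGB colours in the label PNG);
# the real palette is the label_dict defined in the script above.
palette = {"background": [[0, 0, 0]], "instrument": [[0, 255, 0], [0, 255, 255]]}

def onehot_from_rgb(label, palette):
    # label: HxWx3 uint8 array -> CxHxW binary tensor, one channel per class.
    h, w, _ = label.shape
    out = np.zeros((len(palette), h, w), dtype=np.uint8)
    for idx, colours in enumerate(palette.values()):
        for c in colours:
            out[idx] |= np.all(label == np.array(c, dtype=np.uint8), axis=2).astype(np.uint8)
    return torch.from_numpy(out)

def classwise_dice(gt_onehot, logits, eps=1e-7):
    # Prediction for class j is the set of pixels whose argmax over channels is j.
    pred_idx = logits.argmax(dim=0)
    scores = []
    for j in range(gt_onehot.shape[0]):
        pred = (pred_idx == j).float()
        gt = gt_onehot[j].float()
        inter = (gt * pred).sum()
        scores.append(((2 * inter + eps) / (gt.sum() + pred.sum() + eps)).item())
    return scores

label = np.zeros((4, 4, 3), dtype=np.uint8)
label[1:3, 1:3] = [0, 255, 0]                  # a small "instrument" square
print(classwise_dice(onehot_from_rgb(label, palette), torch.randn(2, 4, 4)))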
+training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 16 + num_epochs: 500 + schedule_step: 2100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' \ No newline at end of file diff --git a/AllinonSAM/eval/endovis18/model_svdtuning.yml b/AllinonSAM/eval/endovis18/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..3630f783a696b14c74529c26c5f96662084dfbb2 --- /dev/null +++ b/AllinonSAM/eval/endovis18/model_svdtuning.yml @@ -0,0 +1,30 @@ +sam: + img_size: 256 + num_classes: 10 + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-2 + batch_size: 32 + num_epochs: 1000 + schedule_step: 100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 + +use_lora: False \ No newline at end of file diff --git a/AllinonSAM/eval/glas/config_glas.yml b/AllinonSAM/eval/glas/config_glas.yml new file mode 100644 index 0000000000000000000000000000000000000000..1c0a89c1a284aef4571b0fb878ca27bae7a6b1bd --- /dev/null +++ b/AllinonSAM/eval/glas/config_glas.yml @@ -0,0 +1,21 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 1024 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: True + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: True + use_cjitter: False + use_affine: False +data: + name: GLAS + root_path: '/media/ubuntu/New Volume/jay/GLAS/archive' + label_list: [1] + label_names: ['Glands'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/eval/glas/generate_all_results.sh b/AllinonSAM/eval/glas/generate_all_results.sh new file mode 100644 index 0000000000000000000000000000000000000000..5ead188a0cd7c6225731217090371a9fc6f5914e --- /dev/null +++ b/AllinonSAM/eval/glas/generate_all_results.sh @@ -0,0 +1,9 @@ + +echo "Testing Accuracy: " + +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/GLAS/archive/test" --data_config config_glas.yml --model_config model_svdtuning.yml --save_path "./lorasam" --gt_path "/media/ubuntu/New Volume/jay/PolypDataset/TestDataset/TestDataset/test/masks" --labels_of_interest "Glands" --pretrained_path "lora_glas_1024.pth" + +echo "......................." 
+ +# echo "Training Accuracy: " +# python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/GLAS/archive/train" --data_config config_glas.yml --model_config model_svdtuning.yml --save_path "./tmp" --gt_path "/media/ubuntu/New Volume/jay/GLAS/archive" --labels_of_interest "Glands" --pretrained_path "svdtuning_shiftscale_glas_tal_1repeat_CE_dice_1e-3_sz_1024.pth" diff --git a/AllinonSAM/eval/glas/generate_all_results_pointsam.sh b/AllinonSAM/eval/glas/generate_all_results_pointsam.sh new file mode 100644 index 0000000000000000000000000000000000000000..06d26e5bf1ccdd1de70077cd73fd1cab0b3cd06d --- /dev/null +++ b/AllinonSAM/eval/glas/generate_all_results_pointsam.sh @@ -0,0 +1,4 @@ + +echo "Testing Accuracy: " + +python generate_predictions_pointsam.py --data_folder "/media/ubuntu/New Volume/jay/GLAS/archive/test" --data_config config_glas.yml --model_config model_svdtuning.yml --save_path "./sam_point_glas" --gt_path "/media/ubuntu/New Volume/jay/PolypDataset/TestDataset/TestDataset/test/masks" --labels_of_interest "Glands" diff --git a/AllinonSAM/eval/glas/generate_predictions.py b/AllinonSAM/eval/glas/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..92a79b44e7b5ab002c0dca33b13ccfc898d821cc --- /dev/null +++ b/AllinonSAM/eval/glas/generate_predictions.py @@ -0,0 +1,161 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * +from data_transforms.glas_transform import GLAS_Transform + +label_names = ['Glands'] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, 
training_strategy='svdtuning') + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + if args.pretrained_path: + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = GLAS_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + if (('png' not in img_name) and ('jpg' not in img_name) and ('jpeg' not in img_name) and ('bmp' not in img_name)): + continue + if 'anno' in img_name: + continue + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.data_folder,img_name[:-4]+'_anno.bmp')) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = torch.Tensor(np.array(Image.open(gt_path))) + if len(label.shape)==3: + label = label[:,:,0] + label = label.unsqueeze(0) + mask = (label>0)+0 + # plt.imshow(gold) + # plt.show() + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + + plt.imshow((masks[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name[:-4]+'.png')) + plt.close() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name[:-4]+'.png')) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/glas/generate_predictions_pointsam.py b/AllinonSAM/eval/glas/generate_predictions_pointsam.py new file mode 100644 index 0000000000000000000000000000000000000000..2ef17bcc44443f86f6470c7384b8e8e699664c62 --- /dev/null +++ b/AllinonSAM/eval/glas/generate_predictions_pointsam.py @@ -0,0 +1,190 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * +from data_transforms.glas_transform import GLAS_Transform + +label_names = ['Glands'] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # 
visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + if args.pretrained_path: + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = GLAS_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + if (('png' not in img_name) and ('jpg' not in img_name) and ('jpeg' not in img_name) and ('bmp' not in img_name)): + continue + if 'anno' in img_name: + continue + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.data_folder,img_name[:-4]+'_anno.bmp')) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = torch.Tensor(np.array(Image.open(gt_path))) + if len(label.shape)==3: + label = label[:,:,0] + label = label.unsqueeze(0) + mask = (label>0)+0 + # plt.imshow(gold) + # plt.show() + + else: + mask = torch.zeros((1,H,W)) + img, 
mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get positive point prompts + _,y,x = torch.where(mask==1) + pos_prompts = torch.cat([x.unsqueeze(1),y.unsqueeze(1)],dim=1) + + #get negative point prompts + _,y_neg,x_neg = torch.where(mask==0) + neg_prompts = (torch.cat([x_neg.unsqueeze(1),y_neg.unsqueeze(1)],dim=1)) + + if len(y)>0: + pos_point_idx = random.randint(0,y.shape[0]-1) + neg_point_idx = random.randint(0,y_neg.shape[0]-1) + # points = (torch.cat([pos_prompts[pos_point_idx].unsqueeze(0), neg_prompts[neg_point_idx].unsqueeze(0)],dim=0).unsqueeze(0).to(args.device), torch.Tensor([1,-1]).unsqueeze(0).to(args.device)) + points = (pos_prompts[pos_point_idx].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([1]).unsqueeze(0).to(args.device)) + + else: + neg_point_idx1 = random.randint(0,y_neg.shape[0]-1) + neg_point_idx2 = random.randint(0,y_neg.shape[0]-1) + # points = (torch.cat([neg_prompts[neg_point_idx1].unsqueeze(0), neg_prompts[neg_point_idx2].unsqueeze(0)],dim=0).unsqueeze(0).to(args.device), torch.Tensor([-1,-1]).unsqueeze(0).to(args.device)) + points = (neg_prompts[neg_point_idx1].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([-1]).unsqueeze(0).to(args.device)) + + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + masks = model.get_masks_with_manual_prompts(img_embeds_repeated, points=points).cpu() + + + plt.imshow((masks[0]>=0.5), cmap='gray') + if len(y)>0: + plt.scatter(x[pos_point_idx], y[pos_point_idx], c='green') + # plt.scatter(x_neg[neg_point_idx], y_neg[neg_point_idx], c='red') + else: + plt.scatter(x_neg[neg_point_idx1], y_neg[neg_point_idx1], c='red') + # plt.scatter(x_neg[neg_point_idx2], y_neg[neg_point_idx2], c='red') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name[:-4]+'.png')) + plt.close() + + # if args.gt_path: + # plt.imshow((mask[0]), cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + # plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/glas/model_svdtuning.yml b/AllinonSAM/eval/glas/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..fe7a85318ff3c182bf6b6664fdebc0e98195c0d1 --- /dev/null +++ b/AllinonSAM/eval/glas/model_svdtuning.yml @@ -0,0 +1,31 @@ +sam: + img_size: 1024 + num_classes: 1 + sam_type: 'base' + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 32 + num_epochs: 1000 + schedule_step: 50 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 + +use_lora: True \ No newline at end of file diff --git a/AllinonSAM/eval/isic2018/config_isic18.yml b/AllinonSAM/eval/isic2018/config_isic18.yml new file mode 100644 index 
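Note: generate_predictions_pointsam.py above prompts the model with a single point: a randomly chosen foreground pixel when the ground-truth mask is non-empty, otherwise a random background pixel with a negative label. A small stand-alone sketch of that sampling step (the helper name is illustrative; the (coords, labels) layout mirrors what the script passes to the model) is:

import random
import torch

def sample_point_prompt(mask):
    # mask: 1xHxW binary tensor. Returns (coords, labels): coords is 1x1x2 in (x, y)
    # order; the label is +1 for a foreground click and -1 for a background click,
    # mirroring the convention used in the script.
    _, ys, xs = torch.where(mask == 1)
    label = 1.0
    if len(ys) == 0:                      # empty mask: fall back to a negative click
        _, ys, xs = torch.where(mask == 0)
        label = -1.0
    i = random.randint(0, len(ys) - 1)
    coords = torch.tensor([[[xs[i].item(), ys[i].item()]]], dtype=torch.float)
    return coords, torch.tensor([[label]])

mask = torch.zeros(1, 8, 8)
mask[0, 2:5, 3:6] = 1
print(sample_point_prompt(mask))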
0000000000000000000000000000000000000000..4192456763b643efc5b57e4c4d4d6b9b7ea0034e --- /dev/null +++ b/AllinonSAM/eval/isic2018/config_isic18.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: False +data: + name: ISIC2018 + root_path: '/media/ubuntu/New Volume/jay/ISIC2018' + label_list: [1] + label_names: ['Lesion'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/eval/isic2018/generate_all_predictions.sh b/AllinonSAM/eval/isic2018/generate_all_predictions.sh new file mode 100644 index 0000000000000000000000000000000000000000..da674da2337c8706550cf0383e77104be57ee5b6 --- /dev/null +++ b/AllinonSAM/eval/isic2018/generate_all_predictions.sh @@ -0,0 +1 @@ +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/ISIC2018/ISIC2018_Task1-2_Test_Input" --data_config config_isic18.yml --model_config model_svdtuning.yml --save_path "svdtuning_shiftscale_isic2018_tal_focal075_alpha2_1e-3" --gt_path "/media/ubuntu/New Volume/jay/ISIC2018/ISIC2018_Task1_Test_GroundTruth" --labels_of_interest "Lesion" --pretrained_path "svdtuning_shiftscale_isic2018_tal_focal075_alpha2_1e-3.pth" --device "cuda:0" diff --git a/AllinonSAM/eval/isic2018/generate_predictions.py b/AllinonSAM/eval/isic2018/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..b70963ad160592c411b43385d253454d1c9eef22 --- /dev/null +++ b/AllinonSAM/eval/isic2018/generate_predictions.py @@ -0,0 +1,158 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * +from data_transforms.isic2018_transform import ISIC_Transform + +label_names = ['Lesion'] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + + #make folder to save visualizations + 
os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = ISIC_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + if (('png' not in img_name) and ('jpg' not in img_name) and ('jpeg' not in img_name)): + continue + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name[:-4]+'_segmentation.png')) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = torch.Tensor(np.array(Image.open(gt_path))) + if len(label.shape)==3: + label = label[:,:,0] + label = label.unsqueeze(0) + mask = (label>0)+0 + # plt.imshow(gold) + # plt.show() + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + + plt.imshow((masks[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/isic2018/model_svdtuning.yml b/AllinonSAM/eval/isic2018/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..56b9465b2cb506411ad45c1dec78347be924f113 --- /dev/null +++ b/AllinonSAM/eval/isic2018/model_svdtuning.yml @@ -0,0 +1,30 @@ +sam: + img_size: 256 + num_classes: 13 + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + USE_IMAGE_PROMPT: 
False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 32 + num_epochs: 1000 + schedule_step: 50 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 + +use_lora: False \ No newline at end of file diff --git a/AllinonSAM/eval/lits/config_lits.yml b/AllinonSAM/eval/lits/config_lits.yml new file mode 100644 index 0000000000000000000000000000000000000000..c73b06b5457508ca2e3d385d1732704e41acf418 --- /dev/null +++ b/AllinonSAM/eval/lits/config_lits.yml @@ -0,0 +1,21 @@ +data_transforms: + a_min: -1410 + a_max: 3024 + img_size: 400 + use_random_crop: True + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: LITS + root_path: '/media/ubuntu/New Volume/jay/LiTS' + label_list: [1,2] + label_names: ['liver', 'tumor'] + volume_channel: 2 + sampling_deviation: 5 + samples_per_slice: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/eval/lits/generate_predictions.py b/AllinonSAM/eval/lits/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..7f1e6c5502bf53436c0ed75ee8ef09a37f003c7f --- /dev/null +++ b/AllinonSAM/eval/lits/generate_predictions.py @@ -0,0 +1,135 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['liver', 'tumor'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='tumor', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + 'liver': 1, + 'tumor': 2, + } + + #change the img size in model config according to data config + model_config['sam']['img_size'] = data_config['data_transforms']['img_size'] + model_config['sam']['num_classes'] = len(data_config['data']['label_list']) + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + 
os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + #legacy model support + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + data_transform = Slice_Transforms(config=data_config) + label_text = args.labels_of_interest + #load data + for i, file_name in enumerate(sorted(os.listdir(args.data_folder))): + print(i) + file_path = os.path.join(args.data_folder, file_name) + im_nib = nib.load(file_path) + + # for 2d mode + #image loading and conversion to rgb by replicating channels + if data_config['data']['volume_channel']==2: #data originally is HXWXC + im = (torch.Tensor(np.asanyarray(im_nib.dataobj)).permute(2,0,1).unsqueeze(1).repeat(1,3,1,1)) + else: #data originally is CXHXW + im = (torch.Tensor(np.asanyarray(im_nib.dataobj)).unsqueeze(1).repeat(1,3,1,1)) + num_slices = im.shape[0] + preds = [] + for i in range(num_slices): + slice_im = im[i] + slice_im = data_transform(slice_im) + slice_im = torch.Tensor(slice_im).to(args.device) + with torch.set_grad_enabled(False): + outputs, reg_loss = model(slice_im, [label_text], [i]) + slice_pred = (outputs>=0.5) +0 + preds.append(slice_pred) + + # print(len(preds)) + # print(preds[0].shape) + preds = (torch.cat(preds, dim=0).permute(1,2,0)).cpu().numpy().astype('uint8') + # print(preds.shape) + ni_img = nib.Nifti1Image(preds, im_nib.affine) + nib.save(ni_img, os.path.join(args.save_path,'preds',file_name)) + + +if __name__ == '__main__': + main() diff --git a/AllinonSAM/eval/lits/model_svdtuning.yml b/AllinonSAM/eval/lits/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..268fc18c42d4be93b8fb13a480e543fe40e69c5a --- /dev/null +++ b/AllinonSAM/eval/lits/model_svdtuning.yml @@ -0,0 +1,28 @@ +sam: + img_size: 256 + num_classes: 2 + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: True + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 32 + num_epochs: 1000 + schedule_step: 100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 diff --git a/AllinonSAM/eval/lits/output_demo.nii b/AllinonSAM/eval/lits/output_demo.nii new file mode 100644 index 0000000000000000000000000000000000000000..ca7446809c3c7cb72dfdc1e375b4c5e55a3329f3 --- /dev/null +++ b/AllinonSAM/eval/lits/output_demo.nii @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79d37fe195538590df233741d634043873e800b35e60712f2565d5a14c2ddf5c +size 15520352 diff --git a/AllinonSAM/eval/lits2/config_lits2.yml b/AllinonSAM/eval/lits2/config_lits2.yml new file mode 100644 index 
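Note: the LiTS script above runs the model slice by slice over each CT volume and writes the stacked predictions back out as a NIfTI file carrying the original affine. A stripped-down sketch of that loop, with the actual model call replaced by a placeholder predict_slice, is:

import numpy as np
import nibabel as nib
import torch

def predict_slice(slice_rgb):
    # Placeholder for the real model call; returns a binary HxW mask.
    return (slice_rgb.mean(dim=0) > 0.5).float()

def segment_volume(in_path, out_path):
    vol = nib.load(in_path)
    data = np.asanyarray(vol.dataobj)               # HxWxS, slices on the last axis
    slices = torch.Tensor(data).permute(2, 0, 1)    # SxHxW
    preds = []
    for s in slices:
        rgb = s.unsqueeze(0).repeat(3, 1, 1)        # replicate the slice to 3 channels
        preds.append(predict_slice(rgb).unsqueeze(0))
    out = torch.cat(preds, dim=0).permute(1, 2, 0).numpy().astype("uint8")  # back to HxWxS
    nib.save(nib.Nifti1Image(out, vol.affine), out_path)

# segment_volume("volume-0.nii", "pred-0.nii")      # file names are illustrative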
0000000000000000000000000000000000000000..1ec9181bfc942f406e02054db1836136bd5115ef --- /dev/null +++ b/AllinonSAM/eval/lits2/config_lits2.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: False +data: + name: LITS2 + root_path: '/media/ubuntu/New Volume/jay/LiTS2/archive' + label_list: [1,2] + label_names: ['Liver','Tumor'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/eval/lits2/generate_predictions.py b/AllinonSAM/eval/lits2/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..88969c7a64989701cc9ed9b5ad41a9bf4698536e --- /dev/null +++ b/AllinonSAM/eval/lits2/generate_predictions.py @@ -0,0 +1,199 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Liver', 'Tumor'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: 
+ # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = LiTS2_Transform(config=data_config) + + #dice + tumor_dices = [] + tumor_ious=[] + liver_dices = [] + liver_ious=[] + + + #load data + root_path = "/media/ubuntu/New Volume/jay/LiTS2/archive" + imgs_path = os.path.join(root_path, 'dataset_6/dataset_6') + test_csv = pd.read_csv(os.path.join(root_path, 'lits_test.csv')) + for i in range(len(test_csv)): + if i%10!=0: + continue + img_path = (os.path.join(root_path,'dataset_6',test_csv['filepath'].iloc[i][18:])) + image_name = test_csv['filepath'].iloc[i][28:] + liver_mask_path = os.path.join(root_path,'dataset_6',test_csv['liver_maskpath'].iloc[i][18:]) + tumor_mask_path = os.path.join(root_path,'dataset_6',test_csv['tumor_maskpath'].iloc[i][18:]) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + + try: + liver_label = torch.Tensor(np.array(Image.open(liver_mask_path)))[:,:,0] + tumor_label = torch.Tensor(np.array(Image.open(tumor_mask_path)))[:,:,0] + except: + liver_label = torch.zeros(H, W) + tumor_label = torch.zeros(H, W) + # label = np.array(Image.open(gt_path).convert("RGB")) + # temp = np.zeros((H,W)).astype('uint8') + # selected_color_list = label_dict[args.labels_of_interest] + # for c in selected_color_list: + # temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + + # # plt.imshow(gold) + # # plt.show() + # mask = torch.Tensor(temp).unsqueeze(0) + + liver_label = liver_label.unsqueeze(0) + liver_label = (liver_label>0)+0 + tumor_label = tumor_label.unsqueeze(0) + tumor_label = (tumor_label>0)+0 + + #convert all grayscale pixels due to resizing back to 0, 1 + img1, liver_label = data_transform(img, liver_label, is_train=False, apply_norm=True) + liver_label = (liver_label>=0.5)+0 + # liver_label = liver_label[0] + + #convert all grayscale pixels due to resizing back to 0, 1 + _, tumor_label = data_transform(img, tumor_label, is_train=False, apply_norm=True) + tumor_label = (tumor_label>=0.5)+0 + # tumor_label = tumor_label[0] + + #get image embeddings + img = img1.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(1,1,1,1) + x_text = ['Liver'] + x_text2 = ['Tumor'] + masks_liver = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + masks_tumor = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text2).cpu() + + plt.imshow((masks_liver[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', image_name[:-4] +'_liver.png')) + plt.close() + # plt.show() + + plt.imshow((masks_tumor[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', image_name[:-4] +'_tumor.png')) + plt.close() + # plt.show() + + + plt.imshow((liver_label[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', image_name[:-4] +'_liver.png')) + plt.close() + # plt.show() + + + plt.imshow((tumor_label[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', image_name[:-4] +'_tumor.png')) + plt.close() + # plt.show() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + # print(liver_label.shape) + # print((((masks[0]>=0.5)+0).unsqueeze(0)).shape) + liver_dices.append(dice_coef(liver_label, ((masks_liver[0]>=0.5)+0).unsqueeze(0))) + 
tumor_dices.append(dice_coef(tumor_label, ((masks_tumor[0]>=0.5)+0).unsqueeze(0))) + + liver_ious.append(iou_coef(liver_label, ((masks_liver[0]>=0.5)+0).unsqueeze(0))) + tumor_ious.append(iou_coef(tumor_label, ((masks_tumor[0]>=0.5)+0).unsqueeze(0))) + # 1/0 + # break + print("Liver DICE: ",torch.mean(torch.Tensor(liver_dices))) + print("Liver IoU", torch.mean(torch.Tensor(liver_ious))) + print("Tumor DICE", torch.mean(torch.Tensor(tumor_dices))) + print("Tumor IoU", torch.mean(torch.Tensor(tumor_ious))) +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/lits2/generate_predictions_baselines.py b/AllinonSAM/eval/lits2/generate_predictions_baselines.py new file mode 100644 index 0000000000000000000000000000000000000000..5a22023b59bb47d68dcbdff0acdf06a8559c0d40 --- /dev/null +++ b/AllinonSAM/eval/lits2/generate_predictions_baselines.py @@ -0,0 +1,215 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * +from baselines import UNet, UNext, medt_net +from vit_seg_modeling import VisionTransformer +from vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg +from axialnet import MedT + +label_names = ['Liver','Tumor'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + 'Liver': [[100,0,100]], + 'Kidney': [[255,255,0]], + 'Pancreas': [[0,0,255]], + 'Vessels': [[255,0,0]], + 'Adrenals': [[0,255,255]], + 'Gall Bladder': [[0,255,0]], + 'Bones': [[255,255,255]], + 'Spleen': [[255,0,255]] + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + + #load model + #change the img size in model config according to data config + in_channels = model_config['in_channels'] + out_channels = model_config['num_classes'] + img_size = model_config['img_size'] + if model_config['arch']=='Prompt Adapted SAM': + model = Prompt_Adapted_SAM(model_config, label_dict, args.device, training_strategy='svdtuning') + elif 
model_config['arch']=='UNet': + model = UNet(in_channels=in_channels, out_channels=out_channels) + elif model_config['arch']=='UNext': + model = UNext(num_classes=out_channels, input_channels=in_channels, img_size=img_size) + elif model_config['arch']=='MedT': + #TODO + model = MedT(img_size=img_size, num_classes=out_channels) + elif model_config['arch']=='TransUNet': + config_vit = CONFIGS_ViT_seg['R50-ViT-B_16'] + config_vit.n_classes = out_channels + config_vit.n_skip = 3 + # if args.vit_name.find('R50') != -1: + # config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size)) + model = VisionTransformer(config_vit, img_size=img_size, num_classes=config_vit.n_classes) + + model.load_state_dict(torch.load(args.pretrained_path, map_location=args.device)) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = LiTS2_Transform(config=data_config) + + #dice + tumor_dices = [] + tumor_ious=[] + liver_dices = [] + liver_ious=[] + + + #load data + root_path = "/media/ubuntu/New Volume/jay/LiTS2/archive" + imgs_path = os.path.join(root_path, 'dataset_6/dataset_6') + test_csv = pd.read_csv(os.path.join(root_path, 'lits_test.csv')) + for i in range(len(test_csv)): + if i%10!=0: + continue + img_path = (os.path.join(root_path,'dataset_6',test_csv['filepath'].iloc[i][18:])) + image_name = test_csv['filepath'].iloc[i][28:] + liver_mask_path = os.path.join(root_path,'dataset_6',test_csv['liver_maskpath'].iloc[i][18:]) + tumor_mask_path = os.path.join(root_path,'dataset_6',test_csv['tumor_maskpath'].iloc[i][18:]) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + + try: + liver_label = torch.Tensor(np.array(Image.open(liver_mask_path)))[:,:,0] + tumor_label = torch.Tensor(np.array(Image.open(tumor_mask_path)))[:,:,0] + except: + liver_label = torch.zeros(H, W) + tumor_label = torch.zeros(H, W) + # label = np.array(Image.open(gt_path).convert("RGB")) + # temp = np.zeros((H,W)).astype('uint8') + # selected_color_list = label_dict[args.labels_of_interest] + # for c in selected_color_list: + # temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + + # # plt.imshow(gold) + # # plt.show() + # mask = torch.Tensor(temp).unsqueeze(0) + + liver_label = liver_label.unsqueeze(0) + liver_label = (liver_label>0)+0 + tumor_label = tumor_label.unsqueeze(0) + tumor_label = (tumor_label>0)+0 + + #convert all grayscale pixels due to resizing back to 0, 1 + img1, liver_label = data_transform(img, liver_label, is_train=False, apply_norm=True) + liver_label = (liver_label>=0.5)+0 + # liver_label = liver_label[0] + + #convert all grayscale pixels due to resizing back to 0, 1 + _, tumor_label = data_transform(img, tumor_label, is_train=False, apply_norm=True) + tumor_label = (tumor_label>=0.5)+0 + # tumor_label = tumor_label[0] + + #get image embeddings + img = img1.unsqueeze(0).to(args.device) #1XCXHXW + final_label = torch.cat([liver_label,tumor_label], dim=0) + masks,_ = model(img,'') + masks_liver = masks[:,0,:,:].cpu() + masks_tumor = masks[:,1,:,:].cpu() + + plt.imshow(((masks_liver>=0.5)[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', image_name[:-4] +'_liver.png')) + plt.close() + # plt.show() + + plt.imshow(((masks_tumor>=0.5)[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', image_name[:-4] +'_tumor.png')) + plt.close() + # plt.show() + + + 
plt.imshow((liver_label[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', image_name[:-4] +'_liver.png')) + plt.close() + # plt.show() + + + plt.imshow((tumor_label[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', image_name[:-4] +'_tumor.png')) + plt.close() + # plt.show() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + # print(liver_label.shape) + # print((((masks[0]>=0.5)+0).unsqueeze(0)).shape) + liver_dices.append(dice_coef(liver_label, ((masks_liver[0]>=0.5)+0).unsqueeze(0))) + tumor_dices.append(dice_coef(tumor_label, ((masks_tumor[0]>=0.5)+0).unsqueeze(0))) + + liver_ious.append(iou_coef(liver_label, ((masks_liver[0]>=0.5)+0).unsqueeze(0))) + tumor_ious.append(iou_coef(tumor_label, ((masks_tumor[0]>=0.5)+0).unsqueeze(0))) + # 1/0 + # break + print("Liver DICE: ",torch.mean(torch.Tensor(liver_dices))) + print("Liver IoU", torch.mean(torch.Tensor(liver_ious))) + print("Tumor DICE", torch.mean(torch.Tensor(tumor_dices))) + print("Tumor IoU", torch.mean(torch.Tensor(tumor_ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/lits2/model_baseline.yml b/AllinonSAM/eval/lits2/model_baseline.yml new file mode 100644 index 0000000000000000000000000000000000000000..251f3a0c7ac7f28763267f5c4e42538b271b31ec --- /dev/null +++ b/AllinonSAM/eval/lits2/model_baseline.yml @@ -0,0 +1,17 @@ + +img_size: 256 +num_classes: 2 +in_channels: 3 +img_type: 'image' +arch: "UNet" +use_fdn: False + +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 16 + num_epochs: 500 + schedule_step: 2100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' \ No newline at end of file diff --git a/AllinonSAM/eval/lits2/model_svdtuning.yml b/AllinonSAM/eval/lits2/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..9bc74eb46bc954e6cb17df6646cc978ac3585af5 --- /dev/null +++ b/AllinonSAM/eval/lits2/model_svdtuning.yml @@ -0,0 +1,31 @@ +sam: + img_size: 256 + num_classes: 2 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: False + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 32 + num_epochs: 1000 + schedule_step: 1000 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 + +use_lora: True \ No newline at end of file diff --git a/AllinonSAM/eval/lits2/predictions_pointsam.py b/AllinonSAM/eval/lits2/predictions_pointsam.py new file mode 100644 index 0000000000000000000000000000000000000000..66c6702e82cebdff30813b3a5f5ba4e4ec2938e0 --- /dev/null +++ b/AllinonSAM/eval/lits2/predictions_pointsam.py @@ -0,0 +1,249 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Liver', 'Tumor'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + 
parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + if args.pretrained_path: + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = LiTS2_Transform(config=data_config) + + #dice + tumor_dices = [] + tumor_ious=[] + liver_dices = [] + liver_ious=[] + + + #load data + root_path = "/media/ubuntu/New Volume/jay/LiTS2/archive" + imgs_path = os.path.join(root_path, 'dataset_6/dataset_6') + test_csv = pd.read_csv(os.path.join(root_path, 'lits_test.csv')) + for i in range(len(test_csv)): + # if i%10!=0: + # continue + img_path = (os.path.join(root_path,'dataset_6',test_csv['filepath'].iloc[i][18:])) + image_name = test_csv['filepath'].iloc[i][28:] + liver_mask_path = os.path.join(root_path,'dataset_6',test_csv['liver_maskpath'].iloc[i][18:]) + tumor_mask_path = os.path.join(root_path,'dataset_6',test_csv['tumor_maskpath'].iloc[i][18:]) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + + try: + liver_label = torch.Tensor(np.array(Image.open(liver_mask_path)))[:,:,0] + tumor_label = torch.Tensor(np.array(Image.open(tumor_mask_path)))[:,:,0] + except: + liver_label = torch.zeros(H, W) + tumor_label = torch.zeros(H, W) + # label = np.array(Image.open(gt_path).convert("RGB")) + # temp = np.zeros((H,W)).astype('uint8') + # selected_color_list = label_dict[args.labels_of_interest] + # for c in selected_color_list: + # temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + + # # plt.imshow(gold) 
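+        # NOTE (editor, hedged): the commented lines above are an older colour-coded ground-truth reader
+        # (RGB masks matched against label_dict colours); the LiTS2 path instead uses the binary
+        # liver/tumor masks loaded and thresholded above.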
+ # # plt.show() + # mask = torch.Tensor(temp).unsqueeze(0) + + liver_label = liver_label.unsqueeze(0) + liver_label = (liver_label>0)+0 + tumor_label = tumor_label.unsqueeze(0) + tumor_label = (tumor_label>0)+0 + + #convert all grayscale pixels due to resizing back to 0, 1 + img1, liver_label = data_transform(img, liver_label, is_train=False, apply_norm=True) + liver_label = (liver_label>=0.5)+0 + # liver_label = liver_label[0] + + #get positive point prompts + _,y_liver,x_liver = torch.where(liver_label==1) + pos_prompts = torch.cat([x_liver.unsqueeze(1),y_liver.unsqueeze(1)],dim=1) + + #get negative point prompts + _,y_liver_neg,x_liver_neg = torch.where(liver_label==0) + neg_prompts = (torch.cat([x_liver_neg.unsqueeze(1),y_liver_neg.unsqueeze(1)],dim=1)) + + if len(y_liver)>0: + pos_point_idx = random.randint(0,y_liver.shape[0]-1) + neg_point_idx = random.randint(0,y_liver_neg.shape[0]-1) + points_liver = (pos_prompts[pos_point_idx].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([1]).unsqueeze(0).to(args.device)) + else: + neg_point_idx1 = random.randint(0,y_liver_neg.shape[0]-1) + neg_point_idx2 = random.randint(0,y_liver_neg.shape[0]-1) + points_liver = (neg_prompts[neg_point_idx1].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([-1]).unsqueeze(0).to(args.device)) + + #convert all grayscale pixels due to resizing back to 0, 1 + _, tumor_label = data_transform(img, tumor_label, is_train=False, apply_norm=True) + tumor_label = (tumor_label>=0.5)+0 + # tumor_label = tumor_label[0] + + #get positive point prompts + _,y_tumor,x_tumor = torch.where(tumor_label==1) + pos_prompts = torch.cat([x_tumor.unsqueeze(1),y_tumor.unsqueeze(1)],dim=1) + + #get negative point prompts + _,y_tumor_neg,x_tumor_neg = torch.where(tumor_label==0) + neg_prompts = (torch.cat([x_tumor_neg.unsqueeze(1),y_tumor_neg.unsqueeze(1)],dim=1)) + + if len(y_tumor)>0: + pos_point_idx = random.randint(0,y_tumor.shape[0]-1) + neg_point_idx = random.randint(0,y_tumor_neg.shape[0]-1) + # points_tumor = (torch.cat([pos_prompts[pos_point_idx].unsqueeze(0), neg_prompts[neg_point_idx].unsqueeze(0)],dim=0).unsqueeze(0).to(args.device), torch.Tensor([1,-1]).unsqueeze(0).to(args.device)) + points_tumor = (pos_prompts[pos_point_idx].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([1]).unsqueeze(0).to(args.device)) + + else: + neg_point_idx1 = random.randint(0,y_tumor_neg.shape[0]-1) + neg_point_idx2 = random.randint(0,y_tumor_neg.shape[0]-1) + # points_tumor = (torch.cat([neg_prompts[neg_point_idx1].unsqueeze(0), neg_prompts[neg_point_idx2].unsqueeze(0)],dim=0).unsqueeze(0).to(args.device), torch.Tensor([-1,-1]).unsqueeze(0).to(args.device)) + points_tumor = (neg_prompts[neg_point_idx1].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([-1]).unsqueeze(0).to(args.device)) + + + + #get image embeddings + img = img1.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(1,1,1,1) + masks_liver = model.get_masks_with_manual_prompts(img_embeds_repeated, points=points_liver).cpu() + masks_tumor = model.get_masks_with_manual_prompts(img_embeds_repeated, points=points_tumor).cpu() + + plt.imshow((masks_liver[0]>=0.5), cmap='gray') + if len(y_liver)>0: + plt.scatter(x_liver[pos_point_idx], y_liver[pos_point_idx], c='green') + # plt.scatter(x_neg[neg_point_idx], y_neg[neg_point_idx], c='red') + else: + plt.scatter(x_liver_neg[neg_point_idx1], y_liver_neg[neg_point_idx1], c='red') + # 
plt.scatter(x_neg[neg_point_idx2], y_neg[neg_point_idx2], c='red') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', image_name[:-4] +'_liver.png')) + plt.close() + # plt.show() + + plt.imshow((masks_tumor[0]>=0.5), cmap='gray') + if len(y_tumor)>0: + plt.scatter(x_tumor[pos_point_idx], y_tumor[pos_point_idx], c='green') + # plt.scatter(x_neg[neg_point_idx], y_neg[neg_point_idx], c='red') + else: + plt.scatter(x_tumor_neg[neg_point_idx1], y_tumor_neg[neg_point_idx1], c='red') + # plt.scatter(x_neg[neg_point_idx2], y_neg[neg_point_idx2], c='red') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', image_name[:-4] +'_tumor.png')) + plt.close() + # plt.show() + + + plt.imshow((liver_label[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', image_name[:-4] +'_liver.png')) + plt.close() + # plt.show() + + + plt.imshow((tumor_label[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', image_name[:-4] +'_tumor.png')) + plt.close() + # plt.show() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + # print(liver_label.shape) + # print((((masks[0]>=0.5)+0).unsqueeze(0)).shape) + liver_dices.append(dice_coef(liver_label, ((masks_liver[0]>=0.5)+0).unsqueeze(0))) + tumor_dices.append(dice_coef(tumor_label, ((masks_tumor[0]>=0.5)+0).unsqueeze(0))) + + liver_ious.append(iou_coef(liver_label, ((masks_liver[0]>=0.5)+0).unsqueeze(0))) + tumor_ious.append(iou_coef(tumor_label, ((masks_tumor[0]>=0.5)+0).unsqueeze(0))) + # 1/0 + # break + print("Liver DICE: ",torch.mean(torch.Tensor(liver_dices))) + print("Liver IoU", torch.mean(torch.Tensor(liver_ious))) + print("Tumor DICE", torch.mean(torch.Tensor(tumor_dices))) + print("Tumor IoU", torch.mean(torch.Tensor(tumor_ious))) +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/polyp/config_polyp.yml b/AllinonSAM/eval/polyp/config_polyp.yml new file mode 100644 index 0000000000000000000000000000000000000000..436314ef34e627c863638693f42f8b764d8ffb90 --- /dev/null +++ b/AllinonSAM/eval/polyp/config_polyp.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 1024 + use_random_crop: False + use_rotation: False + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: Polyp + root_path: '/media/ubuntu/New Volume/jay/Polyp2' + label_list: [1] + label_names: ['Polyp'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/eval/polyp/generate_all_results.sh b/AllinonSAM/eval/polyp/generate_all_results.sh new file mode 100644 index 0000000000000000000000000000000000000000..6e677e66c8926c652cfcda1bd5439f57fd90725d --- /dev/null +++ b/AllinonSAM/eval/polyp/generate_all_results.sh @@ -0,0 +1,13 @@ +declare -a StringArray=("CVC-300" "CVC-ClinicDB" "CVC-ColonDB" "ETIS-LaribPolypDB" "Kvasir") +# declare -a StringArray=("Kvasir") +# echo "Training Accuracy: " +# python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/PolypDataset/kvasirsegTRimage" --data_config config_polyp.yml --model_config model_svdtuning.yml --save_path "./svdsam_polyp_1024/${dataset}" --gt_path "/media/ubuntu/New Volume/jay/PolypDataset/kvasirsegTRmask" --labels_of_interest "Polyp" --pretrained_path "svdsam_polyp_1024.pth" + +echo "Testing Accuracy: " +for dataset in "${StringArray[@]}"; do + echo "${dataset}" + + python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/Polyp2/TestDataset/${dataset}/images" --data_config config_polyp.yml --model_config 
model_svdtuning.yml --save_path "./svdsam_polyp_1024/${dataset}" --gt_path "/media/ubuntu/New Volume/jay/Polyp2/TestDataset/${dataset}/masks" --labels_of_interest "Polyp" --pretrained_path "svdsam_polyp_1024.pth" + + echo "......................." +done \ No newline at end of file diff --git a/AllinonSAM/eval/polyp/generate_predictions.py b/AllinonSAM/eval/polyp/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..a5dc32dd7bf1fa767bfca585ff63daad1f8ece45 --- /dev/null +++ b/AllinonSAM/eval/polyp/generate_predictions.py @@ -0,0 +1,158 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * +from data_transforms.polyp_transform import Polyp_Transform + +label_names = ['Polyp'] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + 
#load data transform + data_transform = Polyp_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + if (('png' not in img_name) and ('jpg' not in img_name) and ('jpeg' not in img_name)): + continue + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name[:-4]+'.png')) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = torch.Tensor(np.array(Image.open(gt_path))) + if len(label.shape)==3: + label = label[:,:,0] + label = label.unsqueeze(0) + mask = (label>0)+0 + # plt.imshow(gold) + # plt.show() + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + + plt.imshow((masks[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/polyp/model_svdtuning.yml b/AllinonSAM/eval/polyp/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..aa018b475a0e77c4560c3f4a04df6f05d2a51117 --- /dev/null +++ b/AllinonSAM/eval/polyp/model_svdtuning.yml @@ -0,0 +1,31 @@ +sam: + img_size: 1024 + num_classes: 1 + sam_type: 'base' + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 32 + num_epochs: 1000 + schedule_step: 50 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 + +use_lora: False \ No newline at end of file diff --git a/AllinonSAM/eval/post_evaluations.ipynb b/AllinonSAM/eval/post_evaluations.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..5c16afbebfc42816ddfe5719ce7eb5ef15bd02a6 --- /dev/null +++ b/AllinonSAM/eval/post_evaluations.ipynb @@ -0,0 +1,761 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/ubuntu/anaconda3/envs/dassl/lib/python3.8/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "from scipy import stats\n", + "import numpy as np\n", + "import os\n", + "import matplotlib.pyplot as plt\n", + "import monai\n", + "import torch" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def compute_dices(preds, gt):\n", + " intersection = np.sum(gt * preds,axis=(-1,-2))\n", + " union = np.sum(gt, axis=(-1,-2)) + np.sum(preds, axis=(-1,-2))\n", + " dice = ((2. * intersection + 1)/(union + 1))\n", + " return dice\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def t_test(dices_a, dices_b):\n", + " differences = dices_a - dices_b\n", + " t_statistic, p_value = stats.ttest_rel(dices_a, dices_b)\n", + " print(p_value)\n", + " alpha = 0.05\n", + " if p_value < alpha:\n", + " print(\"There is significant difference\")\n", + " return p_value " + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def t_test_folder(pred_folder1, pred_folder2):\n", + " dices_a = []\n", + " dices_b = []\n", + " try:\n", + " for c in os.listdir(pred_folder1):\n", + " for i in os.listdir(os.path.join(pred_folder1,c,'rescaled_preds')):\n", + " p1 = plt.imread(os.path.join(pred_folder1, c, 'rescaled_preds', i))[80:400,150:500,0]\n", + " # plt.imshow(p1, cmap='gray')\n", + " # plt.show()\n", + " p2 = plt.imread(os.path.join(pred_folder2, c, 'rescaled_preds', i))[80:400,150:500,0]\n", + " gt = plt.imread(os.path.join(pred_folder2, c, 'rescaled_gt', i))[80:400,150:500,0]\n", + " # print(p1.shape)\n", + " # 1/0\n", + " dice_a = compute_dices(p1, gt)\n", + " dice_b = compute_dices(p2, gt)\n", + " dices_a.append(dice_a)\n", + " dices_b.append(dice_b)\n", + " except:\n", + " 1/0\n", + " return dices_a, dices_b" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def t_test_lits2(pred_folder1, pred_folder2):\n", + " dices_a = []\n", + " dices_b = []\n", + " try:\n", + " for i in os.listdir(os.path.join(pred_folder1,'rescaled_preds')):\n", + " p1 = plt.imread(os.path.join(pred_folder1, 'rescaled_preds', i))[80:400,150:500,0]\n", + " # plt.imshow(p1, cmap='gray')\n", + " # plt.show()\n", + " p2 = plt.imread(os.path.join(pred_folder2, 'rescaled_preds', i))[80:400,150:500,0]\n", + " gt = plt.imread(os.path.join(pred_folder2, 'rescaled_gt', i))[80:400,150:500,0]\n", + " # print(p1.shape)\n", + " # 1/0\n", + " dice_a = compute_dices(p1, gt)\n", + " dice_b = compute_dices(p2, gt)\n", + " dices_a.append(dice_a)\n", + " dices_b.append(dice_b)\n", + " except:\n", + " 1/0\n", + " return dices_a, dices_b" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "4.5704910462821235e-26\n", + "There is significant difference\n", + "4.5704910462821235e-26\n" + ] + } + ], + "source": [ + "#CholecSeg8k\n", + "folder1 = './cholec8k/svd_shiftscale_cholec_tal_focal075_1e-4'\n", + "folder2 = './cholec8k/lora_cholec_tmp/'\n", + "dices_a, dices_b = t_test_folder(folder1, folder2)\n", + "print(t_test(np.array(dices_a), np.array(dices_b)))" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.117079926030917e-06\n", + "There is 
significant difference\n", + "2.117079926030917e-06\n" + ] + } + ], + "source": [ + "#Ultrasound\n", + "folder1 = '/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/SVDSAM'\n", + "folder2 = '/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/Lora_results'\n", + "dices_a, dices_b = t_test_folder(folder1, folder2)\n", + "print(t_test(np.array(dices_a), np.array(dices_b)))" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "5.985398380396225e-14\n", + "There is significant difference\n", + "5.985398380396225e-14\n" + ] + } + ], + "source": [ + "#ChestXDet\n", + "folder1 = './chestXDet/SVDSAM'\n", + "folder2 = './chestXDet/lora_chestxdet_tal_focal075_1e-3'\n", + "dices_a, dices_b = t_test_folder(folder1, folder2)\n", + "print(t_test(np.array(dices_a), np.array(dices_b)))" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.00015471901272947407\n", + "There is significant difference\n", + "0.00015471901272947407\n" + ] + } + ], + "source": [ + "#LITS2\n", + "folder1 = './lits2/svdsam'\n", + "folder2 = './lits2/lora_lits2/'\n", + "dices_a, dices_b = t_test_lits2(folder1, folder2)\n", + "print(t_test(np.array(dices_a), np.array(dices_b)))" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.5465986918502266e-23\n", + "There is significant difference\n", + "2.5465986918502266e-23\n" + ] + } + ], + "source": [ + "#GLAS\n", + "folder1 = './glas/svdshiftscale_glas_tal_CE_1e-4'\n", + "folder2 = './glas/lorasam'\n", + "dices_a, dices_b = t_test_lits2(folder1, folder2)\n", + "print(t_test(np.array(dices_a), np.array(dices_b)))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def compute_surface_dice(preds, gt, threshold=[0]):\n", + " preds = torch.Tensor(preds).unsqueeze(0).unsqueeze(0)\n", + " gt = torch.Tensor(gt).unsqueeze(0).unsqueeze(0)\n", + " cd = monai.metrics.compute_surface_dice(preds, gt, threshold, include_background=True)\n", + " return cd.item()\n", + "\n", + "def compute_hd95(preds, gt):\n", + " preds = torch.Tensor(preds).unsqueeze(0).unsqueeze(0)\n", + " gt = torch.Tensor(gt).unsqueeze(0).unsqueeze(0)\n", + " cd = monai.metrics.compute_hausdorff_distance(preds, gt, include_background=True, percentile=95)\n", + " return cd.item()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def surface_dice_lits(pred_folder1, pred_folder2):\n", + " dices_a = []\n", + " dices_b = []\n", + " try:\n", + " for i in os.listdir(os.path.join(pred_folder1,'rescaled_preds')):\n", + " p1 = plt.imread(os.path.join(pred_folder1, 'rescaled_preds', i))[80:400,150:500,0]\n", + " # plt.imshow(p1, cmap='gray')\n", + " # plt.show()\n", + " p2 = plt.imread(os.path.join(pred_folder2, 'rescaled_preds', i))[80:400,150:500,0]\n", + " gt = plt.imread(os.path.join(pred_folder2, 'rescaled_gt', i))[80:400,150:500,0]\n", + " # print(p1.shape)\n", + " # 1/0\n", + " dice_a = compute_surface_dice(p1, gt)\n", + " dice_b = compute_surface_dice(p2, gt)\n", + " dices_a.append(dice_a)\n", + " dices_b.append(dice_b)\n", + " except:\n", + " 1/0\n", + " return np.mean(dices_a), np.mean(dices_b)\n", + "\n", + "def hd95_lits(pred_folder1, 
pred_folder2):\n", + " dices_a = []\n", + " dices_b = []\n", + " try:\n", + " for i in os.listdir(os.path.join(pred_folder1,'rescaled_preds')):\n", + " p1 = plt.imread(os.path.join(pred_folder1, 'rescaled_preds', i))[80:400,150:500,0]\n", + " # plt.imshow(p1, cmap='gray')\n", + " # plt.show()\n", + " p2 = plt.imread(os.path.join(pred_folder2, 'rescaled_preds', i))[80:400,150:500,0]\n", + " gt = plt.imread(os.path.join(pred_folder2, 'rescaled_gt', i))[80:400,150:500,0]\n", + " # print(p1.shape)\n", + " # 1/0\n", + " dice_a = compute_hd95(p1, gt)\n", + " dice_b = compute_hd95(p2, gt)\n", + " dices_a.append(dice_a)\n", + " dices_b.append(dice_b)\n", + " except:\n", + " 1/0\n", + " return np.mean(dices_a), np.mean(dices_b)\n", + "\n", + "\n", + "def surface_dice(pred_folder1, pred_folder2):\n", + " dices_a = []\n", + " dices_b = []\n", + " try:\n", + " for c in os.listdir(pred_folder1):\n", + " for i in os.listdir(os.path.join(pred_folder1,c,'rescaled_preds')):\n", + " p1 = plt.imread(os.path.join(pred_folder1, c, 'rescaled_preds', i))[80:400,150:500,0]\n", + " # plt.imshow(p1, cmap='gray')\n", + " # plt.show()\n", + " p2 = plt.imread(os.path.join(pred_folder2, c, 'rescaled_preds', i))[80:400,150:500,0]\n", + " gt = plt.imread(os.path.join(pred_folder2, c, 'rescaled_gt', i))[80:400,150:500,0]\n", + " if not gt.any():\n", + " continue\n", + " # print(p1.shape)\n", + " # 1/0\n", + " dice_a = compute_surface_dice(p1, gt)\n", + " dice_b = compute_surface_dice(p2, gt)\n", + " if not np.isnan(dice_a):\n", + " dices_a.append(dice_a)\n", + " if not np.isnan(dice_b):\n", + " dices_b.append(dice_b)\n", + " except:\n", + " 1/0\n", + " return np.mean(dices_a), np.mean(dices_b)" + ] + }, + { + "cell_type": "code", + "execution_count": 77, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Surface distance of S-SAM: 0.1306660174857825\n", + "Surface distance of LoRA: 0.19686604533344507\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/ubuntu/anaconda3/envs/dassl/lib/python3.8/site-packages/monai/metrics/utils.py:338: UserWarning: the prediction of class 0 is all 0, this may result in nan/inf distance.\n", + " warnings.warn(\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Surface distance of adaptivesam: 0.21469646031036974\n", + "Surface distance of sam point zs: 0.026642101479228585\n" + ] + } + ], + "source": [ + "#GLAS\n", + "folder1 = './glas/svdshiftscale_glas_tal_CE_1e-4'\n", + "folder2 = './glas/lorasam'\n", + "folder3 = './glas/adaptivesam'\n", + "folder4 = './glas/sam_point_glas'\n", + "dices_a, dices_b = surface_dice_lits(folder1, folder2)\n", + "print('Surface distance of S-SAM: ', dices_a)\n", + "print('Surface distance of LoRA: ', dices_b)\n", + "dices_c, dices_d = surface_dice_lits(folder3, folder4)\n", + "print('Surface distance of adaptivesam: ', dices_c)\n", + "print('Surface distance of sam point zs: ', dices_d)" + ] + }, + { + "cell_type": "code", + "execution_count": 71, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HD95 of S-SAM: 49.37816700935364\n", + "HD95 of LoRA: 44.13817956447601\n" + ] + } + ], + "source": [ + "#GLAS\n", + "folder1 = './glas/svdshiftscale_glas_tal_CE_1e-4'\n", + "folder2 = './glas/lorasam'\n", + "dices_a, dices_b = hd95_lits(folder1, folder2)\n", + "print('HD95 of S-SAM: ', dices_a)\n", + "print('HD95 of LoRA: ', dices_b)" + ] + }, + { + "cell_type": "code", + "execution_count": 75, + "metadata": {}, 
+ "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/ubuntu/anaconda3/envs/dassl/lib/python3.8/site-packages/monai/metrics/utils.py:333: UserWarning: the ground truth of class 0 is all 0, this may result in nan/inf distance.\n", + " warnings.warn(\n", + "/home/ubuntu/anaconda3/envs/dassl/lib/python3.8/site-packages/monai/metrics/utils.py:338: UserWarning: the prediction of class 0 is all 0, this may result in nan/inf distance.\n", + " warnings.warn(\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Surface distance of S-SAM: 0.1387163305927508\n", + "Surface distance of LoRA: 0.11434185548909355\n" + ] + } + ], + "source": [ + "#CholecSeg8k\n", + "folder1 = './cholec8k/svd_shiftscale_cholec_tal_focal075_1e-4'\n", + "folder2 = './cholec8k/lora_cholec_tmp/'\n", + "dices_a, dices_b = surface_dice(folder1, folder2)\n", + "print('Surface distance of S-SAM: ', dices_a)\n", + "print('Surface distance of LoRA: ', dices_b)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def hd95(folder, trim=True):\n", + " hd_a = []\n", + " for i in os.listdir(os.path.join(folder,'rescaled_preds')):\n", + " if trim:\n", + " p1 = plt.imread(os.path.join(folder, 'rescaled_preds', i))[80:400,150:500,0]\n", + " gt = plt.imread(os.path.join(folder, 'rescaled_gt', i))[80:400,150:500,0]\n", + " else:\n", + " p1 = plt.imread(os.path.join(folder, 'rescaled_preds', i))\n", + " gt = plt.imread(os.path.join(folder, 'rescaled_gt', i))\n", + " hd = compute_hd95(p1, gt)\n", + " if np.isnan(hd):\n", + " continue\n", + " hd_a.append(hd)\n", + "\n", + " return np.mean(hd_a)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/ubuntu/anaconda3/envs/dassl/lib/python3.8/site-packages/monai/metrics/utils.py:333: UserWarning: the ground truth of class 0 is all 0, this may result in nan/inf distance.\n", + " warnings.warn(\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HD95 of ISIC lora point: 30.952387651606422\n" + ] + } + ], + "source": [ + "folder1 = './isic2018/point_lora'\n", + "hds_a = hd95(folder1)\n", + "print('HD95 of ISIC lora point: ', hds_a)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HD95 of ISIC VP: 70.9658404827118\n" + ] + } + ], + "source": [ + "folder2 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/vp'\n", + "hds_b = hd95(folder2, trim=False)\n", + "print('HD95 of ISIC VP: ', hds_b)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HD95 of Kvasir VP: 99.31089088042577\n" + ] + } + ], + "source": [ + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/Kvasir-Seg/vp'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of Kvasir VP: ', hds_b)" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HD95 of refuge VP: 201.66749757289887\n" + ] + } + ], + "source": [ + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/refuge/vp'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of refuge VP: ', hds_b)" + ] + }, + { + "cell_type": 
"code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HD95 of ev17 VP: 83.76758135225008\n" + ] + } + ], + "source": [ + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/endovis17/vp'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of ev17 VP: ', hds_b)" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HD95 of ISIC SAMZS: 85.98172161102295\n", + "HD95 of Kvasir SAMZS: 99.28112110296885\n", + "HD95 of refuge SAMZS: 214.98097531795503\n", + "HD95 of ev17 SAMZS: 89.458785050192\n" + ] + } + ], + "source": [ + "folder2 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/sam_zs'\n", + "hds_b = hd95(folder2, trim=False)\n", + "print('HD95 of ISIC SAMZS: ', hds_b)\n", + "\n", + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/Kvasir-Seg/sam_zs'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of Kvasir SAMZS: ', hds_b)\n", + "\n", + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/refuge/sam_zs'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of refuge SAMZS: ', hds_b)\n", + "\n", + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/endovis17/samzs'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of ev17 SAMZS: ', hds_b)" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HD95 of ISIC blackbox: 70.33514058589935\n", + "HD95 of Kvasir blackbox: 83.55525796731312\n", + "HD95 of refuge blackbox: 176.66371806144716\n", + "HD95 of ev17 blackbox: 81.5668385080266\n" + ] + } + ], + "source": [ + "folder2 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/model1_besttr'\n", + "hds_b = hd95(folder2, trim=False)\n", + "print('HD95 of ISIC blackbox: ', hds_b)\n", + "\n", + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/Kvasir-Seg/model26'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of Kvasir blackbox: ', hds_b)\n", + "\n", + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/refuge/model1'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of refuge blackbox: ', hds_b)\n", + "\n", + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/endovis17/model2_tr'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of ev17 blackbox: ', hds_b)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HD95 of ev17 blackbox: 85.75525301522042\n" + ] + } + ], + "source": [ + "#BlackVIP\n", + "# folder2 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/model2_bestval'\n", + "# hds_b = hd95(folder2, trim=False)\n", + "# print('HD95 of ISIC blackbox: ', hds_b)\n", + "\n", + "# folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/Kvasir-Seg/model23'\n", + "# hds_b = hd95(folder3, trim=False)\n", + "# print('HD95 of Kvasir blackbox: ', hds_b)\n", + "\n", + "# folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/refuge/model1_tr2'\n", + "# hds_b = hd95(folder3, trim=False)\n", + "# print('HD95 of refuge blackbox: ', hds_b)\n", + "\n", 
+ "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/endovis17/model2_val'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of ev17 blackbox: ', hds_b)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "ename": "FileNotFoundError", + "evalue": "[Errno 2] No such file or directory: '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/model1_medsam_tr/rescaled_preds'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[10], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m#BlackMedSAM\u001b[39;00m\n\u001b[1;32m 2\u001b[0m folder2 \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/model1_medsam_tr\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[0;32m----> 3\u001b[0m hds_b \u001b[38;5;241m=\u001b[39m \u001b[43mhd95\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfolder2\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrim\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mHD95 of ISIC model2: \u001b[39m\u001b[38;5;124m'\u001b[39m, hds_b)\n\u001b[1;32m 6\u001b[0m folder2 \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/model1_medsam_tr\u001b[39m\u001b[38;5;124m'\u001b[39m\n", + "Cell \u001b[0;32mIn[2], line 3\u001b[0m, in \u001b[0;36mhd95\u001b[0;34m(folder, trim)\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mhd95\u001b[39m(folder, trim\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m):\n\u001b[1;32m 2\u001b[0m hd_a \u001b[38;5;241m=\u001b[39m []\n\u001b[0;32m----> 3\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlistdir\u001b[49m\u001b[43m(\u001b[49m\u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpath\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfolder\u001b[49m\u001b[43m,\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mrescaled_preds\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m:\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m trim:\n\u001b[1;32m 5\u001b[0m p1 \u001b[38;5;241m=\u001b[39m plt\u001b[38;5;241m.\u001b[39mimread(os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(folder, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mrescaled_preds\u001b[39m\u001b[38;5;124m'\u001b[39m, i))[\u001b[38;5;241m80\u001b[39m:\u001b[38;5;241m400\u001b[39m,\u001b[38;5;241m150\u001b[39m:\u001b[38;5;241m500\u001b[39m,\u001b[38;5;241m0\u001b[39m]\n", + "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/model1_medsam_tr/rescaled_preds'" + ] + } + ], + "source": [ + "#BlackMedSAM\n", + "folder2 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/model1_medsam_tr'\n", + "hds_b = hd95(folder2, trim=False)\n", + "print('HD95 of ISIC model2: ', hds_b)\n", + 
"\n", + "folder2 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/model1_medsam_tr'\n", + "hds_b = hd95(folder2, trim=False)\n", + "print('HD95 of ISIC model1: ', hds_b)\n", + "\n", + "folder2 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/vp_medsam'\n", + "hds_b = hd95(folder2, trim=False)\n", + "print('HD95 of ISIC vp: ', hds_b)\n", + "\n", + "folder2 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/isic/medsam_zs'\n", + "hds_b = hd95(folder2, trim=False)\n", + "print('HD95 of ISIC medsam-zs: ', hds_b)\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HD95 of Kvasir blackbox medsam model1: 97.05688610394796\n", + "HD95 of Kvasir blackbox medsam model2: 80.01432559490203\n", + "HD95 of Kvasir blackbox: 90.36405935287476\n", + "HD95 of Kvasir blackbox medsam zs: 98.39987009366354\n" + ] + } + ], + "source": [ + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/Kvasir-Seg/model1_medsam_besttr'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of Kvasir blackbox medsam model1: ', hds_b)\n", + "\n", + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/Kvasir-Seg/model2_medsam_besttr'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of Kvasir blackbox medsam model2: ', hds_b)\n", + "\n", + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/Kvasir-Seg/vp_medsam'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of Kvasir blackbox: ', hds_b)\n", + "\n", + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/Kvasir-Seg/medsam_zs'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of Kvasir blackbox medsam zs: ', hds_b)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/refuge/model1_tr2'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of refuge blackbox: ', hds_b)\n", + "\n", + "folder3 = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/Blackbox/eval/endovis17/model2_val'\n", + "hds_b = hd95(folder3, trim=False)\n", + "print('HD95 of ev17 blackbox: ', hds_b)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.8.16 ('dassl')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.16" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "5b0d24c0401191df5ff06ef3cb04a21077c1fd7ca08d243336ea8a8a1206ff02" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/AllinonSAM/eval/refuge/config_refuge.yml b/AllinonSAM/eval/refuge/config_refuge.yml new file mode 100644 index 0000000000000000000000000000000000000000..790627814e1e3571eaaa374075d7b4def3538527 --- /dev/null +++ b/AllinonSAM/eval/refuge/config_refuge.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 512 + use_random_crop: False + use_rotation: True + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: True + brightness: 2 + use_horizontal_flip: True +data: + name: Refuge + root_path: 
'/media/ubuntu/New Volume/jay/fundus_images/archive/REFUGE' + label_list: [1,2] + label_names: ['optic cup', 'optic disk'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/eval/refuge/generate_all_predictions.sh b/AllinonSAM/eval/refuge/generate_all_predictions.sh new file mode 100644 index 0000000000000000000000000000000000000000..954da53b5e7bc21e4d973b96698d764bc9e62748 --- /dev/null +++ b/AllinonSAM/eval/refuge/generate_all_predictions.sh @@ -0,0 +1,5 @@ +echo "Optic cup: " +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/fundus_images/archive/REFUGE/test/Images_Cropped" --gt_path "/media/ubuntu/New Volume/jay/fundus_images/archive/REFUGE/test/Masks_Cropped" --data_config "config_refuge.yml" --model_config "model_svdtuning.yml" --pretrained_path "svdtuning_shiftscale_refuge_tal_focal075_alpha2_1e-4_512_cropped.pth" --save_path "svdtuning_shiftscale_refuge_tal_focal075_alpha2_1e-4_512_cropped/optic_cup" --labels_of_interest "optic cup" --device "cuda:1" + +echo "Optic Disk" +python generate_predictions.py --data_folder "/media/ubuntu/New Volume/jay/fundus_images/archive/REFUGE/test/Images_Cropped" --gt_path "/media/ubuntu/New Volume/jay/fundus_images/archive/REFUGE/test/Masks_Cropped" --data_config "config_refuge.yml" --model_config "model_svdtuning.yml" --pretrained_path "svdtuning_shiftscale_refuge_tal_focal075_alpha2_1e-4_512_cropped.pth" --save_path "svdtuning_shiftscale_refuge_tal_focal075_alpha2_1e-4_512_cropped/optic_disk" --labels_of_interest "optic disk" --device "cuda:1" diff --git a/AllinonSAM/eval/refuge/generate_predictions.py b/AllinonSAM/eval/refuge/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..8be1fed2a0076a8c8c674b7633fcf9d9dd971a2a --- /dev/null +++ b/AllinonSAM/eval/refuge/generate_predictions.py @@ -0,0 +1,165 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * +from data_transforms.refuge_transform import Refuge_Transform + +label_names = ['optic cup', 'optic disk'] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with 
open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + + label_dict = { + 'optic cup': 2, + 'optic disk': 1 + } + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = Refuge_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + if (('png' not in img_name) and ('jpg' not in img_name) and ('jpeg' not in img_name) and ('bmp' not in img_name)): + continue + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name[:-4]+'.png')) + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label_of_interest = args.labels_of_interest + label = torch.Tensor(np.array(Image.open(gt_path))) + if len(label.shape)==3: + label = label[:,:,0] + label = (label == label_dict[label_of_interest]) + label = label.unsqueeze(0) + mask = (label>0)+0 + # plt.imshow(gold) + # plt.show() + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + + plt.imshow((masks[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name[:-4]+'.png')) + plt.close() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/refuge/model_svdtuning.yml b/AllinonSAM/eval/refuge/model_svdtuning.yml new file mode 100644 index 
0000000000000000000000000000000000000000..c15308063b7e3faf59e7a4cce4acaba59d66962f --- /dev/null +++ b/AllinonSAM/eval/refuge/model_svdtuning.yml @@ -0,0 +1,30 @@ +sam: + img_size: 512 + num_classes: 2 + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 32 + num_epochs: 1000 + schedule_step: 1000 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 + +use_lora: False \ No newline at end of file diff --git a/AllinonSAM/eval/ultrasound/config_ultrasound.yml b/AllinonSAM/eval/ultrasound/config_ultrasound.yml new file mode 100644 index 0000000000000000000000000000000000000000..36fddb297e57dd086aa9453888b3ee8f47d7e96d --- /dev/null +++ b/AllinonSAM/eval/ultrasound/config_ultrasound.yml @@ -0,0 +1,19 @@ +data_transforms: + a_min: 0 + a_max: 255 + img_size: 256 + use_random_crop: False + use_rotation: False + rotation_angle: 10 + use_saturation: False + saturation: 2 + use_brightness: False + brightness: 2 + use_horizontal_flip: False +data: + name: ULTRASOUND + root_path: '/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/AUS' + label_list: [1,2,3,4,5,6,7,8] + label_names: ['Liver', 'Kidney', 'Pancreas', 'Vessels', 'Adrenals', 'Gall Bladder', 'Bones', 'Spleen'] + volume_channel: 2 + negative_to_positive_ratio: -1 diff --git a/AllinonSAM/eval/ultrasound/generate_all_predictions.sh b/AllinonSAM/eval/ultrasound/generate_all_predictions.sh new file mode 100644 index 0000000000000000000000000000000000000000..fbbe63484643b8e8bd4253e9df3e78ba4f932078 --- /dev/null +++ b/AllinonSAM/eval/ultrasound/generate_all_predictions.sh @@ -0,0 +1,15 @@ +python generate_predictions.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --pretrained_path "samed_ultrasound_final_256_bs32_focaldice.pth" --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/samed_ultrasound_final_256_bs32_focaldice/Liver" --labels_of_interest "Liver" + +python generate_predictions.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --pretrained_path samed_ultrasound_final_256_bs32_focaldice.pth --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/samed_ultrasound_final_256_bs32_focaldice/Kidney" --labels_of_interest "Kidney" + +python generate_predictions.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --pretrained_path samed_ultrasound_final_256_bs32_focaldice.pth --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/samed_ultrasound_final_256_bs32_focaldice/Pancreas" --labels_of_interest "Pancreas" + +python generate_predictions.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml 
--data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --pretrained_path samed_ultrasound_final_256_bs32_focaldice.pth --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/samed_ultrasound_final_256_bs32_focaldice/Vessels" --labels_of_interest "Vessels" + +python generate_predictions.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --pretrained_path samed_ultrasound_final_256_bs32_focaldice.pth --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/samed_ultrasound_final_256_bs32_focaldice/Adrenals" --labels_of_interest "Adrenals" + +python generate_predictions.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --pretrained_path samed_ultrasound_final_256_bs32_focaldice.pth --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/samed_ultrasound_final_256_bs32_focaldice/Gall Bladder" --labels_of_interest "Gall Bladder" + +python generate_predictions.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --pretrained_path samed_ultrasound_final_256_bs32_focaldice.pth --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/samed_ultrasound_final_256_bs32_focaldice/Bones" --labels_of_interest "Bones" + +python generate_predictions.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --pretrained_path samed_ultrasound_final_256_bs32_focaldice.pth --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/samed_ultrasound_final_256_bs32_focaldice/Spleen" --labels_of_interest "Spleen" \ No newline at end of file diff --git a/AllinonSAM/eval/ultrasound/generate_all_predictions_pointsam.sh b/AllinonSAM/eval/ultrasound/generate_all_predictions_pointsam.sh new file mode 100644 index 0000000000000000000000000000000000000000..f21f40f5316784b0645016546e621fed00449c14 --- /dev/null +++ b/AllinonSAM/eval/ultrasound/generate_all_predictions_pointsam.sh @@ -0,0 +1,15 @@ +python predictions_pointsam.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/sam_point_try2/Liver" --labels_of_interest "Liver" + +python predictions_pointsam.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New 
Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/sam_point_try2/Kidney" --labels_of_interest "Kidney" + +python predictions_pointsam.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/sam_point_try2/Pancreas" --labels_of_interest "Pancreas" + +python predictions_pointsam.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/sam_point_try2/Vessels" --labels_of_interest "Vessels" + +python predictions_pointsam.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/sam_point_try2/Adrenals" --labels_of_interest "Adrenals" + +python predictions_pointsam.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/sam_point_try2/Gall Bladder" --labels_of_interest "Gall Bladder" + +python predictions_pointsam.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/sam_point_try2/Bones" --labels_of_interest "Bones" + +python predictions_pointsam.py --data_config config_ultrasound.yml --model_config model_svdtuning.yml --data_folder "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/images/test" --gt_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/annotations/test" --save_path "/media/ubuntu/New Volume/jay/ultrasound/abdominal_US/abdominal_US/RUS/sam_point_try2/Spleen" --labels_of_interest "Spleen" \ No newline at end of file diff --git a/AllinonSAM/eval/ultrasound/generate_predictions.py b/AllinonSAM/eval/ultrasound/generate_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..20091c183d46312d6e867d2daa11a9f2d9e2b098 --- /dev/null +++ b/AllinonSAM/eval/ultrasound/generate_predictions.py @@ -0,0 +1,195 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Liver', 'Kidney', 'Pancreas', 
'Vessels', 'Adrenals', 'Gall Bladder', 'Bones', 'Spleen'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + 'Liver': [[100,0,100]], + 'Kidney': [[255,255,0]], + 'Pancreas': [[0,0,255]], + 'Vessels': [[255,0,0]], + 'Adrenals': [[0,255,255]], + 'Gall Bladder': [[0,255,0]], + 'Bones': [[255,255,255]], + 'Spleen': [[255,0,255]] + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = Ultrasound_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name)) + if not os.path.exists(gt_path): + gt_path = (os.path.join(args.gt_path,img_name[:-4]+'.png')) + if not 
os.path.exists(gt_path): + continue + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = np.array(Image.open(gt_path).convert("RGB")) + temp = np.zeros((H,W)).astype('uint8') + selected_color_list = label_dict[args.labels_of_interest] + for c in selected_color_list: + temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + + # plt.imshow(gold) + # plt.show() + mask = torch.Tensor(temp).unsqueeze(0) + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + x_text = [t for t in labels_of_interest] + masks = model.get_masks_for_multiple_labels(img_embeds_repeated, x_text).cpu() + argmax_masks = torch.argmax(masks, dim=0) + final_mask = torch.zeros(masks[0].shape) + final_mask_rescaled = torch.zeros(masks[0].shape).unsqueeze(-1).repeat(1,1,3) + #save masks + for i in range(final_mask.shape[0]): + for j in range(final_mask.shape[1]): + final_mask[i,j] = codes[argmax_masks[i,j]] if masks[argmax_masks[i,j],i,j]>=0.5 else 0 + # final_mask_rescaled[i,j] = torch.Tensor(visualize_dict[(labels_of_interest[argmax_masks[i,j]])] if masks[argmax_masks[i,j],i,j]>=0.5 else [0,0,0]) + + # save_im = Image.fromarray(final_mask.numpy()) + # save_im.save(os.path.join(args.save_path,'preds', img_name)) + + # plt.imshow(final_mask_rescaled,cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + # plt.close() + + # print("label shape: ", label.shape) + # plt.imshow(label[0], cmap='gray') + # plt.show() + + plt.imshow((masks[0]>=0.5), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + # print("dice: ",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + # break + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/ultrasound/generate_predictions_baselines.py b/AllinonSAM/eval/ultrasound/generate_predictions_baselines.py new file mode 100644 index 0000000000000000000000000000000000000000..c4c37cac531d1a3d0e92a76f4d988e45fb9b9d90 --- /dev/null +++ b/AllinonSAM/eval/ultrasound/generate_predictions_baselines.py @@ -0,0 +1,192 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/biastuning/") + +from data_utils import * +from model import * +from utils import * +from baselines import UNet, UNext, medt_net +from vit_seg_modeling import VisionTransformer +from vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg +from axialnet import MedT + +label_names = ['Liver', 'Kidney', 'Pancreas', 'Vessels', 'Adrenals', 'Gall Bladder', 'Bones', 'Spleen'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = 
argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + 'Liver': [[100,0,100]], + 'Kidney': [[255,255,0]], + 'Pancreas': [[0,0,255]], + 'Vessels': [[255,0,0]], + 'Adrenals': [[0,255,255]], + 'Gall Bladder': [[0,255,0]], + 'Bones': [[255,255,255]], + 'Spleen': [[255,0,255]] + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + + #load model + #change the img size in model config according to data config + in_channels = model_config['in_channels'] + out_channels = model_config['num_classes'] + img_size = model_config['img_size'] + if model_config['arch']=='Prompt Adapted SAM': + model = Prompt_Adapted_SAM(model_config, label_dict, args.device, training_strategy='biastuning') + elif model_config['arch']=='UNet': + model = UNet(in_channels=in_channels, out_channels=out_channels) + elif model_config['arch']=='UNext': + model = UNext(num_classes=out_channels, input_channels=in_channels, img_size=img_size) + elif model_config['arch']=='MedT': + #TODO + model = MedT(img_size=img_size, num_classes=out_channels) + elif model_config['arch']=='TransUNet': + config_vit = CONFIGS_ViT_seg['R50-ViT-B_16'] + config_vit.n_classes = out_channels + config_vit.n_skip = 3 + # if args.vit_name.find('R50') != -1: + # config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size)) + model = VisionTransformer(config_vit, img_size=img_size, num_classes=config_vit.n_classes) + + model.load_state_dict(torch.load(args.pretrained_path, map_location=args.device)) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = Ultrasound_Transform(config=data_config) + + #dice + dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name)) + if not os.path.exists(gt_path): + gt_path = (os.path.join(args.gt_path,img_name[:-4]+'.png')) + if not os.path.exists(gt_path): + continue + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + 
label = np.array(Image.open(gt_path).convert("RGB")) + + if args.gt_path: + + mask = np.zeros((len(label_dict),img.shape[1], img.shape[2])) + for i,c in enumerate(list(label_dict.keys())): + temp = np.zeros(label.shape).astype('uint8')[:,:,0] + selected_color_list = label_dict[c] + for c in selected_color_list: + temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + mask[i,:,:] = temp + mask = torch.Tensor(mask) + + else: + mask = torch.zeros((len(label_dict),H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + img = img.unsqueeze(0).to(args.device) #1XCXHXW + masks = model(img,'') + # print("masks shape: ",masks.shape) + + argmax_masks = torch.argmax(masks, dim=1).cpu().numpy() + # print("argmax masks shape: ",argmax_masks.shape) + + classwise_dices = [] + classwise_ious = [] + for j,c1 in enumerate(label_dict): + res = np.where(argmax_masks==j,1,0) + # print("res shape: ",res.shape) + plt.imshow(res[0], cmap='gray') + save_dir = os.path.join(args.save_path, c1, 'rescaled_preds') + os.makedirs(save_dir, exist_ok=True) + plt.savefig(os.path.join(args.save_path, c1, 'rescaled_preds', img_name)) + plt.close() + + if args.gt_path: + plt.imshow((mask[j]), cmap='gray') + save_dir = os.path.join(args.save_path, c1, 'rescaled_gt') + os.makedirs(save_dir, exist_ok=True) + plt.savefig(os.path.join(args.save_path, c1, 'rescaled_gt', img_name)) + plt.close() + + classwise_dices.append(dice_coef(mask[j], torch.Tensor(res[0]))) + classwise_ious.append(iou_coef(mask[j], torch.Tensor(res[0]))) + + # break + dices.append(classwise_dices) + ious.append(classwise_ious) + # print("classwise_dices: ", classwise_dices) + # print("classwise ious: ", classwise_ious) + + print(torch.mean(torch.Tensor(dices),dim=0)) + print(torch.mean(torch.Tensor(ious),dim=0)) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/eval/ultrasound/model_baseline.yml b/AllinonSAM/eval/ultrasound/model_baseline.yml new file mode 100644 index 0000000000000000000000000000000000000000..deeccfdddfb4820de7d433d5d9cbeccd72a03b44 --- /dev/null +++ b/AllinonSAM/eval/ultrasound/model_baseline.yml @@ -0,0 +1,17 @@ + +img_size: 256 +num_classes: 8 +in_channels: 3 +img_type: 'image' +arch: "TransUNet" +use_fdn: False + +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 16 + num_epochs: 500 + schedule_step: 2100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' \ No newline at end of file diff --git a/AllinonSAM/eval/ultrasound/model_svdtuning.yml b/AllinonSAM/eval/ultrasound/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..144529fd8fc7035d3d2ac31d82b4c38218d2cd02 --- /dev/null +++ b/AllinonSAM/eval/ultrasound/model_svdtuning.yml @@ -0,0 +1,31 @@ +sam: + img_size: 256 + num_classes: 13 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: False + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 32 + num_epochs: 1000 + schedule_step: 100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' + reg_multiplier: 0 + +use_lora: True \ No newline at end of file diff --git a/AllinonSAM/eval/ultrasound/predictions_pointsam.py b/AllinonSAM/eval/ultrasound/predictions_pointsam.py new file mode 100644 index 0000000000000000000000000000000000000000..a975657d75d744f283cace54d24346c32671d7b2 --- /dev/null 
+++ b/AllinonSAM/eval/ultrasound/predictions_pointsam.py @@ -0,0 +1,217 @@ +import torch +import yaml +import sys +import copy +import os +sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/SVDSAM/") + +from data_utils import * +from model import * +from utils import * + +label_names = ['Liver', 'Kidney', 'Pancreas', 'Vessels', 'Adrenals', 'Gall Bladder', 'Bones', 'Spleen'] +# visualize_li = [[1,0,0],[0,1,0],[1,0,0], [0,0,1], [0,0,1]] +label_dict = {} +# visualize_dict = {} +for i,ln in enumerate(label_names): + label_dict[ln] = i + # visualize_dict[ln] = visualize_li[i] + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_folder', default='config_tmp.yml', + help='data folder file path') + + parser.add_argument('--data_config', default='config_tmp.yml', + help='data config file path') + + parser.add_argument('--model_config', default='model_baseline.yml', + help='model config file path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + + parser.add_argument('--gt_path', default='', + help='ground truth path') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + parser.add_argument('--labels_of_interest', default='Left Prograsp Forceps,Maryland Bipolar Forceps,Right Prograsp Forceps,Left Large Needle Driver,Right Large Needle Driver', help='labels of interest') + + parser.add_argument('--codes', default='1,2,1,3,3', help='numeric label to save per instrument') + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + labels_of_interest = args.labels_of_interest.split(',') + codes = args.codes.split(',') + codes = [int(c) for c in codes] + + label_dict = { + 'Liver': [[100,0,100]], + 'Kidney': [[255,255,0]], + 'Pancreas': [[0,0,255]], + 'Vessels': [[255,0,0]], + 'Adrenals': [[0,255,255]], + 'Gall Bladder': [[0,255,0]], + 'Bones': [[255,255,255]], + 'Spleen': [[255,0,255]] + } + + + #make folder to save visualizations + os.makedirs(os.path.join(args.save_path,"preds"),exist_ok=True) + os.makedirs(os.path.join(args.save_path,"rescaled_preds"),exist_ok=True) + if args.gt_path: + os.makedirs(os.path.join(args.save_path,"rescaled_gt"),exist_ok=True) + + #load model + model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='svdtuning') + # model = Prompt_Adapted_SAM(config=model_config, label_text_dict=label_dict, device=args.device, training_strategy='lora') + + #legacy model support + if args.pretrained_path: + sdict = torch.load(args.pretrained_path, map_location=args.device) + # for key in list(sdict.keys()): + # if 'sam_encoder.neck' in key: + # if '0' in key: + # new_key = key.replace('0','conv1') + # if '1' in key: + # new_key = key.replace('1','ln1') + # if '2' in key: + # new_key = key.replace('2','conv2') + # if '3' in key: + # new_key = key.replace('3','ln2') + # sdict[new_key] = sdict[key] + # _ = sdict.pop(key) + # if 'mask_decoder' in key: + # if 'trainable' in key: + # _ = sdict.pop(key) + + model.load_state_dict(sdict,strict=True) + model = model.to(args.device) + model = model.eval() + + #load data transform + data_transform = Ultrasound_Transform(config=data_config) + + #dice + 
dices = [] + ious=[] + + #load data + for i,img_name in enumerate(sorted(os.listdir(args.data_folder))): + # if i%5!=0: + # continue + img_path = (os.path.join(args.data_folder,img_name)) + if args.gt_path: + gt_path = (os.path.join(args.gt_path,img_name)) + if not os.path.exists(gt_path): + gt_path = (os.path.join(args.gt_path,img_name[:-4]+'.png')) + if not os.path.exists(gt_path): + continue + + # print(img_path) + img = torch.as_tensor(np.array(Image.open(img_path).convert("RGB"))) + img = img.permute(2,0,1) + C,H,W = img.shape + #make a dummy mask of shape 1XHXW + if args.gt_path: + label = np.array(Image.open(gt_path).convert("RGB")) + temp = np.zeros((H,W)).astype('uint8') + selected_color_list = label_dict[args.labels_of_interest] + for c in selected_color_list: + temp = temp | (np.all(np.where(label==c,1,0),axis=2)) + + # plt.imshow(gold) + # plt.show() + mask = torch.Tensor(temp).unsqueeze(0) + + else: + mask = torch.zeros((1,H,W)) + img, mask = data_transform(img, mask, is_train=False, apply_norm=True) + mask = (mask>=0.5)+0 + + #get positive point prompts + _,y,x = torch.where(mask==1) + pos_prompts = torch.cat([x.unsqueeze(1),y.unsqueeze(1)],dim=1) + + #get negative point prompts + _,y_neg,x_neg = torch.where(mask==0) + neg_prompts = (torch.cat([x_neg.unsqueeze(1),y_neg.unsqueeze(1)],dim=1)) + + if len(y)>0: + pos_point_idx = random.randint(0,y.shape[0]-1) + neg_point_idx = random.randint(0,y_neg.shape[0]-1) + # points = (torch.cat([pos_prompts[pos_point_idx].unsqueeze(0), neg_prompts[neg_point_idx].unsqueeze(0)],dim=0).unsqueeze(0).to(args.device), torch.Tensor([1,-1]).unsqueeze(0).to(args.device)) + points = (pos_prompts[pos_point_idx].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([1]).unsqueeze(0).to(args.device)) + # print(points[0].shape) + else: + neg_point_idx1 = random.randint(0,y_neg.shape[0]-1) + neg_point_idx2 = random.randint(0,y_neg.shape[0]-1) + # points = (torch.cat([neg_prompts[neg_point_idx1].unsqueeze(0), neg_prompts[neg_point_idx2].unsqueeze(0)],dim=0).unsqueeze(0).to(args.device), torch.Tensor([-1,-1]).unsqueeze(0).to(args.device)) + points = (neg_prompts[neg_point_idx1].unsqueeze(0).unsqueeze(0).to(args.device), torch.Tensor([-1]).unsqueeze(0).to(args.device)) + # print(points[0].shape) + + #get image embeddings + img = img.unsqueeze(0).to(args.device) #1XCXHXW + img_embeds = model.get_image_embeddings(img) + + # generate masks for all labels of interest + img_embeds_repeated = img_embeds.repeat(len(labels_of_interest),1,1,1) + masks= model.get_masks_with_manual_prompts(img_embeds_repeated, points=points).cpu() + + # save_im = Image.fromarray(final_mask.numpy()) + # save_im.save(os.path.join(args.save_path,'preds', img_name)) + + # plt.imshow(final_mask_rescaled,cmap='gray') + # plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + # plt.close() + + # print("label shape: ", label.shape) + # plt.imshow(label[0], cmap='gray') + # plt.show() + + if args.gt_path: + plt.imshow((mask[0]), cmap='gray') + plt.savefig(os.path.join(args.save_path,'rescaled_gt', img_name)) + plt.close() + + plt.imshow((masks[0]>=0.5), cmap='gray') + if len(y)>0: + plt.scatter(x[pos_point_idx], y[pos_point_idx], c='green') + # plt.scatter(x_neg[neg_point_idx], y_neg[neg_point_idx], c='red') + else: + plt.scatter(x_neg[neg_point_idx1], y_neg[neg_point_idx1], c='red') + # plt.scatter(x_neg[neg_point_idx2], y_neg[neg_point_idx2], c='red') + plt.savefig(os.path.join(args.save_path,'rescaled_preds', img_name)) + plt.close() + # 10/0 + + + + # print("dice: 
",dice_coef(label, (masks>0.5)+0)) + dices.append(dice_coef(mask, (masks>=0.5)+0)) + ious.append(iou_coef(mask, (masks>=0.5)+0)) + + print(torch.mean(torch.Tensor(dices))) + print(torch.mean(torch.Tensor(ious))) + +if __name__ == '__main__': + main() + + + + + diff --git a/AllinonSAM/model.py b/AllinonSAM/model.py new file mode 100644 index 0000000000000000000000000000000000000000..0d30399740a3a34bda2c6bd5d6916755dd7af5f3 --- /dev/null +++ b/AllinonSAM/model.py @@ -0,0 +1,334 @@ +from prompt_adapted_segment_anything.modeling.image_encoder import ImageEncoderViT +from prompt_adapted_segment_anything.modeling.mask_decoder import MaskDecoder +from prompt_adapted_segment_anything.modeling.prompt_encoder import PromptEncoder +from prompt_adapted_segment_anything.modeling import TwoWayTransformer +import torch +import torch.nn as nn +from torch.nn import functional as F +from typing import Any, Dict, List, Tuple +import clip +from functools import partial, reduce +from operator import mul +import math +from typing import Union, List + +class Prompt_Adapted_SAM(nn.Module): + def __init__( + self, + config, + label_text_dict = {}, + device = 'cuda:0', + training_strategy='biastuning' + ): + super().__init__() + self.device = device + self.img_size = config['sam']['img_size'] + self.num_classes = config['sam']['num_classes'] + self.label_dict = label_text_dict + self.prompt_config = config['prompts'] + self.im_type = config['img_type'] + self.use_fdn = config['use_fdn'] + self.training_strategy = training_strategy + self.encoder_embed_dim= 1280 if config['sam']['sam_type']=='huge' else 768 + self.encoder_depth=32 if config['sam']['sam_type']=='huge' else 12 + self.encoder_num_heads=16 if config['sam']['sam_type']=='huge' else 12 + self.encoder_global_attn_indexes=[7, 15, 23, 31] if config['sam']['sam_type']=='huge' else [2, 5, 8, 11] + + #define hyperparameters, can be taken to a config later + prompt_embed_dim=256 + image_embedding_size=16 + mask_in_chans=16 + + print(self.prompt_config) + #define pretrained clip and sam models + self.sam_encoder = ImageEncoderViT(img_size=self.img_size,prompt_config=self.prompt_config, mlp_transform=config['mlp_transform'], use_lora=config['use_lora'], embed_dim=self.encoder_embed_dim, depth=self.encoder_depth, num_heads=self.encoder_num_heads, global_attn_indexes=self.encoder_global_attn_indexes) + self.clip_model, _ = clip.load("ViT-B/32", device=device) + + #define the components of sam + self.prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(self.img_size, self.img_size), + mask_in_chans=mask_in_chans, + ) + + self.mask_decoder=MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=256, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=256, + iou_head_depth=3, + iou_head_hidden_dim=256, + ) + + + #define text prompt layers if they are to be used + if self.prompt_config['USE_TEXT_PROMPT']: + if self.prompt_config['USE_SLICE_NUM']: + self.Text_Embedding_Affine = nn.Sequential( + nn.Linear(512, 128), + nn.ReLU(), + nn.BatchNorm1d(128) + ) + else: + self.Text_Embedding_Affine = nn.Sequential( + nn.Linear(512, 256), + nn.ReLU(), + nn.BatchNorm1d(256) + ) + if self.training_strategy=='prompttuning': + self.text_prompt_dropout = nn.Dropout(self.prompt_config['DROPOUT']) + self.text_prompt_embeddings = nn.Parameter(torch.zeros(self.num_classes+1, prompt_embed_dim)) + 
nn.init.xavier_uniform_(self.text_prompt_embeddings.data) + + self.label_dict = self.label_dict.update({ + 'other': self.num_classes + }) + + #define the slice number embedding + if self.prompt_config['USE_SLICE_NUM']: + self.slice_embedding = nn.Embedding(1024,128) + + #initialize sam with pretrained weights + sam_ckpt = '/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/ortho/checkpoints/sam_vit_b_01ec64.pth' + # sam_ckpt = '/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/segment-anything/checkpoints/sam_vit_h_4b8939.pth' + # sam_ckpt = '/mnt/store/jparanj1/sam_vit_b_01ec64.pth' + sam_state_dict = torch.load(sam_ckpt) + + #for medsam analysis + # sam_ckpt = '/media/ubuntu/New Volume/jay/medsam_vit_b.pth' + # sam_state_dict = torch.load(sam_ckpt) + + + for k in list(sam_state_dict.keys()): + if self.img_size!=1024: + #pos embed can be loaded only when image size is 1024 + if "pos_embed" in k: + full_matrix = sam_state_dict.pop(k) + + adapted_matrix = nn.functional.adaptive_avg_pool2d(full_matrix.permute(0,3,1,2), (self.sam_encoder.pos_embed.shape[1], self.sam_encoder.pos_embed.shape[2])) + adapted_matrix = adapted_matrix.permute(0,2,3,1) + sam_state_dict[k] = adapted_matrix + + if "image_encoder." in k: + if 'image_encoder.neck' in k: + if '0' in k: + new_key = k.replace('0','conv1') + if '1' in k: + new_key = k.replace('1','ln1') + if '2' in k: + new_key = k.replace('2','conv2') + if '3' in k: + new_key = k.replace('3','ln2') + new_key = new_key[14:] + sam_state_dict[new_key] = sam_state_dict[k] + _ = sam_state_dict.pop(k) + + else: + sam_state_dict[k[14:]] = sam_state_dict.pop(k) + + + if "prompt_encoder." in k: + sam_state_dict[k[15:]] = sam_state_dict.pop(k) + + if "mask_decoder." in k: + sam_state_dict[k[13:]] = sam_state_dict.pop(k) + + + self.sam_encoder.load_state_dict(sam_state_dict,strict=False) + + self.prompt_encoder.load_state_dict(sam_state_dict, strict=False) + + self.mask_decoder.load_state_dict(sam_state_dict,strict=False) + + def forward(self, x_img, x_text, slice_num=0): + B, C, H, W = x_img.shape + x_text = list(x_text) + + if self.prompt_config['USE_TEXT_PROMPT']: + if self.training_strategy=='prompttuning': + prompt_text = [] + for t in x_text: + try: + prompt_text.append(self.text_prompt_embeddings[self.label_dict[t]]) + except: + prompt_text.append(self.text_prompt_embeddings[-1]) + prompt_text = torch.stack(prompt_text) + + image_embeddings, reg_loss = self.sam_encoder(x_img) + if self.use_fdn: + image_embeddings = self.FDN_branch(image_embeddings, x_img) + + text_inputs = (clip.tokenize(x_text)).to(self.device) + # with torch.no_grad(): + text_features = self.clip_model.encode_text(text_inputs) + # text_features = text_features.unsqueeze(1) + # print(text_features.shape) + + + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=None, + boxes=None, + masks=None, + ) + + # print(sparse_embeddings.shape) + try: + if self.prompt_config['USE_TEXT_PROMPT']: + text_features_affine = self.Text_Embedding_Affine(text_features.float()) + else: + text_features_affine = text_features[:,:256] + except: + print(text_features.shape) + 1/0 + + if self.prompt_config['USE_SLICE_NUM']: + # print("slice num: ", slice_num) + slice_features = self.slice_embedding(torch.LongTensor(slice_num).to(self.device)) + slice_features = slice_features.unsqueeze(1) + if self.prompt_config['USE_TEXT_PROMPT'] and self.training_strategy=='prompttuning': + text_features_affine = text_features_affine + prompt_text + text_features_affine = 
text_features_affine.unsqueeze(1) + text_features_affine = text_features_affine.repeat(1,self.prompt_config['NUM_TEXT_REPEAT'],1) + sparse_embeddings = sparse_embeddings.to(self.device).repeat(B,1,1) + if self.prompt_config['USE_SLICE_NUM']: + # print(sparse_embeddings.shape) + # print(text_features_affine.shape) + # print(slice_features.shape) + sparse_embeddings = torch.cat( + [sparse_embeddings, torch.cat([text_features_affine, slice_features], dim=-1)], dim=1) + else: + sparse_embeddings = torch.cat( + [sparse_embeddings, text_features_affine], dim=1) + # print("sparse embedding shape: ", sparse_embeddings.shape) + # sparse_embeddings = sparse_embeddings.squeeze() + # sparse_embeddings = sparse_embeddings.unsqueeze(1) + + low_res_masks, iou_predictions = self.mask_decoder( + image_embeddings=image_embeddings, + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=False, + use_gsam = False + ) + high_res_masks = self.postprocess_masks(low_res_masks, (self.img_size,self.img_size), (self.img_size,self.img_size)) + return high_res_masks, reg_loss + + def get_image_embeddings(self, x_img): + with torch.no_grad(): + B, C, H, W = x_img.shape + image_embeddings,_ = self.sam_encoder(x_img) + if self.use_fdn: + image_embeddings = self.FDN_branch(image_embeddings, x_img) + return image_embeddings + + def get_masks_with_manual_prompts(self, img_embeds, points=None, boxes=None, masks=None): + B = img_embeds.shape[0] + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=points, + boxes=boxes, + masks=masks, + ) + # print("sparse embeddings shape: ", sparse_embeddings.shape) + low_res_masks, iou_predictions = self.mask_decoder( + image_embeddings=img_embeds, + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=False, + use_gsam = False + ) + high_res_masks = self.postprocess_masks(low_res_masks, (self.img_size,self.img_size), (self.img_size,self.img_size)) + return high_res_masks + + + + + def get_masks_for_multiple_labels(self, img_embeds, x_text): + ''' + img_embeds - image embeddings obtained from get_imgae_embeddings function + xtext - text prompts. 
image encoder wont be run and only the decoder will be run for each of these + ''' + B = img_embeds.shape[0] + with torch.no_grad(): + x_text = list(x_text) + if self.prompt_config['USE_TEXT_PROMPT']: + if self.training_strategy=='prompttuning': + prompt_text = [] + for t in x_text: + try: + prompt_text.append(self.text_prompt_embeddings[self.label_dict[t]]) + except: + prompt_text.append(self.text_prompt_embeddings[-1]) + prompt_text = torch.stack(prompt_text) + + text_inputs = (clip.tokenize(x_text)).to(self.device) + text_features = self.clip_model.encode_text(text_inputs) + + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=None, + boxes=None, + masks=None, + ) + + if self.prompt_config['USE_TEXT_PROMPT']: + text_features_affine = self.Text_Embedding_Affine(text_features.float()) + else: + text_features_affine = text_features[:,:256] + + if self.prompt_config['USE_TEXT_PROMPT'] and self.training_strategy=='prompttuning': + text_features_affine = text_features_affine + prompt_text + + text_features_affine = text_features_affine.unsqueeze(1) + sparse_embeddings = sparse_embeddings.to(self.device).repeat(B,1,1) + sparse_embeddings = torch.cat( + [sparse_embeddings,text_features_affine], dim=1) + + low_res_masks, iou_predictions = self.mask_decoder( + image_embeddings=img_embeds, + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=False, + use_gsam = False + ) + high_res_masks = self.postprocess_masks(low_res_masks, (self.img_size,self.img_size), (self.img_size,self.img_size)) + return high_res_masks + + + def postprocess_masks( + self, + masks: torch.Tensor, + input_size: Tuple[int, ...], + original_size: Tuple[int, ...], + ) -> torch.Tensor: + """ + Remove padding and upscale masks to the original image size. + + Arguments: + masks (torch.Tensor): Batched masks from the mask_decoder, + in BxCxHxW format. + input_size (tuple(int, int)): The size of the image input to the + model, in (H, W) format. Used to remove padding. + original_size (tuple(int, int)): The original size of the image + before resizing for input to the model, in (H, W) format. + + Returns: + (torch.Tensor): Batched masks in BxCxHxW format, where (H, W) + is given by original_size. 
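+            Note (based on this adapted implementation, not the upstream SAM code):
+            the interpolated masks are additionally passed through a sigmoid and the
+            channel dimension is squeezed, so the returned tensor holds BxHxW
+            probabilities in [0, 1] rather than raw BxCxHxW logits.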
+ """ + masks = F.interpolate( + masks, + (self.sam_encoder.img_size, self.sam_encoder.img_size), + mode="bilinear", + align_corners=False, + ) + masks = masks[..., : input_size[0], : input_size[1]] + masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False) + masks = torch.sigmoid(masks) + return masks.squeeze(1) \ No newline at end of file diff --git a/AllinonSAM/model_baseline.yml b/AllinonSAM/model_baseline.yml new file mode 100644 index 0000000000000000000000000000000000000000..8cae0912a7e9496620d0321c466637e47235688a --- /dev/null +++ b/AllinonSAM/model_baseline.yml @@ -0,0 +1,18 @@ + +img_size: 512 +num_classes: 1 +in_channels: 3 +img_type: 'image' +arch: "TransUNet" +use_fdn: False + + +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 32 + num_epochs: 500 + schedule_step: 2100 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'dice+CE' \ No newline at end of file diff --git a/AllinonSAM/model_biastuning.yml b/AllinonSAM/model_biastuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..0dd9f6de17cd0fe0f8cbc118358165a3af589f65 --- /dev/null +++ b/AllinonSAM/model_biastuning.yml @@ -0,0 +1,23 @@ +sam: + img_size: 256 + num_classes: 13 + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +prompts: + USE_TEXT_PROMPT: True + USE_IMAGE_PROMPT: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 32 + num_epochs: 1000 + schedule_step: 2000 + schedule_step_factor: 0.5 + weight_decay: 1e-2 + loss: 'focal' diff --git a/AllinonSAM/model_fdn.yml b/AllinonSAM/model_fdn.yml new file mode 100644 index 0000000000000000000000000000000000000000..75daa32cc4037f90b85df2652f29654ea79c5f02 --- /dev/null +++ b/AllinonSAM/model_fdn.yml @@ -0,0 +1,25 @@ +sam: + img_size: 256 + num_classes: 2 + +img_type: 'ct' +arch: "Prompt Adapted SAM" +use_fdn: True + +prompts: + USE_TEXT_PROMPT: True + USE_IMAGE_PROMPT: True + LOCATION: 'prepend' + DROPOUT: 0.1 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-2 + batch_size: 4 + num_epochs: 1000 + schedule_step: 200 + schedule_step_factor: 0.5 + weight_decay: 1e-7 + loss: 'dice' + diff --git a/AllinonSAM/model_lora_decoder.yml b/AllinonSAM/model_lora_decoder.yml new file mode 100644 index 0000000000000000000000000000000000000000..515ec66f674ef1077e666a8973ac562a4c8cf062 --- /dev/null +++ b/AllinonSAM/model_lora_decoder.yml @@ -0,0 +1,34 @@ +sam: + img_size: 512 + num_classes: 2 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + NUM_TEXT_REPEAT: 1 + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + + +decoder_training: full +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 8 + num_epochs: 200 + schedule_step: 200 + schedule_step_factor: 0.2 + weight_decay: 1e-2 + loss: 'focal+dice' + reg_multiplier: 0 + +# use_lora: False \ No newline at end of file diff --git a/AllinonSAM/model_lora_decoder_encoder.yml b/AllinonSAM/model_lora_decoder_encoder.yml new file mode 100644 index 0000000000000000000000000000000000000000..b275b48d07a7986982af643282d1db37721ce21a --- /dev/null +++ b/AllinonSAM/model_lora_decoder_encoder.yml @@ -0,0 +1,35 @@ +sam: + img_size: 512 + num_classes: 2 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + NUM_TEXT_REPEAT: 1 
+ USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + + +decoder_training: full +prompt_encoder: full +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 8 + num_epochs: 200 + schedule_step: 200 + schedule_step_factor: 0.2 + weight_decay: 1e-2 + loss: 'focal+dice' + reg_multiplier: 0 + +# use_lora: False \ No newline at end of file diff --git a/AllinonSAM/model_prompttuning.yml b/AllinonSAM/model_prompttuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..eb63afd2049dbb069693649bac9a4b041a6d4c26 --- /dev/null +++ b/AllinonSAM/model_prompttuning.yml @@ -0,0 +1,25 @@ +sam: + img_size: 1024 + num_classes: 5 + +img_type: 'images' +arch: "Prompt Adapted SAM" +use_fdn: False + +prompts: + USE_TEXT_PROMPT: True + USE_IMAGE_PROMPT: True + LOCATION: 'prepend' + DROPOUT: 0.1 + NUM_TOKENS: 5 + +training: + optimizer: 'adamw' + lr: 1e-3 + batch_size: 8 + num_epochs: 1000 + schedule_step: 200 + schedule_step_factor: 0.5 + weight_decay: 1e-7 + loss: 'weighted CE+dice' + diff --git a/AllinonSAM/model_svdtuning.yml b/AllinonSAM/model_svdtuning.yml new file mode 100644 index 0000000000000000000000000000000000000000..6d7fa3fef42c445b6a2f0be6a59e318893776276 --- /dev/null +++ b/AllinonSAM/model_svdtuning.yml @@ -0,0 +1,46 @@ +sam: + img_size: 512 + num_classes: 2 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + NUM_TEXT_REPEAT: 1 + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + + +decoder_training: none +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 8 + num_epochs: 200 + schedule_step: 200 + warmup_steps: 1000 + schedular: step + steps: [5000, 10000] + decay_factor: 0.1 + schedule_step_factor: 0.2 + weight_decay: 1e-2 + loss: 'focal+dice' + reg_multiplier: 0 + +#TODO: implement logic to parse this params. 
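+# NOTE (assumption, for readers of this config): the block below appears to select the
+# SALT parameter-efficient tuning variant; svd_rank_linear / svd_rank_conv2d look like the
+# number of singular values adapted in linear and conv layers, and r_lora the rank of the
+# additional low-rank term. The parsing logic referenced in the TODO above is not shown here.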
+use_salt: True +salt: + type: SALT_1 + svd_rank_linear: 500 + svd_rank_conv2d: 150 + r_lora: 256 + +# use_lora: False \ No newline at end of file diff --git a/AllinonSAM/model_svdtuning_decoder_promptencoder.yml b/AllinonSAM/model_svdtuning_decoder_promptencoder.yml new file mode 100644 index 0000000000000000000000000000000000000000..b275b48d07a7986982af643282d1db37721ce21a --- /dev/null +++ b/AllinonSAM/model_svdtuning_decoder_promptencoder.yml @@ -0,0 +1,35 @@ +sam: + img_size: 512 + num_classes: 2 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + NUM_TEXT_REPEAT: 1 + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + + +decoder_training: full +prompt_encoder: full +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 8 + num_epochs: 200 + schedule_step: 200 + schedule_step_factor: 0.2 + weight_decay: 1e-2 + loss: 'focal+dice' + reg_multiplier: 0 + +# use_lora: False \ No newline at end of file diff --git a/AllinonSAM/model_svdtuning_encoder_decoder_prompt.yml b/AllinonSAM/model_svdtuning_encoder_decoder_prompt.yml new file mode 100644 index 0000000000000000000000000000000000000000..01a18009cf409cbea6ae34803e1a0be2549eb0f2 --- /dev/null +++ b/AllinonSAM/model_svdtuning_encoder_decoder_prompt.yml @@ -0,0 +1,36 @@ +sam: + img_size: 512 + num_classes: 2 + sam_type: "base" + +img_type: 'image' +arch: "Prompt Adapted SAM" +use_fdn: False +decoder_training: 'none' +mlp_transform: False + +prompts: + USE_TEXT_PROMPT: True + NUM_TEXT_REPEAT: 1 + USE_IMAGE_PROMPT: False + USE_SLICE_NUM: False + LOCATION: 'prepend' + DROPOUT: 0 + NUM_TOKENS: 5 + + +decoder_training: full +prompt_encoder: full +sam_encoder: full +training: + optimizer: 'adamw' + lr: 1e-4 + batch_size: 6 + num_epochs: 500 + schedule_step: 200 + schedule_step_factor: 0.2 + weight_decay: 1e-2 + loss: 'focal+dice' + reg_multiplier: 0 + +# use_lora: False \ No newline at end of file diff --git a/AllinonSAM/modelsDIAS/final_model.pth b/AllinonSAM/modelsDIAS/final_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..fad80bd7dfe25035bff7ca29e5c21e0941fe33db --- /dev/null +++ b/AllinonSAM/modelsDIAS/final_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:419d62ca571949a56e96573f2b06896ad39a2cadcca30c6e2db5d2b713333b6e +size 5925395 diff --git a/AllinonSAM/prompt_adapted_segment_anything/__init__.py b/AllinonSAM/prompt_adapted_segment_anything/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..34383d83f5e76bc801f31b20e5651e383be348b6 --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
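+# This package re-exports the SAM builders (build_sam*, sam_model_registry), the
+# interactive SamPredictor, and the SamAutomaticMaskGenerator, so downstream code can
+# import them directly from prompt_adapted_segment_anything.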
+ +from .build_sam import ( + build_sam, + build_sam_vit_h, + build_sam_vit_l, + build_sam_vit_b, + sam_model_registry, +) +from .predictor import SamPredictor +from .automatic_mask_generator import SamAutomaticMaskGenerator diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/__init__.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44f3a1b237f15d971d3f7547f96054747b218244 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/__init__.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/__init__.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..062b226ce47f2f65e6a8fc8b27c4d272ffd4f4c8 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/__init__.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/__init__.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f71210c197d6e1b96d39c916bea554e99066fcb Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/__init__.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/automatic_mask_generator.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/automatic_mask_generator.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c4976b25510effc89ced97939eb542bd3e75265 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/automatic_mask_generator.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/automatic_mask_generator.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/automatic_mask_generator.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edb7b3e62a2d65a27f7b66c9cf4b9f96b81a3211 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/automatic_mask_generator.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/automatic_mask_generator.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/automatic_mask_generator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3604c62375c0ab92412bb85e817cb7e7d20b37bf Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/automatic_mask_generator.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/build_sam.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/build_sam.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24281cfdc59703770e14b5fd688a870b399a7730 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/build_sam.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/build_sam.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/build_sam.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a92765204862ae076bd94f55034b5a215db232d Binary files /dev/null and 
b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/build_sam.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/build_sam.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/build_sam.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc23facadb86976ac99356721e9d2a790acf06a3 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/build_sam.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/predictor.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/predictor.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37b8b4b19c2ab6c126840a8bc6e8e7f40e2bc514 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/predictor.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/predictor.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/predictor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e596d854325374eb97de0bfb6b73902d7ed7e471 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/predictor.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/__pycache__/predictor.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/predictor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1a8b720131ee4d01b23247cd6e9d98e30d2101c Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/__pycache__/predictor.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/automatic_mask_generator.py b/AllinonSAM/prompt_adapted_segment_anything/automatic_mask_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..23264971b7ff5aa0b4f499ade7773b68dce984b6 --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/automatic_mask_generator.py @@ -0,0 +1,372 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch +from torchvision.ops.boxes import batched_nms, box_area # type: ignore + +from typing import Any, Dict, List, Optional, Tuple + +from .modeling import Sam +from .predictor import SamPredictor +from .utils.amg import ( + MaskData, + area_from_rle, + batch_iterator, + batched_mask_to_box, + box_xyxy_to_xywh, + build_all_layer_point_grids, + calculate_stability_score, + coco_encode_rle, + generate_crop_boxes, + is_box_near_crop_edge, + mask_to_rle_pytorch, + remove_small_regions, + rle_to_mask, + uncrop_boxes_xyxy, + uncrop_masks, + uncrop_points, +) + + +class SamAutomaticMaskGenerator: + def __init__( + self, + model: Sam, + points_per_side: Optional[int] = 32, + points_per_batch: int = 64, + pred_iou_thresh: float = 0.88, + stability_score_thresh: float = 0.95, + stability_score_offset: float = 1.0, + box_nms_thresh: float = 0.7, + crop_n_layers: int = 0, + crop_nms_thresh: float = 0.7, + crop_overlap_ratio: float = 512 / 1500, + crop_n_points_downscale_factor: int = 1, + point_grids: Optional[List[np.ndarray]] = None, + min_mask_region_area: int = 0, + output_mode: str = "binary_mask", + ) -> None: + """ + Using a SAM model, generates masks for the entire image. 
+ Generates a grid of point prompts over the image, then filters + low quality and duplicate masks. The default settings are chosen + for SAM with a ViT-H backbone. + + Arguments: + model (Sam): The SAM model to use for mask prediction. + points_per_side (int or None): The number of points to be sampled + along one side of the image. The total number of points is + points_per_side**2. If None, 'point_grids' must provide explicit + point sampling. + points_per_batch (int): Sets the number of points run simultaneously + by the model. Higher numbers may be faster but use more GPU memory. + pred_iou_thresh (float): A filtering threshold in [0,1], using the + model's predicted mask quality. + stability_score_thresh (float): A filtering threshold in [0,1], using + the stability of the mask under changes to the cutoff used to binarize + the model's mask predictions. + stability_score_offset (float): The amount to shift the cutoff when + calculated the stability score. + box_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks. + crops_n_layers (int): If >0, mask prediction will be run again on + crops of the image. Sets the number of layers to run, where each + layer has 2**i_layer number of image crops. + crops_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks between different crops. + crop_overlap_ratio (float): Sets the degree to which crops overlap. + In the first crop layer, crops will overlap by this fraction of + the image length. Later layers with more crops scale down this overlap. + crop_n_points_downscale_factor (int): The number of points-per-side + sampled in layer n is scaled down by crop_n_points_downscale_factor**n. + point_grids (list(np.ndarray) or None): A list over explicit grids + of points used for sampling, normalized to [0,1]. The nth grid in the + list is used in the nth crop layer. Exclusive with points_per_side. + min_mask_region_area (int): If >0, postprocessing will be applied + to remove disconnected regions and holes in masks with area smaller + than min_mask_region_area. Requires opencv. + output_mode (str): The form masks are returned in. Can be 'binary_mask', + 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. + For large resolutions, 'binary_mask' may consume large amounts of + memory. + """ + + assert (points_per_side is None) != ( + point_grids is None + ), "Exactly one of points_per_side or point_grid must be provided." + if points_per_side is not None: + self.point_grids = build_all_layer_point_grids( + points_per_side, + crop_n_layers, + crop_n_points_downscale_factor, + ) + elif point_grids is not None: + self.point_grids = point_grids + else: + raise ValueError("Can't have both points_per_side and point_grid be None.") + + assert output_mode in [ + "binary_mask", + "uncompressed_rle", + "coco_rle", + ], f"Unknown output_mode {output_mode}." 
+ if output_mode == "coco_rle": + from pycocotools import mask as mask_utils # type: ignore # noqa: F401 + + if min_mask_region_area > 0: + import cv2 # type: ignore # noqa: F401 + + self.predictor = SamPredictor(model) + self.points_per_batch = points_per_batch + self.pred_iou_thresh = pred_iou_thresh + self.stability_score_thresh = stability_score_thresh + self.stability_score_offset = stability_score_offset + self.box_nms_thresh = box_nms_thresh + self.crop_n_layers = crop_n_layers + self.crop_nms_thresh = crop_nms_thresh + self.crop_overlap_ratio = crop_overlap_ratio + self.crop_n_points_downscale_factor = crop_n_points_downscale_factor + self.min_mask_region_area = min_mask_region_area + self.output_mode = output_mode + + @torch.no_grad() + def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: + """ + Generates masks for the given image. + + Arguments: + image (np.ndarray): The image to generate masks for, in HWC uint8 format. + + Returns: + list(dict(str, any)): A list over records for masks. Each record is + a dict containing the following keys: + segmentation (dict(str, any) or np.ndarray): The mask. If + output_mode='binary_mask', is an array of shape HW. Otherwise, + is a dictionary containing the RLE. + bbox (list(float)): The box around the mask, in XYWH format. + area (int): The area in pixels of the mask. + predicted_iou (float): The model's own prediction of the mask's + quality. This is filtered by the pred_iou_thresh parameter. + point_coords (list(list(float))): The point coordinates input + to the model to generate this mask. + stability_score (float): A measure of the mask's quality. This + is filtered on using the stability_score_thresh parameter. + crop_box (list(float)): The crop of the image used to generate + the mask, given in XYWH format. 
+ """ + + # Generate masks + mask_data = self._generate_masks(image) + + # Filter small disconnected regions and holes in masks + if self.min_mask_region_area > 0: + mask_data = self.postprocess_small_regions( + mask_data, + self.min_mask_region_area, + max(self.box_nms_thresh, self.crop_nms_thresh), + ) + + # Encode masks + if self.output_mode == "coco_rle": + mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] + elif self.output_mode == "binary_mask": + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + else: + mask_data["segmentations"] = mask_data["rles"] + + # Write mask records + curr_anns = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + curr_anns.append(ann) + + return curr_anns + + def _generate_masks(self, image: np.ndarray) -> MaskData: + orig_size = image.shape[:2] + crop_boxes, layer_idxs = generate_crop_boxes( + orig_size, self.crop_n_layers, self.crop_overlap_ratio + ) + + # Iterate over image crops + data = MaskData() + for crop_box, layer_idx in zip(crop_boxes, layer_idxs): + crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) + data.cat(crop_data) + + # Remove duplicate masks between crops + if len(crop_boxes) > 1: + # Prefer masks from smaller crops + scores = 1 / box_area(data["crop_boxes"]) + scores = scores.to(data["boxes"].device) + keep_by_nms = batched_nms( + data["boxes"].float(), + scores, + torch.zeros(len(data["boxes"])), # categories + iou_threshold=self.crop_nms_thresh, + ) + data.filter(keep_by_nms) + + data.to_numpy() + return data + + def _process_crop( + self, + image: np.ndarray, + crop_box: List[int], + crop_layer_idx: int, + orig_size: Tuple[int, ...], + ) -> MaskData: + # Crop the image and calculate embeddings + x0, y0, x1, y1 = crop_box + cropped_im = image[y0:y1, x0:x1, :] + cropped_im_size = cropped_im.shape[:2] + self.predictor.set_image(cropped_im) + + # Get points for this crop + points_scale = np.array(cropped_im_size)[None, ::-1] + points_for_image = self.point_grids[crop_layer_idx] * points_scale + + # Generate masks for this crop in batches + data = MaskData() + for (points,) in batch_iterator(self.points_per_batch, points_for_image): + batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) + data.cat(batch_data) + del batch_data + self.predictor.reset_image() + + # Remove duplicates within this crop. 
+ keep_by_nms = batched_nms( + data["boxes"].float(), + data["iou_preds"], + torch.zeros(len(data["boxes"])), # categories + iou_threshold=self.box_nms_thresh, + ) + data.filter(keep_by_nms) + + # Return to the original image frame + data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) + data["points"] = uncrop_points(data["points"], crop_box) + data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) + + return data + + def _process_batch( + self, + points: np.ndarray, + im_size: Tuple[int, ...], + crop_box: List[int], + orig_size: Tuple[int, ...], + ) -> MaskData: + orig_h, orig_w = orig_size + + # Run model on this batch + transformed_points = self.predictor.transform.apply_coords(points, im_size) + in_points = torch.as_tensor(transformed_points, device=self.predictor.device) + in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) + masks, iou_preds, _ = self.predictor.predict_torch( + in_points[:, None, :], + in_labels[:, None], + multimask_output=True, + return_logits=True, + ) + + # Serialize predictions and store in MaskData + data = MaskData( + masks=masks.flatten(0, 1), + iou_preds=iou_preds.flatten(0, 1), + points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), + ) + del masks + + # Filter by predicted IoU + if self.pred_iou_thresh > 0.0: + keep_mask = data["iou_preds"] > self.pred_iou_thresh + data.filter(keep_mask) + + # Calculate stability score + data["stability_score"] = calculate_stability_score( + data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset + ) + if self.stability_score_thresh > 0.0: + keep_mask = data["stability_score"] >= self.stability_score_thresh + data.filter(keep_mask) + + # Threshold masks and calculate boxes + data["masks"] = data["masks"] > self.predictor.model.mask_threshold + data["boxes"] = batched_mask_to_box(data["masks"]) + + # Filter boxes that touch crop boundaries + keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) + if not torch.all(keep_mask): + data.filter(keep_mask) + + # Compress to RLE + data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) + data["rles"] = mask_to_rle_pytorch(data["masks"]) + del data["masks"] + + return data + + @staticmethod + def postprocess_small_regions( + mask_data: MaskData, min_area: int, nms_thresh: float + ) -> MaskData: + """ + Removes small disconnected regions and holes in masks, then reruns + box NMS to remove any new duplicates. + + Edits mask_data in place. + + Requires open-cv as a dependency. 
+ """ + if len(mask_data["rles"]) == 0: + return mask_data + + # Filter small disconnected regions and holes + new_masks = [] + scores = [] + for rle in mask_data["rles"]: + mask = rle_to_mask(rle) + + mask, changed = remove_small_regions(mask, min_area, mode="holes") + unchanged = not changed + mask, changed = remove_small_regions(mask, min_area, mode="islands") + unchanged = unchanged and not changed + + new_masks.append(torch.as_tensor(mask).unsqueeze(0)) + # Give score=0 to changed masks and score=1 to unchanged masks + # so NMS will prefer ones that didn't need postprocessing + scores.append(float(unchanged)) + + # Recalculate boxes and remove any new duplicates + masks = torch.cat(new_masks, dim=0) + boxes = batched_mask_to_box(masks) + keep_by_nms = batched_nms( + boxes.float(), + torch.as_tensor(scores), + torch.zeros(len(boxes)), # categories + iou_threshold=nms_thresh, + ) + + # Only recalculate RLEs for masks that have changed + for i_mask in keep_by_nms: + if scores[i_mask] == 0.0: + mask_torch = masks[i_mask].unsqueeze(0) + mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0] + mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly + mask_data.filter(keep_by_nms) + + return mask_data diff --git a/AllinonSAM/prompt_adapted_segment_anything/build_sam.py b/AllinonSAM/prompt_adapted_segment_anything/build_sam.py new file mode 100644 index 0000000000000000000000000000000000000000..07abfca24e96eced7f13bdefd3212ce1b77b8999 --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/build_sam.py @@ -0,0 +1,107 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch + +from functools import partial + +from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer + + +def build_sam_vit_h(checkpoint=None): + return _build_sam( + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_global_attn_indexes=[7, 15, 23, 31], + checkpoint=checkpoint, + ) + + +build_sam = build_sam_vit_h + + +def build_sam_vit_l(checkpoint=None): + return _build_sam( + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_global_attn_indexes=[5, 11, 17, 23], + checkpoint=checkpoint, + ) + + +def build_sam_vit_b(checkpoint=None): + return _build_sam( + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_global_attn_indexes=[2, 5, 8, 11], + checkpoint=checkpoint, + ) + + +sam_model_registry = { + "default": build_sam, + "vit_h": build_sam, + "vit_l": build_sam_vit_l, + "vit_b": build_sam_vit_b, +} + + +def _build_sam( + encoder_embed_dim, + encoder_depth, + encoder_num_heads, + encoder_global_attn_indexes, + checkpoint=None, +): + prompt_embed_dim = 256 + image_size = 1024 + vit_patch_size = 16 + image_embedding_size = image_size // vit_patch_size + sam = Sam( + image_encoder=ImageEncoderViT( + depth=encoder_depth, + embed_dim=encoder_embed_dim, + img_size=image_size, + mlp_ratio=4, + norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), + num_heads=encoder_num_heads, + patch_size=vit_patch_size, + qkv_bias=True, + use_rel_pos=True, + global_attn_indexes=encoder_global_attn_indexes, + window_size=14, + out_chans=prompt_embed_dim, + ), + prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(image_size, image_size), + mask_in_chans=16, + ), + 
mask_decoder=MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + ), + pixel_mean=[123.675, 116.28, 103.53], + pixel_std=[58.395, 57.12, 57.375], + ) + sam.eval() + if checkpoint is not None: + with open(checkpoint, "rb") as f: + state_dict = torch.load(f) + sam.load_state_dict(state_dict) + return sam diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..b65097f5a2d5906e99dfd749bca493b97d8f689a --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers.py @@ -0,0 +1,213 @@ +import torch +from torch import nn +from torch.nn import functional as F +from einops import rearrange +from typing import Type, Tuple, Optional + + +""" +SALT with LoRA only +""" + +class SALTLinear(nn.Linear): + """ + A linear layer that combines truncated SVD decomposition with LoRA-style adaptation. + Only keeps top r singular values and vectors, then adds LoRA adaptation. + """ + def __init__( + self, + in_features: int, + out_features: int, + rank: int, # truncation rank for SVD + r_lora: int = 8, # LoRA rank + bias: bool = True, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + seed: int = 42 + ) -> None: + super().__init__(in_features, out_features, bias, device, dtype) + torch.manual_seed(seed) + + # Initialize parameters for SVD + self.weight.requires_grad = False + self.done_svd = False + self.U, self.S, self.Vt = self._initialize_svd() + + max_possible_rank = min(self.U.shape[1], self.S.shape[0], self.Vt.shape[0]) + print("\nThe max possible rank is", max_possible_rank) + + # Truncation rank for SVD + self.rank = rank + + # Initialize LoRA matrices + self.X = nn.Parameter(torch.randn(max_possible_rank, r_lora) * 0.01) + self.Y = nn.Parameter(torch.randn(r_lora, max_possible_rank) * 0.01) + + self.reset_parameters() + + def _initialize_svd(self): + """Initializes SVD decomposition on the weight matrix.""" + return torch.linalg.svd(self.weight, full_matrices=False) + + def perform_svd(self) -> None: + """Updates truncated SVD decomposition on the weight matrix.""" + self.U, self.S, self.Vt = self._initialize_svd() + self.done_svd = True + + def get_modified_singular_values(self) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes modified singular values using LoRA adaptation. + Returns: + Tuple containing: + - Modified singular values tensor + - LoRA adaptation term + """ + # Compute the LoRA adaptation term + loRA_term = self.X @ self.Y + + # Create a mask that matches the shape of loRA_term + mask = torch.ones_like(loRA_term, device=self.X.device) + # Example: Set the first `rank` rows of the mask to 0 + mask[:self.rank, :] = 0 # Adjust as needed + + # Apply mask to LoRA term + masked_loRA_term = loRA_term * mask + + # Compute the modified singular values + new_s = torch.diag(self.S) + masked_loRA_term + return new_s, masked_loRA_term + + def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Forward pass with LoRA-modified truncated singular values. 
+ + Args: + input: Input tensor + + Returns: + Tuple containing: + - Output tensor after linear transformation + - Regularization loss + """ + if not self.done_svd: + self.perform_svd() + + new_s, LoRA_term = self.get_modified_singular_values() + s_new = F.relu(new_s.to(input.device)) + + # Reconstruct weight matrix using truncated components + weight_updated = self.U @ s_new @ self.Vt + + # Compute regularization loss + reg_loss = torch.norm(LoRA_term) + + return F.linear(input, weight_updated, self.bias), reg_loss + + +class SALTConv2d(nn.Conv2d): + """ + A 2D convolutional layer that combines truncated SVD decomposition with LoRA-style adaptation. + The weight matrix is reshaped before applying truncated SVD and LoRA modifications. + """ + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + rank: int, # truncation rank for SVD + r_lora: int = 8, # LoRA rank + seed: int = 42, + **kwargs + ): + super().__init__(in_channels, out_channels, kernel_size, **kwargs) + torch.manual_seed(seed) + + self.done_svd = False + self.weight.requires_grad = False + + # Reshape weight and perform initial truncated SVD + weight_reshaped = rearrange(self.weight, 'co cin h w -> co (cin h w)') + self.U, self.S, self.Vt = self._initialize_svd(weight_reshaped) + + max_possible_rank = min(self.U.shape[1], self.S.shape[0], self.Vt.shape[0]) + print("\nThe max possible rank is", max_possible_rank) + + self.rank = rank + + # Initialize LoRA matrices + self.X = nn.Parameter(torch.randn(max_possible_rank, r_lora) * 0.01) + self.Y = nn.Parameter(torch.randn(r_lora, max_possible_rank) * 0.01) + + self.reset_parameters() + + def _initialize_svd(self, weight_reshaped): + """Initializes SVD decomposition on the reshaped weight matrix.""" + return torch.linalg.svd(weight_reshaped, full_matrices=False) + + def perform_svd(self) -> None: + """Updates truncated SVD decomposition on the reshaped weight matrix.""" + weight_reshaped = rearrange(self.weight, 'co cin h w -> co (cin h w)') + self.U, self.S, self.Vt = self._initialize_svd(weight_reshaped) + self.done_svd = True + + def get_modified_singular_values(self) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes modified singular values using LoRA adaptation. + Returns: + Tuple containing: + - Modified singular values tensor + - LoRA adaptation term + """ + # Compute the LoRA adaptation term + loRA_term = self.X @ self.Y + + # Create a mask that matches the shape of loRA_term + mask = torch.ones_like(loRA_term, device=self.X.device) + # Example: Set the first `rank` rows of the mask to 0 + mask[:self.rank, :] = 0 # Adjust as needed + + # Apply mask to LoRA term + masked_loRA_term = loRA_term * mask + + # Compute the modified singular values + new_s = torch.diag(self.S) + masked_loRA_term + return new_s, masked_loRA_term + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Forward pass with LoRA-modified truncated singular values. 
+ + Args: + x: Input tensor + + Returns: + Tuple containing: + - Output tensor after convolution + - Regularization loss + """ + if not self.done_svd: + self.perform_svd() + + new_s, LoRA_term = self.get_modified_singular_values() + s_new = F.relu(new_s.to(x.device)) + + # Reconstruct weight matrix using truncated components + weight_updated = self.U @ s_new @ self.Vt + + # Reshape weight back to conv2d format + weight_updated = rearrange( + weight_updated, + 'co (cin h w) -> co cin h w', + cin=self.weight.size(1), + h=self.weight.size(2), + w=self.weight.size(3) + ) + + # Compute regularization loss + reg_loss = torch.norm(LoRA_term) + + return F.conv2d( + x, weight_updated, self.bias, + self.stride, self.padding, + self.dilation, self.groups + ), reg_loss diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers_2.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers_2.py new file mode 100644 index 0000000000000000000000000000000000000000..50c4e9fc2696d8874271b781be63af606052193a --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers_2.py @@ -0,0 +1,183 @@ +import torch +from torch import nn +from torch.nn import functional as F +from einops import rearrange +""" +This Version of SALT uses: + - W = U (\Sigma . A + B) + XY + - we uses srLoRA +""" +class SALTLinear(nn.Linear): + def __init__( + self, + in_features: int, + out_features: int, + rank: int, # rank for truncated SVD + lora_rank: int, # rank for rsLoRA + alpha: float = 32.0, # scaling factor for rsLoRA + bias: bool = True, + device=None, + dtype=None + ) -> None: + super().__init__(in_features, out_features, bias, device, dtype) + + # Perform full SVD initially + self.U, self.S, self.Vt = torch.linalg.svd(self.weight, full_matrices=False) + self.weight.requires_grad = False + self.done_svd = False + + max_possible_rank = min(self.U.shape[1], self.S.shape[0], self.Vt.shape[0]) + print("\nThe max possible rank is", max_possible_rank) + + # Initialize A and B for singular value transformation + + self.A = nn.Parameter(torch.ones(rank)) + self.B = nn.Parameter(torch.zeros(rank)) + self.A_frozen = torch.ones(max_possible_rank - self.A.shape[0]) + self.B_frozen = torch.ones(max_possible_rank - self.B.shape[0]) + + # Initialize rsLoRA parameters with the new scaling + rs_lora_scaling = alpha / (lora_rank ** 0.5) + self.lora_X = nn.Parameter(torch.randn(out_features, lora_rank) * rs_lora_scaling) + self.lora_Y = nn.Parameter(torch.randn(lora_rank, in_features) * rs_lora_scaling) + + self.reset_parameters() + + def reset_parameters(self) -> None: + nn.Linear.reset_parameters(self) + if hasattr(self, 'A'): + nn.init.ones_(self.A) + if hasattr(self, 'B'): + nn.init.zeros_(self.B) + if hasattr(self, 'lora_X'): + nn.init.normal_(self.lora_X, std=0.01) + if hasattr(self, 'lora_Y'): + nn.init.normal_(self.lora_Y, std=0.01) + + def perform_svd(self): + self.U, self.S, self.Vt = torch.linalg.svd(self.weight, full_matrices=False) + self.done_svd = True + + def forward(self, input: torch.Tensor) -> torch.Tensor: + if not self.done_svd: + self.perform_svd() + + # Transform singular values: A·Σ_r + B + A_total = torch.cat([self.A, self.A_frozen.to(input.device)]) + B_total = torch.cat([self.B, self.B_frozen.to(input.device)]) + transformed_S = A_total * self.S + B_total + + # Compute truncated SVD part: U_r(A·Σ_r + B)V_r^T + weight_svd = self.U @ torch.diag(F.relu(transformed_S)) @ self.Vt + + # Add rsLoRA part: X·Y + weight_rslora = self.lora_X @ self.lora_Y + + # Combine both parts + 
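# A toy usage sketch for the SALT layers defined in SALT_layers.py above (this variant
# zeroes the first `rank` rows of the LoRA update so it only perturbs the remaining rows
# of the singular-value matrix). The layers freeze the base weight, rebuild it from the
# SVD factors on every forward pass, and return a (output, regularization_loss) tuple
# instead of a plain tensor, so any wrapping module has to unpack that pair. Sizes are
# illustrative and the import path assumes the repo root is on PYTHONPATH.
import torch
from AllinonSAM.prompt_adapted_segment_anything.modeling.SALT_layers import (
    SALTConv2d,
    SALTLinear,
)

lin = SALTLinear(in_features=64, out_features=32, rank=16, r_lora=8)
x = torch.randn(4, 64)
out, reg = lin(x)            # out: (4, 32); reg: norm of the masked LoRA term
print(out.shape, reg.item())

conv = SALTConv2d(in_channels=3, out_channels=8, kernel_size=3, rank=4, r_lora=4, padding=1)
feat, reg_c = conv(torch.randn(1, 3, 32, 32))   # feat: (1, 8, 32, 32)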
weight_updated = weight_svd + weight_rslora + + # Compute regularization loss + reg_loss = ( + torch.norm(1 - self.A) + + torch.norm(self.B) + + torch.norm(self.lora_X) * torch.norm(self.lora_Y) + ) + + return F.linear(input, weight_updated, self.bias), reg_loss + +class SALTConv2d(nn.Conv2d): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + rank: int, # rank for truncated SVD + lora_rank: int, # rank for rsLoRA + alpha: float = 1.0, # scaling factor for rsLoRA + **kwargs + ): + super().__init__(in_channels, out_channels, kernel_size, **kwargs) + assert isinstance(kernel_size, int) + + # Reshape weight and perform SVD + weight_reshaped = rearrange(self.weight, 'co cin h w -> co (cin h w)') + self.U, self.S, self.Vt = torch.linalg.svd(weight_reshaped, full_matrices=False) + self.done_svd = False + + max_possible_rank = min(self.U.shape[1], self.S.shape[0], self.Vt.shape[0]) + print("\nThe max possible rank is", max_possible_rank) + self.actual_rank = min(rank, max_possible_rank) + + # Initialize A and B for singular value transformation + self.A = nn.Parameter(torch.ones(self.actual_rank)) + self.B = nn.Parameter(torch.zeros(self.actual_rank)) + self.A_frozen = torch.ones(max_possible_rank - self.actual_rank) + self.B_frozen = torch.ones(max_possible_rank - self.actual_rank) + + # Initialize rsLoRA parameters with scaling + total_kernel_size = in_channels * kernel_size * kernel_size + rs_lora_scaling = alpha / (lora_rank ** 0.5) + self.lora_X = nn.Parameter(torch.randn(out_channels, lora_rank) * rs_lora_scaling) + self.lora_Y = nn.Parameter(torch.randn(lora_rank, total_kernel_size) * rs_lora_scaling) + + # Freeze original weights + self.weight.requires_grad = False + + # Save shapes for reshaping + self.weight_shape = self.weight.shape + self.reset_parameters() + + def perform_svd(self): + weight_reshaped = rearrange(self.weight, 'co cin h w -> co (cin h w)') + self.U, self.S, self.Vt = torch.linalg.svd(weight_reshaped, full_matrices=False) + self.done_svd = True + + def reset_parameters(self) -> None: + nn.Conv2d.reset_parameters(self) + if hasattr(self, 'A'): + nn.init.ones_(self.A) + if hasattr(self, 'B'): + nn.init.zeros_(self.B) + if hasattr(self, 'lora_X'): + nn.init.normal_(self.lora_X, std=0.01) + if hasattr(self, 'lora_Y'): + nn.init.normal_(self.lora_Y, std=0.01) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if not self.done_svd: + self.perform_svd() + + A_total = torch.cat([self.A, self.A_frozen.to(x.device)]) + B_total = torch.cat([self.B, self.B_frozen.to(x.device)]) + transformed_S = A_total * self.S + B_total + + # Compute truncated SVD part: U_r(A·Σ_r + B)V_r^T + weight_svd = self.U @ torch.diag(F.relu(transformed_S)) @ self.Vt + + # Add rsLoRA part: X·Y + weight_rslora = self.lora_X @ self.lora_Y + + # Combine both parts + weight_updated = weight_svd + weight_rslora + + # Reshape back to conv2d weight shape + weight_updated = rearrange( + weight_updated, + 'co (cin h w) -> co cin h w', + cin=self.weight_shape[1], + h=self.weight_shape[2], + w=self.weight_shape[3] + ) + + # Compute regularization loss + reg_loss = ( + torch.norm(1 - self.A) + + torch.norm(self.B) + + torch.norm(self.lora_X) * torch.norm(self.lora_Y) + ) + + return F.conv2d( + x, weight_updated, self.bias, + self.stride, self.padding, + self.dilation, self.groups + ), reg_loss diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers_3.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers_3.py new file mode 100644 index 
0000000000000000000000000000000000000000..9179b40a73e69fa0d10c68719c875bc68f97d2ef --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers_3.py @@ -0,0 +1,183 @@ +import torch +from torch import nn +from torch.nn import functional as F +from einops import rearrange +from typing import Type + + +""" +This Version of SALT uses: + - W = U (\Sigma . A + B) + XY + - we uses normal LoRA +""" + +class SALTLinear(nn.Linear): + def __init__( + self, + in_features: int, + out_features: int, + rank: int, # rank for truncated SVD + r_lora: int, # rank for LoRA + rsLora=False, + alpha=1, + bias: bool = True, + device=None, + dtype=None + ) -> None: + super().__init__(in_features, out_features, bias, device, dtype) + + # Perform full SVD initially + self.U, self.S, self.Vt = torch.linalg.svd(self.weight, full_matrices=False) + self.weight.requires_grad = False + self.done_svd = False + max_possible_rank = min(self.U.shape[1], self.S.shape[0], self.Vt.shape[0]) + print("\nThe max possible rank is " , max_possible_rank) + # Initialize A and B for singular value transformation + self.trainable_A = nn.Parameter(torch.ones(rank)) + self.trainable_B = nn.Parameter(torch.zeros(rank)) + self.A_frozen = torch.ones(max_possible_rank-self.trainable_A.shape[0]) + self.B_frozen = torch.ones(max_possible_rank-self.trainable_B.shape[0]) + + # Initialize LoRA parameters + self.trainable_lora_X = nn.Parameter(torch.randn(out_features, r_lora) * 0.01) + self.trainable_lora_Y = nn.Parameter(torch.randn(r_lora, in_features) * 0.01) + + self.reset_parameters() + + def reset_parameters(self) -> None: + nn.Linear.reset_parameters(self) + if hasattr(self, 'trainable_A'): + nn.init.ones_(self.trainable_A) + if hasattr(self, 'trainable_B'): + nn.init.zeros_(self.trainable_B) + if hasattr(self, 'trainable_lora_X'): + nn.init.normal_(self.trainable_lora_X, std=0.01) + if hasattr(self, 'trainable_lora_Y'): + nn.init.normal_(self.trainable_lora_Y, std=0.01) + # No clue why they are using this + def perform_svd(self): + self.U, self.S, self.Vt = torch.linalg.svd(self.weight, full_matrices=False) + self.done_svd = True + def forward(self, input: torch.Tensor) -> torch.Tensor: + if not self.done_svd: + self.perform_svd() + # Transform singular values: A·Σ_r + B + # We first cat the trainable + the frozen parameters + A_total = torch.cat([self.trainable_A,self.A_frozen.to(input.device)]) + B_total = torch.cat([self.trainable_B,self.B_frozen.to(input.device)]) + transformed_S = A_total * self.S + B_total + + # Compute truncated SVD part: U_r(A·Σ_r + B)V_r^T + weight_svd = self.U @ torch.diag(F.relu(transformed_S)) @ self.Vt + + # Add LoRA part: X·Y + weight_lora = self.trainable_lora_X @ self.trainable_lora_Y + + # Combine both parts + weight_updated = weight_svd + weight_lora + + # Compute regularization loss + reg_loss = ( + torch.norm(1 - self.trainable_A) + # A should be close to 1 + torch.norm(self.trainable_B) + # B should be close to 0 + torch.norm(self.trainable_lora_X) * torch.norm(self.trainable_lora_Y) # LoRA regularization + ) + + return F.linear(input, weight_updated, self.bias), reg_loss + +class SALTConv2d(nn.Conv2d): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + rank: int, # rank for truncated SVD + r_lora: int, # rank for LoRA + rsLora=False, + alpha=1, + **kwargs + ): + super().__init__(in_channels, out_channels, kernel_size, **kwargs) + assert isinstance(kernel_size, int) + + # Reshape weight and perform SVD + weight_reshaped = rearrange(self.weight, 
'co cin h w -> co (cin h w)') + self.U, self.S, self.Vt = torch.linalg.svd(weight_reshaped, full_matrices=False) + self.done_svd = False + # Ensure rank is not larger than the minimum dimension + max_possible_rank = min(self.U.shape[1], self.S.shape[0], self.Vt.shape[0]) + print("\nThe max possible rank is " , max_possible_rank) + self.actual_rank = min(rank, max_possible_rank) + + # Initialize A and B for singular value transformation with correct size + self.trainable_A = nn.Parameter(torch.ones(self.actual_rank)) + self.trainable_B = nn.Parameter(torch.zeros(self.actual_rank)) + self.A_frozen = torch.ones(max_possible_rank-self.actual_rank) + self.B_frozen = torch.ones(max_possible_rank-self.actual_rank) + # Initialize LoRA parameters + total_kernel_size = in_channels * kernel_size * kernel_size + self.trainable_lora_X = nn.Parameter(torch.randn(out_channels, r_lora) * 0.01) + self.trainable_lora_Y = nn.Parameter(torch.randn(r_lora, total_kernel_size) * 0.01) + + # Freeze original weights + self.weight.requires_grad = False + + # Save shapes for reshaping + self.weight_shape = self.weight.shape + self.reset_parameters() + # No clue why they are using this + def perform_svd(self): + # shape + weight_reshaped = rearrange(self.weight, 'co cin h w -> co (cin h w)') + self.U, self.S, self.Vt = torch.linalg.svd(weight_reshaped, full_matrices=False) + self.done_svd = True + + def reset_parameters(self) -> None: + nn.Conv2d.reset_parameters(self) + if hasattr(self, 'trainable_A'): + nn.init.ones_(self.trainable_A) + if hasattr(self, 'trainable_B'): + nn.init.zeros_(self.trainable_B) + if hasattr(self, 'trainable_lora_X'): + nn.init.normal_(self.trainable_lora_X, std=0.01) + if hasattr(self, 'trainable_lora_Y'): + nn.init.normal_(self.trainable_lora_Y, std=0.01) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if not self.done_svd: + self.perform_svd() + + A_total = torch.cat([self.trainable_A,self.A_frozen.to(x.device)]) + B_total = torch.cat([self.trainable_B,self.B_frozen.to(x.device)]) + transformed_S = A_total * self.S + B_total + + # Compute truncated SVD part: U_r(A·Σ_r + B)V_r^T + weight_svd = self.U @ torch.diag(F.relu(transformed_S)) @ self.Vt + + # Add LoRA part: X·Y + weight_lora = self.trainable_lora_X @ self.trainable_lora_Y + + # Combine both parts + weight_updated = weight_svd + weight_lora + + # Reshape back to conv2d weight shape + weight_updated = rearrange( + weight_updated, + 'co (cin h w) -> co cin h w', + cin=self.weight_shape[1], + h=self.weight_shape[2], + w=self.weight_shape[3] + ) + + # Compute regularization loss + reg_loss = ( + torch.norm(1 - self.trainable_A) + # A should be close to 1 + torch.norm(self.trainable_B) + # B should be close to 0 + torch.norm(self.trainable_lora_X) * torch.norm(self.trainable_lora_Y) # LoRA regularization + ) + + return F.conv2d( + x, weight_updated, self.bias, + self.stride, self.padding, + self.dilation, self.groups + ), reg_loss \ No newline at end of file diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers_please_work.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers_please_work.py new file mode 100644 index 0000000000000000000000000000000000000000..8d4cb505d349ce86b89ee92d56231aeacf0b53e2 --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/SALT_layers_please_work.py @@ -0,0 +1,283 @@ +import torch +from torch import nn +from torch.nn import functional as F +from einops import rearrange +from typing import Type, Tuple, Optional + +class SALTLinear(nn.Linear): 
+ def __init__( + self, + in_features: int, + out_features: int, + rank: int, # truncation rank for SVD + r_lora: int = 8, # LoRA rank + bias: bool = True, + rsLora: bool = False, #For RsLoRA + alpha:int = 1, #for RsLora too + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + seed: int = 42 + ) -> None: + super().__init__(in_features, out_features, bias, device, dtype) + torch.manual_seed(seed) + + # Initialize parameters for SVD + self.weight.requires_grad = False + self.done_svd = False + self.U, self.S, self.Vt = self._initialize_svd() + + max_possible_rank = min(self.U.shape[1], self.S.shape[0], self.Vt.shape[0]) + # print("\nThe max possible rank is", max_possible_rank) + print(f"\nLayer size: {in_features}x{out_features}") + print(f"Max possible rank: {max_possible_rank}") + print(f"Using rank: {rank}, r_lora: {r_lora}") + + # Count parameters + scale_shift_params = rank * 2 + lora_params = (max_possible_rank - rank) * r_lora * 2 + total_params = scale_shift_params + lora_params + print(f"Scale/shift parameters: {scale_shift_params}") + print(f"LoRA parameters: {lora_params}") + print(f"Total trainable parameters: {total_params}") + + # Truncation rank for SVD + self.rank = rank + + # Initialize scaling and shifting parameters for top singular values + self.trainable_scale_A = nn.Parameter(torch.ones(rank)) + self.trainable_shift_B = nn.Parameter(torch.zeros(rank)) + + # Initialize LoRA matrices for remaining singular values + remaining_rank = max_possible_rank - rank + if rsLora: + print("Using RSLORA") + rs_lora_scaling = alpha / (r_lora ** 0.5) + self.trainable_X = nn.Parameter(torch.randn(remaining_rank, r_lora) * rs_lora_scaling) + self.trainable_Y = nn.Parameter(torch.randn(r_lora, remaining_rank) * rs_lora_scaling) + else: + self.trainable_X = nn.Parameter(torch.randn(remaining_rank, r_lora) * 0.01) + self.trainable_Y = nn.Parameter(torch.randn(r_lora, remaining_rank) * 0.01) + self._verify_parameters() + self.reset_parameters() + + def _verify_parameters(self): + """Print trainable parameter information""" + trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad) + total_params = sum(p.numel() for p in self.parameters()) + print(f"\nVerifying SALTLinear parameters:") + print(f"Trainable parameters: {trainable_params}") + print(f"Total parameters: {total_params}") + for name, param in self.named_parameters(): + print(f"{name}: {param.shape} (trainable: {param.requires_grad})") + + def _initialize_svd(self): + """Initializes SVD decomposition on the weight matrix.""" + return torch.linalg.svd(self.weight, full_matrices=False) + + def perform_svd(self) -> None: + """Updates truncated SVD decomposition on the weight matrix.""" + self.U, self.S, self.Vt = self._initialize_svd() + self.done_svd = True + + def get_modified_singular_values(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Computes modified singular values using both scaling/shifting and LoRA adaptation. 
+ Returns: + Tuple containing: + - Modified singular values tensor + - Scale/shift modification term + - LoRA adaptation term + """ + # Create diagonal matrix of original singular values + S_diag = torch.diag(self.S) + + # Apply scaling and shifting to top rank singular values + top_s = self.S[:self.rank] + modified_top_s = self.trainable_scale_A * top_s + self.trainable_shift_B + + # Compute LoRA term for remaining singular values + loRA_term = self.trainable_X @ self.trainable_Y + + # Create the combined singular value matrix + new_s = S_diag.clone() + new_s[:self.rank, :self.rank] = torch.diag(modified_top_s) + new_s[self.rank:, self.rank:] += loRA_term + + scale_shift_term = torch.zeros_like(S_diag) + scale_shift_term[:self.rank, :self.rank] = torch.diag(modified_top_s) - torch.diag(top_s) + + return new_s, scale_shift_term, loRA_term + + def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Forward pass with both scaling/shifting and LoRA modifications. + + Args: + input: Input tensor + + Returns: + Tuple containing: + - Output tensor after linear transformation + - Combined regularization loss + """ + if not self.done_svd: + self.perform_svd() + + new_s, scale_shift_term, LoRA_term = self.get_modified_singular_values() + s_new = F.relu(new_s.to(input.device)) + + # Reconstruct weight matrix using modified components + weight_updated = self.U @ s_new @ self.Vt + + # Compute regularization losses + scale_shift_reg = torch.norm(scale_shift_term) + lora_reg = torch.norm(LoRA_term) + reg_loss = scale_shift_reg + lora_reg + + return F.linear(input, weight_updated, self.bias), reg_loss + + +class SALTConv2d(nn.Conv2d): + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + rank: int, # truncation rank for SVD + r_lora: int = 8, # LoRA rank + rsLora: bool = False, #For RsLoRA + alpha:int = 1, #for RsLora too + seed: int = 42, + **kwargs + ): + super().__init__(in_channels, out_channels, kernel_size, **kwargs) + torch.manual_seed(seed) + + self.done_svd = False + self.weight.requires_grad = False + + # Reshape weight and perform initial truncated SVD + weight_reshaped = rearrange(self.weight, 'co cin h w -> co (cin h w)') + self.U, self.S, self.Vt = self._initialize_svd(weight_reshaped) + + max_possible_rank = min(self.U.shape[1], self.S.shape[0], self.Vt.shape[0]) + print("\nThe max possible rank is", max_possible_rank) + # Count parameters + scale_shift_params = rank * 2 + lora_params = (max_possible_rank - rank) * r_lora * 2 + total_params = scale_shift_params + lora_params + print(f"Scale/shift parameters: {scale_shift_params}") + print(f"LoRA parameters: {lora_params}") + print(f"Total trainable parameters: {total_params}") + + + self.rank = rank + + # Initialize scaling and shifting parameters for top singular values + self.trainable_scale_A = nn.Parameter(torch.ones(rank)) + self.trainable_shift_B = nn.Parameter(torch.zeros(rank)) + + # Initialize LoRA matrices for remaining singular values + remaining_rank = max_possible_rank - rank + if rsLora: + print("Using RSLORA") + rs_lora_scaling = alpha / (r_lora ** 0.5) + self.trainable_X = nn.Parameter(torch.randn(remaining_rank, r_lora) * rs_lora_scaling) + self.trainable_Y = nn.Parameter(torch.randn(r_lora, remaining_rank) * rs_lora_scaling) + else: + self.trainable_X = nn.Parameter(torch.randn(remaining_rank, r_lora) * 0.01) + self.trainable_Y = nn.Parameter(torch.randn(r_lora, remaining_rank) * 0.01) + self._verify_parameters() + self.reset_parameters() + + + def 
_verify_parameters(self): + """Print trainable parameter information""" + trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad) + total_params = sum(p.numel() for p in self.parameters()) + print(f"\nVerifying SALTConv2d parameters:") + print(f"Trainable parameters: {trainable_params}") + print(f"Total parameters: {total_params}") + for name, param in self.named_parameters(): + print(f"{name}: {param.shape} (trainable: {param.requires_grad})") + + + def _initialize_svd(self, weight_reshaped): + """Initializes SVD decomposition on the reshaped weight matrix.""" + return torch.linalg.svd(weight_reshaped, full_matrices=False) + + def perform_svd(self) -> None: + """Updates truncated SVD decomposition on the reshaped weight matrix.""" + weight_reshaped = rearrange(self.weight, 'co cin h w -> co (cin h w)') + self.U, self.S, self.Vt = self._initialize_svd(weight_reshaped) + self.done_svd = True + + def get_modified_singular_values(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Computes modified singular values using both scaling/shifting and LoRA adaptation. + Returns: + Tuple containing: + - Modified singular values tensor + - Scale/shift modification term + - LoRA adaptation term + """ + # Create diagonal matrix of original singular values + S_diag = torch.diag(self.S) + + # Apply scaling and shifting to top rank singular values + top_s = self.S[:self.rank] + modified_top_s = self.trainable_scale_A * top_s + self.trainable_shift_B + + # Compute LoRA term for remaining singular values + loRA_term = self.trainable_X @ self.trainable_Y + + # Create the combined singular value matrix + new_s = S_diag.clone() + new_s[:self.rank, :self.rank] = torch.diag(modified_top_s) + new_s[self.rank:, self.rank:] += loRA_term + + scale_shift_term = torch.zeros_like(S_diag) + scale_shift_term[:self.rank, :self.rank] = torch.diag(modified_top_s) - torch.diag(top_s) + + return new_s, scale_shift_term, loRA_term + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Forward pass with both scaling/shifting and LoRA modifications. + + Args: + x: Input tensor + + Returns: + Tuple containing: + - Output tensor after convolution + - Combined regularization loss + """ + if not self.done_svd: + self.perform_svd() + + new_s, scale_shift_term, LoRA_term = self.get_modified_singular_values() + s_new = F.relu(new_s.to(x.device)) + + # Reconstruct weight matrix using modified components + weight_updated = self.U @ s_new @ self.Vt + + # Reshape weight back to conv2d format + weight_updated = rearrange( + weight_updated, + 'co (cin h w) -> co cin h w', + cin=self.weight.size(1), + h=self.weight.size(2), + w=self.weight.size(3) + ) + + # Compute regularization losses + scale_shift_reg = torch.norm(scale_shift_term) + lora_reg = torch.norm(LoRA_term) + reg_loss = scale_shift_reg + lora_reg + + return F.conv2d( + x, weight_updated, self.bias, + self.stride, self.padding, + self.dilation, self.groups + ), reg_loss \ No newline at end of file diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__init__.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e4a3ee1cbc7f56e1b2df9c76a54b88111bc1e80e --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
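# A worked example of the parameter counters that SALT_layers_please_work.py prints above,
# plugged with the rank / r_lora values used elsewhere in this diff (svd_rank_linear: 500
# and r_lora: 256 in the SALT config, the same constants hard-coded in common.py), for an
# illustrative ViT-B MLP projection of size 768 x 3072 (max_possible_rank = 768):
#   scale/shift parameters = 2 * rank              = 2 * 500       = 1,000
#   LoRA parameters        = 2 * (768 - 500) * 256 = 2 * 268 * 256 = 137,216
#   total trainable        = 138,216, against 768 * 3072 = 2,359,296 frozen weights,
#   i.e. roughly 5.9% of the layer. The other variants (SALT_layers_2 / SALT_layers_3) use
#   full out_features x r_lora and r_lora x in_features LoRA factors, so their counts differ.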
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from .sam import Sam +from .image_encoder import ImageEncoderViT +from .mask_decoder import MaskDecoder +from .prompt_encoder import PromptEncoder +from .transformer import TwoWayTransformer +from .svd_layers import SVDLinear, SVDConv2d +from .lora_layers import LoRAConv2D, LoRALinear \ No newline at end of file diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..991255372e1ca303c49688424709887f71574e9a Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers_2.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers_2.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc8ec3389ab1e25df37c29ecb41c8f36d717d449 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers_2.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers_3.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers_3.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdd5147b2414ef065ee4a3d889f0ece96642c787 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers_3.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers_please_work.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers_please_work.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25cdbe16cbc010ad92e5334c6debce52751e7b16 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/SALT_layers_please_work.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/__init__.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae89682b9d33a2a8c1def364a3649ef81dd9a0ae Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/__init__.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/__init__.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2775a97885796bf8b418254186b25f32fa0af4c1 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/__init__.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/__init__.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e84f0fe0591f7d3e01f5a394605f896632cc5b34 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/__init__.cpython-39.pyc differ diff --git 
a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/common.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/common.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fa9ed76d480d0ca8b63158eb6b4d49df172de54 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/common.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/common.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/common.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52d6b62d95659ac58847d1528b9f21af7229ca15 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/common.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/common.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/common.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc35db1176de017049cee344aa54a611de400543 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/common.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/image_encoder.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/image_encoder.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c1a6b79a189d2c178d2a7aea67b0dc1eb8fe97b Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/image_encoder.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/image_encoder.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/image_encoder.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73900c1b451b690ebf1944ad199326c3eebd32ff Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/image_encoder.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/image_encoder.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/image_encoder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd5c3aacbd92f256c044c43c281df866abe6a640 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/image_encoder.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/lora_layers.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/lora_layers.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75a01eef6b45b65c426ebd284b86c9a71414cc07 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/lora_layers.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/lora_layers.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/lora_layers.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86a301ac8002ff23348d263a367d59a6f1be5de3 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/lora_layers.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/mask_decoder.cpython-312.pyc 
b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/mask_decoder.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beb2db8afd100e5c5488423ca8a4f0154b7f9ba2 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/mask_decoder.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/mask_decoder.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/mask_decoder.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a8ae875e5565277198ae837f21bbd99ffd3407d Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/mask_decoder.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/mask_decoder.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/mask_decoder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c765c9c8605dfbe634cf928f0bd7bc0bbd86372 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/mask_decoder.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/prompt_encoder.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/prompt_encoder.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa09b0ab0939cfd600ab9e01fe6c3334735ca941 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/prompt_encoder.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/prompt_encoder.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/prompt_encoder.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfd12ddf0894b747222c8deb6c0be586eb249d1a Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/prompt_encoder.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/prompt_encoder.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/prompt_encoder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0d396011f9de93d9c55cddda1dbfd09db153d15 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/prompt_encoder.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/sam.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/sam.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2faf59031d5e43a8f8d6286b9ce95b8da6087c8 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/sam.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/sam.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/sam.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b0b4effb168166951e172556a224dad0caf4d61 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/sam.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/sam.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/sam.cpython-39.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..380eeb2ad52bacbce0ffb1f5dd4efe3b20e33882 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/sam.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/svd_layers.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/svd_layers.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a97a4059f70e4ea36ca0022779fd2172cf219b8f Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/svd_layers.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/svd_layers.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/svd_layers.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..654b012bf9cf5e6e4d71b832e44e8c0025688391 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/svd_layers.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/transformer.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/transformer.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cef0cf075f6a8be506e04cd76a16a17d17f6295c Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/transformer.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/transformer.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/transformer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c30afd969f7f227d7045f3a004c91b19d69c9d8b Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/transformer.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/transformer.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/transformer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69290b77d665f17eb492c1cb0fb6c9096ef45369 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/modeling/__pycache__/transformer.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/common.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/common.py new file mode 100644 index 0000000000000000000000000000000000000000..cf41a5bba12126e37b6128ecf9bb36b3b0ac461d --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/common.py @@ -0,0 +1,79 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import torch +import torch.nn as nn + +from typing import Type +from .svd_layers import SVDLinear +# from .SALT_layers_please_work import SALTLinear +from .SALT_layers_3 import SALTLinear , SALTConv2d +from .lora_layers import LoRAConv2D, LoRALinear + +class MLPBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + mlp_dim: int, + act: Type[nn.Module] = nn.GELU, + mlp_transform=False, + use_lora = False + ) -> None: + super().__init__() + if use_lora: + self.lin1 = LoRALinear(embedding_dim, mlp_dim) + self.lin2 = LoRALinear(mlp_dim, embedding_dim) + else: + # self.lin1 = SVDLinear(embedding_dim, mlp_dim, mlp_transform=mlp_transform) + # self.lin2 = SVDLinear(mlp_dim, embedding_dim, mlp_transform=mlp_transform) + rank_value = 500 + # print("\nEmbedding dim in MLP Block is" ,embedding_dim) + # print("\n no need for MLP transform" , mlp_transform) + self.lin1 = SALTLinear(embedding_dim, mlp_dim, rank=rank_value , r_lora=256 , rsLora=False,alpha=1) + self.lin2 = SALTLinear(mlp_dim, embedding_dim, rank=rank_value , r_lora=256 , rsLora=False,alpha=1) + self.act = act() + + def forward(self, x: torch.Tensor, output_loss=True) -> torch.Tensor: + out, reg_loss1 = self.lin1(x) + out, reg_loss2 = self.lin2(self.act(out)) + if output_loss: + return out, (reg_loss1+reg_loss2) + else: + return out + +class MLPBlock2(nn.Module): + def __init__( + self, + embedding_dim: int, + mlp_dim: int, + act: Type[nn.Module] = nn.GELU, + ) -> None: + super().__init__() + self.lin1 = nn.Linear(embedding_dim, mlp_dim) + self.lin2 = nn.Linear(mlp_dim, embedding_dim) + self.act = act() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = self.lin1(x) + out = self.lin2(self.act(out)) + return out + + +# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa +# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/image_encoder.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/image_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..2c2abfe3d1508863ee6b5e6bf592d3c1fc22ecbd --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/image_encoder.py @@ -0,0 +1,522 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
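
A note on the convention used by the adapted blocks in `common.py` above: every adapted layer (the `SALTLinear` layers wrapped by `MLPBlock`, or `LoRALinear` when `use_lora=True`) returns a `(output, regularization_loss)` pair, and callers sum the losses. The sketch below only illustrates that contract with plain `nn.Linear` stand-ins; it is not the SALT implementation itself, which lives in `SALT_layers_3` and is not shown in this part of the diff.

```python
import torch
import torch.nn as nn

class ToyAdaptedLinear(nn.Linear):
    """Stand-in mimicking the (output, reg_loss) contract of SALTLinear/SVDLinear."""
    def forward(self, x):
        out = super().forward(x)
        reg_loss = torch.tensor(0.0, device=x.device)  # real layers return a norm penalty here
        return out, reg_loss

lin1, lin2 = ToyAdaptedLinear(768, 3072), ToyAdaptedLinear(3072, 768)
act = nn.GELU()

x = torch.randn(2, 196, 768)
h, loss1 = lin1(x)
y, loss2 = lin2(act(h))
total_reg = loss1 + loss2          # accumulated exactly as MLPBlock.forward does
print(y.shape, total_reg.item())   # torch.Size([2, 196, 768]) 0.0
```
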
+import math +from functools import partial, reduce +from operator import mul +import torch +import torch.nn as nn +import torch.nn.functional as F + +from typing import Optional, Tuple, Type + +from .common import LayerNorm2d, MLPBlock +from .svd_layers import SVDLinear, SVDConv2d +# from .SALT_layers import SALTLinear , SALTConv2d # SALT-LoRA +# from .SALT_layers_please_work import SALTLinear , SALTConv2d #SALT-2 +from .SALT_layers_3 import SALTLinear , SALTConv2d # SALT-1 +from .lora_layers import LoRAConv2D, LoRALinear + + +# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa +class ImageEncoderViT(nn.Module): + def __init__( + self, + img_size: int = 1024, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4.0, + out_chans: int = 256, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_abs_pos: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + global_attn_indexes: Tuple[int, ...] = (), + prompt_config = { + 'USE_PROMPT': False, + 'LOCATION': 'prepend', + 'DROPOUT': 0.1, + 'NUM_TOKENS': 5 + }, + mlp_transform = False, + use_lora=False + ) -> None: + """ + Args: + img_size (int): Input image size. + patch_size (int): Patch size. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of ViT. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + global_attn_indexes (list): Indexes for blocks using global attention. + """ + super().__init__() + self.img_size = img_size + self.embed_dim = embed_dim + self.patch_size = (patch_size,patch_size) + self.prompt_config = prompt_config + + self.patch_embed = PatchEmbed( + kernel_size=(patch_size, patch_size), + stride=(patch_size, patch_size), + in_chans=in_chans, + embed_dim=embed_dim, + ) + + self.pos_embed: Optional[nn.Parameter] = None + if use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. 
+ #if image prompts are used, flatten the embeds along the image size dimensions + if self.prompt_config['USE_IMAGE_PROMPT']: + + self.pos_embed = nn.Parameter( + torch.zeros(1, (img_size // patch_size)* (img_size // patch_size), embed_dim) + ) + else: + self.pos_embed = nn.Parameter( + torch.zeros(1, (img_size // patch_size), (img_size // patch_size), embed_dim) + ) + + + self.blocks = nn.ModuleList() + for i in range(depth): + block = Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + norm_layer=norm_layer, + act_layer=act_layer, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + window_size=window_size if i not in global_attn_indexes else 0, + input_size=(img_size // patch_size, img_size // patch_size), + mlp_transform=mlp_transform, + use_lora = use_lora + ) + self.blocks.append(block) + + self.neck = Neck(embed_dim, out_chans, mlp_transform = mlp_transform, use_lora=use_lora) + # self.neck = nn.Sequential( + # SVDConv2d( + # embed_dim, + # out_chans, + # kernel_size=1, + # bias=False, + # ), + # LayerNorm2d(out_chans), + # SVDConv2d( + # out_chans, + # out_chans, + # kernel_size=3, + # padding=1, + # bias=False, + # ), + # LayerNorm2d(out_chans), + # ) + if self.prompt_config['USE_IMAGE_PROMPT']: + val = math.sqrt(6. / float(3 * reduce(mul, self.patch_size, 1) + self.embed_dim)) # noqa + self.prompt_dropout = nn.Dropout(self.prompt_config['DROPOUT']) + self.prompt_embeddings = nn.Parameter(torch.zeros(1, self.prompt_config['NUM_TOKENS'], self.embed_dim)) + nn.init.uniform_(self.prompt_embeddings.data, -val,val) + + self.deep_prompt_embeddings = nn.Parameter(torch.zeros( + len(self.blocks) - 1, + self.prompt_config['NUM_TOKENS'], + self.embed_dim + )) + nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.patch_embed(x) + reg_loss = 0 + if self.pos_embed is not None: + if self.prompt_config['USE_IMAGE_PROMPT']: + x = x + self.pos_embed + else: + p1,p2,p3,p4 = self.pos_embed.shape + x = x + self.pos_embed.view(p1,p2*p3,p4) + + B = x.shape[0] + if self.prompt_config['USE_IMAGE_PROMPT']: + x = self.incorporate_prompt(x) + B = x.shape[0] + # print("x shape: ",x.shape) + num_layers = len(self.blocks) + for i in range(num_layers): + if i==0: + x, loss = self.blocks[i](x) + reg_loss += loss + else: + x = torch.cat(( + x[:,:1,:], + self.prompt_dropout(self.deep_prompt_embeddings[i-1].expand(B,-1,-1)), + x[:,(1+self.prompt_config['NUM_TOKENS']):,:] + ), dim=1) + x, loss = self.blocks[i](x) + reg_loss += loss + + x = torch.cat(( + x[:,:1,:], + x[:,(1+self.prompt_config['NUM_TOKENS']):,:] + ), dim=1) + else: + for blk in self.blocks: + x, loss = blk(x) + reg_loss += loss + + + resize_dim = self.img_size // self.patch_size[0] + x = x.view(B, resize_dim, resize_dim, -1) + x = self.neck(x.permute(0, 3, 1, 2)) + + return x, reg_loss + + def incorporate_prompt(self, x): + B = x.shape[0] + if self.prompt_config['LOCATION'] == 'prepend': + x = torch.cat(( + x[:,:1,:], + self.prompt_dropout(self.prompt_embeddings.expand(B,-1,-1)), + x[:,1:,:] + ), dim=1) + else: + raise ValueError("Other prompt location not supported") + return x + +class Neck(nn.Module): + """Neck which is a MLP at the end""" + def __init__(self, embed_dim, out_chans, mlp_transform=False, use_lora=False): + super().__init__() + if use_lora: + self.conv1 = LoRAConv2D(embed_dim, out_chans, kernel_size=1, bias=False) + self.conv2 = LoRAConv2D(out_chans, out_chans, kernel_size=3, padding=1, bias=False) + 
else: + rank_value = 150 + self.conv1 = SALTConv2d(embed_dim, out_chans, kernel_size=1, bias=False , rank=rank_value , r_lora=256 , rsLora=False , alpha=1) + self.conv2 = SALTConv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False , rank=rank_value , r_lora=256 , rsLora=False, alpha=1) + # self.conv1 = SVDConv2d(embed_dim, out_chans, kernel_size=1, bias=False, mlp_transform=mlp_transform) + # self.conv2 = SVDConv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False, mlp_transform=mlp_transform) + + + self.ln1 = LayerNorm2d(out_chans) + self.ln2 = LayerNorm2d(out_chans) + + def forward(self, x): + out, reg_loss1 = self.conv1(x) + out = self.ln1(out) + out, reg_loss2 = self.conv2(out) + out = self.ln2(out) + return out + + +class Block(nn.Module): + """Transformer blocks with support of window attention and residual propagation blocks""" + + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + input_size: Optional[Tuple[int, int]] = None, + mlp_transform = False, + use_lora = False + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. If it equals 0, then + use global attention. + input_size (int or None): Input resolution for calculating the relative positional + parameter size. + """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size if window_size == 0 else (window_size, window_size), + mlp_transform = mlp_transform, + use_lora = use_lora + ) + + self.norm2 = norm_layer(dim) + self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer, mlp_transform=mlp_transform, use_lora=use_lora) + + self.window_size = window_size + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + x = self.norm1(x) + # Window partition + if self.window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, self.window_size) + + x, reg_loss1 = self.attn(x) + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, pad_hw, (H, W)) + + x = shortcut + x + mlp_out, reg_loss2 = self.mlp(self.norm2(x)) + x = x + mlp_out + + return x, (reg_loss1 + reg_loss2) + + +class Attention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + input_size: Optional[Tuple[int, int]] = None, + mlp_transform = False, + use_lora=False + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool: If True, add a learnable bias to query, key, value. 
+ rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + input_size (int or None): Input resolution for calculating the relative positional + parameter size. + """ + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + if use_lora: + self.qkv = LoRALinear(dim, dim * 3, bias=qkv_bias) + self.proj = LoRALinear(dim, dim) + else: + rank_value = 500 + self.qkv = SALTLinear(dim, dim * 3, bias=qkv_bias , r_lora=256 , rank=rank_value , rsLora=False,alpha=1) + self.proj = SALTLinear(dim, dim , r_lora=256 , rank=rank_value , rsLora=False,alpha=1) + # self.qkv = SVDLinear(dim, dim * 3, bias=qkv_bias, mlp_transform=mlp_transform) + # self.proj = SVDLinear(dim, dim, mlp_transform=mlp_transform) + + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + assert ( + input_size is not None + ), "Input size must be provided if using relative positional encoding." + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, HW, _ = x.shape + # qkv with shape (3, B, nHead, H * W, C) + qkv, reg_loss1 = self.qkv(x) + qkv = qkv.reshape(B, HW, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + # q, k, v with shape (B * nHead, H * W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, HW, -1).unbind(0) + + attn = (q * self.scale) @ k.transpose(-2, -1) + + # if self.use_rel_pos: + # attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) + + attn = attn.softmax(dim=-1) + x = (attn @ v).view(B, self.num_heads, HW, -1).permute(0, 2, 1, 3).reshape(B, HW, -1) + x, reg_loss2 = self.proj(x) + + return x, (reg_loss2 + reg_loss1) + + +def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. + (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition( + windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int] +) -> torch.Tensor: + """ + Window unpartition into original sequences and removing padding. + Args: + x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + + Returns: + x: unpartitioned sequences with [B, H, W, C]. 
+ """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def add_decomposed_rel_pos( + attn: torch.Tensor, + q: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], +) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 + Args: + attn (Tensor): attention map. + q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). + rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. + rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. + q_size (Tuple): spatial sequence size of query q with (q_h, q_w). + k_size (Tuple): spatial sequence size of key k with (k_h, k_w). + + Returns: + attn (Tensor): attention map with added relative positional embeddings. + """ + q_h, q_w = q_size + k_h, k_w = k_size + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) + rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) + + attn = ( + attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] + ).view(B, q_h * q_w, k_h * k_w) + + return attn + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, + kernel_size: Tuple[int, int] = (16, 16), + stride: Tuple[int, int] = (16, 16), + padding: Tuple[int, int] = (0, 0), + in_chans: int = 3, + embed_dim: int = 768, + ) -> None: + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): embed_dim (int): Patch embedding dimension. 
+ """ + super().__init__() + + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + B,H,W,C = x.shape + x = x.view((B,H*W,C)) + return x diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/lora_layers.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/lora_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..867fafaf14205da6e7a5d1c676d726c6ac5254b4 --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/lora_layers.py @@ -0,0 +1,56 @@ +import torch +from torch import nn +from torch.nn import functional as F +from typing import Type + +class LoRALinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None, r=4, scale=1) -> None: + super().__init__(in_features, out_features, bias, device, dtype) + self.r = r + self.trainable_lora_down = nn.Linear(in_features, r, bias=False) + self.dropout = nn.Dropout(0.1) + self.trainable_lora_up = nn.Linear(r, out_features, bias=False) + self.scale = scale + self.selector = nn.Identity() + + nn.init.normal_(self.trainable_lora_down.weight, std=1/r) + nn.init.zeros_(self.trainable_lora_up.weight) + + def forward(self, input): + out = F.linear(input, self.weight, self.bias) + self.scale*self.dropout(self.trainable_lora_up(self.selector(self.trainable_lora_down(input)))) + return out,0 + +class LoRAConv2D(nn.Conv2d): + def __init__(self, in_channels: int, out_channels: int, kernel_size, stride = 1, padding = 0, dilation = 1, groups = 1, bias = True, padding_mode: str = 'zeros', device=None, dtype=None, r=4, scale=1) -> None: + super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, device, dtype) + assert type(kernel_size) is int + self.r = r + self.scale = scale + + self.trainable_lora_down = nn.Conv2d( + in_channels = in_channels, + out_channels = r, + kernel_size = kernel_size, + bias=False + ) + + self.dropout = nn.Dropout(0.1) + + self.trainable_lora_up = nn.Conv2d( + in_channels=r, + out_channels=out_channels, + kernel_size=1, + bias=False + ) + self.selector = nn.Identity() + self.scale = scale + + nn.init.normal_(self.trainable_lora_down.weight, std=1/r) + nn.init.zeros_(self.trainable_lora_up.weight) + + def forward(self, input): + out = F.conv2d(input, self.weight, self.bias, self.stride) + out = out + self.scale*self.dropout(self.trainable_lora_up(self.selector(self.trainable_lora_down(input)))) + return out,0 + + diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/mask_decoder.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/mask_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..732d55add3a42cc15ead3cae653da7d46bf64053 --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/mask_decoder.py @@ -0,0 +1,206 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import torch +from torch import nn +from torch.nn import functional as F +from torch.autograd import Function +from typing import List, Tuple, Type + +from .common import LayerNorm2d + +class ReverseLayerF(Function): + + @staticmethod + def forward(ctx, x, alpha): + ctx.alpha = alpha + + return x.view_as(x) + + @staticmethod + def backward(ctx, grad_output): + output = grad_output.neg() * ctx.alpha + + return output, None + +class MaskDecoder(nn.Module): + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + num_actions: int = 5, + num_classes: int = 14 + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a + tranformer architecture. + + Arguments: + transformer_dim (int): the channel dimension of the transformer + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict + when disambiguating masks + activation (nn.Module): the type of activation to use when + upscaling masks + iou_head_depth (int): the depth of the MLP used to predict + mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP + used to predict mask quality + """ + super().__init__() + + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), + activation(), + ) + self.output_hypernetworks_mlps = nn.ModuleList( + [ + MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) + for i in range(self.num_mask_tokens) + ] + ) + + self.iou_prediction_head = MLP( + transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth + ) + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + use_gsam: bool = True, + output_masks: bool = True + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single + mask. 
+ + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + """ + + iou_pred, masks = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + use_gsam=use_gsam, + output_masks=output_masks + ) + + # Select the correct mask or masks for outptu + if multimask_output: + mask_slice = slice(1, None) + else: + mask_slice = slice(0, 1) + masks = masks[:, mask_slice, :, :] + iou_pred = iou_pred[:, mask_slice] + + # Prepare output + return masks, iou_pred + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + use_gsam: bool = True, + output_masks: bool = True + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. See 'forward' for more details.""" + # Concatenate output tokens + output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) + output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1) + + tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) + + # Expand per-image data in batch direction to be per-mask + if use_gsam: + src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) #comment if not using Grounding SAM + else: + src = image_embeddings + + # src = src + dense_prompt_embeddings + pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) + # print("image_pe.shape: ", image_pe.shape) + b, c, h, w = src.shape + + # Run the transformer + hs, src = self.transformer(src, pos_src, tokens) + + iou_token_out = hs[:, 0, :] + mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :] + # Upscale mask embeddings and predict masks using the mask tokens + src = src.transpose(1, 2).view(b, c, h, w) + upscaled_embedding = self.output_upscaling(src) + hyper_in_list: List[torch.Tensor] = [] + for i in range(self.num_mask_tokens): + hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])) + hyper_in = torch.stack(hyper_in_list, dim=1) + # print("hyper_in_shape: ",hyper_in.shape) + b, c, h, w = upscaled_embedding.shape + masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) + + # Generate mask quality predictions + iou_pred = self.iou_prediction_head(iou_token_out) + + return iou_pred, masks + + +# Lightly adapted from +# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa +class MLP(nn.Module): + def __init__( + self, + input_dim: int, + hidden_dim: int, + output_dim: int, + num_layers: int, + sigmoid_output: bool = False, + ) -> None: + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList( + nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) + ) + self.sigmoid_output = sigmoid_output + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + if self.sigmoid_output: + x = F.sigmoid(x) + return x diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/prompt_encoder.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/prompt_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..df5ea37328c283b08c4a1b74eb582fee8428abcd --- /dev/null +++ 
b/AllinonSAM/prompt_adapted_segment_anything/modeling/prompt_encoder.py @@ -0,0 +1,218 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch +from torch import nn + +from typing import Any, Optional, Tuple, Type +from .svd_layers import SVDLinear, SVDConv2d + +from .common import LayerNorm2d + + +class PromptEncoder(nn.Module): + def __init__( + self, + embed_dim: int, + image_embedding_size: Tuple[int, int], + input_image_size: Tuple[int, int], + mask_in_chans: int, + activation: Type[nn.Module] = nn.GELU, + ) -> None: + """ + Encodes prompts for input to SAM's mask decoder. + + Arguments: + embed_dim (int): The prompts' embedding dimension + image_embedding_size (tuple(int, int)): The spatial size of the + image embedding, as (H, W). + input_image_size (int): The padded size of the image as input + to the image encoder, as (H, W). + mask_in_chans (int): The number of hidden channels used for + encoding input masks. + activation (nn.Module): The activation to use when encoding + input masks. + """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners + point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1]) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. 
+ + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + # print("points in embed: ", points.shape) + # print("labels in embed: ", labels.shape) + # print("padding point shape: ", padding_point.shape) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size) + point_embedding[labels == -1] = 0.0 + point_embedding[labels == -1] += self.not_a_point_embed.weight + point_embedding[labels == 0] += self.point_embeddings[0].weight + point_embedding[labels == 1] += self.point_embeddings[1].weight + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + mask_embedding = self.mask_downscaling(masks) + return mask_embedding + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. + """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. + + Arguments: + points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates + and labels to embed. + boxes (torch.Tensor or none): boxes to embed + masks (torch.Tensor or none): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. 
+ torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device()) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings + + +class PositionEmbeddingRandom(nn.Module): + """ + Positional encoding using random spatial frequencies. + """ + + def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + super().__init__() + if scale is None or scale <= 0.0: + scale = 1.0 + self.register_buffer( + "positional_encoding_gaussian_matrix", + scale * torch.randn((2, num_pos_feats)), + ) + + def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: + """Positionally encode points that are normalized to [0,1].""" + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coords = 2 * coords - 1 + coords = coords @ self.positional_encoding_gaussian_matrix + coords = 2 * np.pi * coords + # outputs d_1 x ... x d_n x C shape + return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) + + def forward(self, size: Tuple[int, int]) -> torch.Tensor: + """Generate positional encoding for a grid of the specified size.""" + h, w = size + device: Any = self.positional_encoding_gaussian_matrix.device + grid = torch.ones((h, w), device=device, dtype=torch.float32) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / h + x_embed = x_embed / w + + pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) + return pe.permute(2, 0, 1) # C x H x W + + def forward_with_coords( + self, coords_input: torch.Tensor, image_size: Tuple[int, int] + ) -> torch.Tensor: + """Positionally encode points that are not normalized to [0,1].""" + coords = coords_input.clone() + coords[:, :, 0] = coords[:, :, 0] / image_size[1] + coords[:, :, 1] = coords[:, :, 1] / image_size[0] + return self._pe_encoding(coords.to(torch.float)) # B x N x C diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/sam.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/sam.py new file mode 100644 index 0000000000000000000000000000000000000000..303bc2f40c3dbc84f5d4286bb73336e075a86589 --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/sam.py @@ -0,0 +1,174 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
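
A quick shape sketch for the `PromptEncoder` defined above, using sizes that mirror the usual SAM ViT defaults (256-dim prompt embeddings, a 64x64 embedding grid for 1024x1024 inputs, `mask_in_chans=16`); the import path is again an assumption. With a single foreground point and no box, `_embed_points` appends a padding point, so the sparse output has two tokens, while the dense output falls back to the learned `no_mask_embed`.

```python
import torch
# Assumed import path for this repo's prompt encoder.
from prompt_adapted_segment_anything.modeling.prompt_encoder import PromptEncoder

pe = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)

points = torch.tensor([[[512.0, 512.0]]])   # B x N x 2, in input-image pixel coordinates
labels = torch.tensor([[1]])                # 1 = foreground point
sparse, dense = pe(points=(points, labels), boxes=None, masks=None)
print(sparse.shape)  # torch.Size([1, 2, 256])  (point + padding token, since no box was given)
print(dense.shape)   # torch.Size([1, 256, 64, 64])
```
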
+ +import torch +from torch import nn +from torch.nn import functional as F + +from typing import Any, Dict, List, Tuple + +from .image_encoder import ImageEncoderViT +from .mask_decoder import MaskDecoder +from .prompt_encoder import PromptEncoder + + +class Sam(nn.Module): + mask_threshold: float = 0.0 + image_format: str = "RGB" + + def __init__( + self, + image_encoder: ImageEncoderViT, + prompt_encoder: PromptEncoder, + mask_decoder: MaskDecoder, + pixel_mean: List[float] = [123.675, 116.28, 103.53], + pixel_std: List[float] = [58.395, 57.12, 57.375], + ) -> None: + """ + SAM predicts object masks from an image and input prompts. + + Arguments: + image_encoder (ImageEncoderViT): The backbone used to encode the + image into image embeddings that allow for efficient mask prediction. + prompt_encoder (PromptEncoder): Encodes various types of input prompts. + mask_decoder (MaskDecoder): Predicts masks from the image embeddings + and encoded prompts. + pixel_mean (list(float)): Mean values for normalizing pixels in the input image. + pixel_std (list(float)): Std values for normalizing pixels in the input image. + """ + super().__init__() + self.image_encoder = image_encoder + self.prompt_encoder = prompt_encoder + self.mask_decoder = mask_decoder + self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) + + @property + def device(self) -> Any: + return self.pixel_mean.device + + @torch.no_grad() + def forward( + self, + batched_input: List[Dict[str, Any]], + multimask_output: bool, + ) -> List[Dict[str, torch.Tensor]]: + """ + Predicts masks end-to-end from provided images and prompts. + If prompts are not known in advance, using SamPredictor is + recommended over calling the model directly. + + Arguments: + batched_input (list(dict)): A list over input images, each a + dictionary with the following keys. A prompt key can be + excluded if it is not present. + 'image': The image as a torch tensor in 3xHxW format, + already transformed for input to the model. + 'original_size': (tuple(int, int)) The original size of + the image before transformation, as (H, W). + 'point_coords': (torch.Tensor) Batched point prompts for + this image, with shape BxNx2. Already transformed to the + input frame of the model. + 'point_labels': (torch.Tensor) Batched labels for point prompts, + with shape BxN. + 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4. + Already transformed to the input frame of the model. + 'mask_inputs': (torch.Tensor) Batched mask inputs to the model, + in the form Bx1xHxW. + multimask_output (bool): Whether the model should predict multiple + disambiguating masks, or return a single mask. + + Returns: + (list(dict)): A list over input images, where each element is + as dictionary with the following keys. + 'masks': (torch.Tensor) Batched binary mask predictions, + with shape BxCxHxW, where B is the number of input promts, + C is determiend by multimask_output, and (H, W) is the + original size of the image. + 'iou_predictions': (torch.Tensor) The model's predictions + of mask quality, in shape BxC. + 'low_res_logits': (torch.Tensor) Low resolution logits with + shape BxCxHxW, where H=W=256. Can be passed as mask input + to subsequent iterations of prediction. 
+ """ + input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0) + image_embeddings = self.image_encoder(input_images) + + outputs = [] + for image_record, curr_embedding in zip(batched_input, image_embeddings): + if "point_coords" in image_record: + points = (image_record["point_coords"], image_record["point_labels"]) + else: + points = None + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=points, + boxes=image_record.get("boxes", None), + masks=image_record.get("mask_inputs", None), + ) + low_res_masks, iou_predictions = self.mask_decoder( + image_embeddings=curr_embedding.unsqueeze(0), + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + masks = self.postprocess_masks( + low_res_masks, + input_size=image_record["image"].shape[-2:], + original_size=image_record["original_size"], + ) + masks = masks > self.mask_threshold + outputs.append( + { + "masks": masks, + "iou_predictions": iou_predictions, + "low_res_logits": low_res_masks, + } + ) + return outputs + + def postprocess_masks( + self, + masks: torch.Tensor, + input_size: Tuple[int, ...], + original_size: Tuple[int, ...], + ) -> torch.Tensor: + """ + Remove padding and upscale masks to the original image size. + + Arguments: + masks (torch.Tensor): Batched masks from the mask_decoder, + in BxCxHxW format. + input_size (tuple(int, int)): The size of the image input to the + model, in (H, W) format. Used to remove padding. + original_size (tuple(int, int)): The original size of the image + before resizing for input to the model, in (H, W) format. + + Returns: + (torch.Tensor): Batched masks in BxCxHxW format, where (H, W) + is given by original_size. 
+ """ + masks = F.interpolate( + masks, + (self.image_encoder.img_size, self.image_encoder.img_size), + mode="bilinear", + align_corners=False, + ) + masks = masks[..., : input_size[0], : input_size[1]] + masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False) + return masks + + def preprocess(self, x: torch.Tensor) -> torch.Tensor: + """Normalize pixel values and pad to a square input.""" + # Normalize colors + x = (x - self.pixel_mean) / self.pixel_std + + # Pad + h, w = x.shape[-2:] + padh = self.image_encoder.img_size - h + padw = self.image_encoder.img_size - w + x = F.pad(x, (0, padw, 0, padh)) + return x diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/svd_layers.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/svd_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9b63ff7c1b3b5549c7972ab1608e7b353cfe1c --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/svd_layers.py @@ -0,0 +1,140 @@ +import torch +from torch import nn +from torch.nn import functional as F +from einops import rearrange +from typing import Type + + +class SVDLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None, mlp_transform=False, fraction_trainable=1) -> None: + super().__init__(in_features, out_features, bias, device, dtype) + self.U, self.S, self.Vt = torch.linalg.svd(self.weight, full_matrices=False) + self.weight.requires_grad = False + self.done_svd = False + self.mlp_transform = mlp_transform + if mlp_transform: + self.trainable_mlp = MLPBlock2( + embedding_dim=self.S.shape[0], + mlp_dim=256 + ) + else: + S_len = (self.S.shape[0]) + # self.trainable_scale = nn.Parameter(torch.ones(int(S_len*1))) + self.trainable_scale = nn.Parameter(torch.ones(int(S_len*fraction_trainable))) + # self.trainable_shift = nn.Parameter(torch.zeros(int(S_len*0))) + self.trainable_shift = nn.Parameter(torch.zeros(int(S_len*fraction_trainable))) + self.frozen_scale = torch.ones(S_len-self.trainable_scale.shape[0]) + self.frozen_shift = torch.ones(S_len - self.trainable_shift.shape[0]) + self.reset_parameters() + + def perform_svd(self): + self.U, self.S, self.Vt = torch.linalg.svd(self.weight, full_matrices=False) + self.done_svd = True + + def reset_parameters(self): + nn.Linear.reset_parameters(self) + if hasattr(self, 'trainable_shift'): + nn.init.zeros_(self.trainable_shift) + if hasattr(self, 'trainable_scale'): + nn.init.ones_(self.trainable_scale) + + def forward(self, input: torch.Tensor): + if not self.done_svd: + self.perform_svd() + if self.mlp_transform: + s_new = (self.trainable_mlp((self.S.to(input.device)).flatten())).reshape(self.S.shape) + weight_updated = self.U.to(input.device, dtype=input.dtype) @ torch.diag(F.relu(s_new)).to(input.device) @ self.Vt.to(device=input.device, dtype=input.dtype) + reg_loss = torch.norm(s_new - self.S) + else: + scale = torch.cat([self.trainable_scale,self.frozen_scale.to(input.device)]) + shift = torch.cat([self.trainable_shift, self.frozen_shift.to(input.device)]) + weight_updated = self.U.to(input.device, dtype=input.dtype) @ torch.diag(F.relu(scale.to(input.device, dtype=input.dtype)*self.S.to(input.device, dtype=input.dtype) + shift)) @ self.Vt.to(device=input.device, dtype=input.dtype) + reg_loss = torch.norm(1 - self.trainable_scale) + torch.norm(self.trainable_shift) + return F.linear(input, weight_updated, self.bias), reg_loss + +#adapted from https://github.com/phymhan/SVDiff +class SVDConv2d(nn.Conv2d): + def 
__init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + scale: float = 1.0, + mlp_transform: bool = False, + fraction_trainable=1, + **kwargs + ): + nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, **kwargs) + assert type(kernel_size) is int + weight_reshaped = rearrange(self.weight, 'co cin h w -> co (cin h w)') + self.U, self.S, self.Vt = torch.linalg.svd(weight_reshaped, full_matrices=False) + # initialize to 0 for smooth tuning + + self.weight.requires_grad = False + self.done_svd = False + self.mlp_transform = mlp_transform + if mlp_transform: + self.trainable_mlp = MLPBlock2( + embedding_dim=self.S.shape[0], + mlp_dim=256 + ) + else: + S_len = (self.S.shape[0]) + # self.trainable_scale = nn.Parameter(torch.ones(int(S_len*1))) + self.trainable_scale = nn.Parameter(torch.ones(int(S_len*fraction_trainable))) + # self.trainable_shift = nn.Parameter(torch.zeros(int(S_len*0))) + self.trainable_shift = nn.Parameter(torch.zeros(int(S_len*fraction_trainable))) + self.frozen_scale = torch.ones(S_len-self.trainable_scale.shape[0]) + self.frozen_shift = torch.ones(S_len - self.trainable_shift.shape[0]) + self.reset_parameters() + + def perform_svd(self): + # shape + weight_reshaped = rearrange(self.weight, 'co cin h w -> co (cin h w)') + self.U, self.S, self.Vt = torch.linalg.svd(weight_reshaped, full_matrices=False) + self.done_svd = True + + def reset_parameters(self): + nn.Conv2d.reset_parameters(self) + if hasattr(self, 'trainable_shift'): + nn.init.zeros_(self.trainable_shift) + if hasattr(self, 'trainable_scale'): + nn.init.ones_(self.trainable_scale) + + def forward(self, x: torch.Tensor): + if not self.done_svd: + # this happens after loading the state dict + self.perform_svd() + + if self.mlp_transform: + s_new = (self.trainable_mlp((self.S.to(x.device)).flatten())).reshape(self.S.shape) + weight_updated = self.U.to(x.device, dtype=x.dtype) @ torch.diag(F.relu(s_new)).to(x.device) @ self.Vt.to(device=x.device, dtype=x.dtype) + reg_loss = torch.norm(s_new - self.S) + + else: + scale = torch.cat([self.trainable_scale,self.frozen_scale.to(x.device)]) + shift = torch.cat([self.trainable_shift, self.frozen_shift.to(x.device)]) + weight_updated = self.U.to(x.device, dtype=x.dtype) @ torch.diag(F.relu(scale.to(x.device, dtype=x.dtype)*self.S.to(x.device, dtype=x.dtype) + shift)) @ self.Vt.to(device=x.device, dtype=x.dtype) + reg_loss = torch.norm(1 - self.trainable_scale) + torch.norm(self.trainable_shift) + + weight_updated = rearrange(weight_updated, 'co (cin h w) -> co cin h w', cin=self.weight.size(1), h=self.weight.size(2), w=self.weight.size(3)) + + return F.conv2d(x, weight_updated, self.bias, self.stride, self.padding, self.dilation, self.groups), reg_loss + + +class MLPBlock2(nn.Module): + def __init__( + self, + embedding_dim: int, + mlp_dim: int, + act: Type[nn.Module] = nn.GELU, + ) -> None: + super().__init__() + self.lin1 = nn.Linear(embedding_dim, mlp_dim) + self.lin2 = nn.Linear(mlp_dim, embedding_dim) + self.act = act() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = self.lin1(x) + out = self.lin2(self.act(out)) + return out diff --git a/AllinonSAM/prompt_adapted_segment_anything/modeling/transformer.py b/AllinonSAM/prompt_adapted_segment_anything/modeling/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..2496f699977baed40c1be7f3ca3373882b77cecd --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/modeling/transformer.py @@ -0,0 +1,246 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from torch import Tensor, nn + +import math +from typing import Tuple, Type + +from .common import MLPBlock +from .common import MLPBlock2 + +class TwoWayTransformer(nn.Module): + def __init__( + self, + depth: int, + embedding_dim: int, + num_heads: int, + mlp_dim: int, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + ) -> None: + """ + A transformer decoder that attends to an input image using + queries whose positional embedding is supplied. + + Args: + depth (int): number of layers in the transformer + embedding_dim (int): the channel dimension for the input embeddings + num_heads (int): the number of heads for multihead attention. Must + divide embedding_dim + mlp_dim (int): the channel dimension internal to the MLP block + activation (nn.Module): the activation to use in the MLP block + """ + super().__init__() + self.depth = depth + self.embedding_dim = embedding_dim + self.num_heads = num_heads + self.mlp_dim = mlp_dim + self.layers = nn.ModuleList() + + for i in range(depth): + self.layers.append( + TwoWayAttentionBlock( + embedding_dim=embedding_dim, + num_heads=num_heads, + mlp_dim=mlp_dim, + activation=activation, + attention_downsample_rate=attention_downsample_rate, + skip_first_layer_pe=(i == 0), + ) + ) + + self.final_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm_final_attn = nn.LayerNorm(embedding_dim) + + def forward( + self, + image_embedding: Tensor, + image_pe: Tensor, + point_embedding: Tensor, + ) -> Tuple[Tensor, Tensor]: + """ + Args: + image_embedding (torch.Tensor): image to attend to. Should be shape + B x embedding_dim x h x w for any h and w. + image_pe (torch.Tensor): the positional encoding to add to the image. Must + have the same shape as image_embedding. + point_embedding (torch.Tensor): the embedding to add to the query points. + Must have shape B x N_points x embedding_dim for any N_points. + + Returns: + torch.Tensor: the processed point_embedding + torch.Tensor: the processed image_embedding + """ + # BxCxHxW -> BxHWxC == B x N_image_tokens x C + bs, c, h, w = image_embedding.shape + # print("image embedding shape: ",image_embedding.shape) + image_embedding = image_embedding.flatten(2).permute(0, 2, 1) + image_pe = image_pe.flatten(2).permute(0, 2, 1) + + # Prepare queries + queries = point_embedding + keys = image_embedding + + # Apply transformer blocks and final layernorm + for layer in self.layers: + queries, keys = layer( + queries=queries, + keys=keys, + query_pe=point_embedding, + key_pe=image_pe, + ) + + # Apply the final attenion layer from the points to the image + q = queries + point_embedding + k = keys + attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm_final_attn(queries) + + return queries, keys + + +class TwoWayAttentionBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + num_heads: int, + mlp_dim: int = 2048, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + skip_first_layer_pe: bool = False, + ) -> None: + """ + A transformer block with four layers: (1) self-attention of sparse + inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp + block on sparse inputs, and (4) cross attention of dense inputs to sparse + inputs. 
+ + Arguments: + embedding_dim (int): the channel dimension of the embeddings + num_heads (int): the number of heads in the attention layers + mlp_dim (int): the hidden dimension of the mlp block + activation (nn.Module): the activation of the mlp block + skip_first_layer_pe (bool): skip the PE on the first layer + """ + super().__init__() + self.self_attn = Attention(embedding_dim, num_heads) + self.norm1 = nn.LayerNorm(embedding_dim) + + self.cross_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm2 = nn.LayerNorm(embedding_dim) + + # self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) + self.mlp = MLPBlock2(embedding_dim, mlp_dim, activation) + self.norm3 = nn.LayerNorm(embedding_dim) + + self.norm4 = nn.LayerNorm(embedding_dim) + self.cross_attn_image_to_token = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward( + self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor + ) -> Tuple[Tensor, Tensor]: + # Self attention block + # print("queries shape: ", queries.shape) + # print("keys shape: ", keys.shape) + if self.skip_first_layer_pe: + queries = self.self_attn(q=queries, k=queries, v=queries) + else: + q = queries + attn_out = self.self_attn(q=q, k=q, v=queries) + queries = queries + attn_out + queries = self.norm1(queries) + + # Cross attention block, tokens attending to image embedding + #only using text prompt so dont need positional embedding + q = queries + k = keys + attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + # mlp_out, reg_loss = self.mlp(queries) + queries = queries + mlp_out + queries = self.norm3(queries) + + # Cross attention block, image embedding attending to tokens + q = queries + k = keys + attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) + keys = keys + attn_out + keys = self.norm4(keys) + + return queries, keys + + +class Attention(nn.Module): + """ + An attention layer that allows for downscaling the size of the embedding + after projection to queries, keys, and values. + """ + + def __init__( + self, + embedding_dim: int, + num_heads: int, + downsample_rate: int = 1, + ) -> None: + super().__init__() + self.embedding_dim = embedding_dim + self.internal_dim = embedding_dim // downsample_rate + self.num_heads = num_heads + assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." 
+ + self.q_proj = nn.Linear(embedding_dim, self.internal_dim) + self.k_proj = nn.Linear(embedding_dim, self.internal_dim) + self.v_proj = nn.Linear(embedding_dim, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, embedding_dim) + + def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + b, n, c = x.shape + x = x.reshape(b, n, num_heads, c // num_heads) + return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head + + def _recombine_heads(self, x: Tensor) -> Tensor: + b, n_heads, n_tokens, c_per_head = x.shape + x = x.transpose(1, 2) + return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + # Attention + _, _, _, c_per_head = q.shape + attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens + attn = attn / math.sqrt(c_per_head) + attn = torch.softmax(attn, dim=-1) + + # Get output + out = attn @ v + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out diff --git a/AllinonSAM/prompt_adapted_segment_anything/predictor.py b/AllinonSAM/prompt_adapted_segment_anything/predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..602ae1fbf0eed19fdf812a9f2a427e7ed996f04b --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/predictor.py @@ -0,0 +1,269 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch + +from .modeling import Sam + +from typing import Optional, Tuple + +from .utils.transforms import ResizeLongestSide + + +class SamPredictor: + def __init__( + self, + sam_model: Sam, + ) -> None: + """ + Uses SAM to calculate the image embedding for an image, and then + allow repeated, efficient mask prediction given prompts. + + Arguments: + sam_model (Sam): The model to use for mask prediction. + """ + super().__init__() + self.model = sam_model + self.transform = ResizeLongestSide(sam_model.image_encoder.img_size) + self.reset_image() + + def set_image( + self, + image: np.ndarray, + image_format: str = "RGB", + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. + + Arguments: + image (np.ndarray): The image for calculating masks. Expects an + image in HWC uint8 format, with pixel values in [0, 255]. + image_format (str): The color format of the image, in ['RGB', 'BGR']. + """ + assert image_format in [ + "RGB", + "BGR", + ], f"image_format must be in ['RGB', 'BGR'], is {image_format}." 
+ if image_format != self.model.image_format: + image = image[..., ::-1] + + # Transform the image to the form expected by the model + input_image = self.transform.apply_image(image) + input_image_torch = torch.as_tensor(input_image, device=self.device) + input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :] + + self.set_torch_image(input_image_torch, image.shape[:2]) + + @torch.no_grad() + def set_torch_image( + self, + transformed_image: torch.Tensor, + original_image_size: Tuple[int, ...], + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. Expects the input + image to be already transformed to the format expected by the model. + + Arguments: + transformed_image (torch.Tensor): The input image, with shape + 1x3xHxW, which has been transformed with ResizeLongestSide. + original_image_size (tuple(int, int)): The size of the image + before transformation, in (H, W) format. + """ + assert ( + len(transformed_image.shape) == 4 + and transformed_image.shape[1] == 3 + and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size + ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}." + self.reset_image() + + self.original_size = original_image_size + self.input_size = tuple(transformed_image.shape[-2:]) + input_image = self.model.preprocess(transformed_image) + self.features = self.model.image_encoder(input_image) + self.is_image_set = True + + def predict( + self, + point_coords: Optional[np.ndarray] = None, + point_labels: Optional[np.ndarray] = None, + box: Optional[np.ndarray] = None, + mask_input: Optional[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + + Arguments: + point_coords (np.ndarray or None): A Nx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (np.ndarray or None): A length N array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A length 4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form 1xHxW, where + for SAM, H=W=256. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. These low resolution logits can be passed to + a subsequent iteration as mask input. + """ + if not self.is_image_set: + raise RuntimeError("An image must be set with .set_image(...) 
before mask prediction.") + + # Transform input prompts + coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None + if point_coords is not None: + assert ( + point_labels is not None + ), "point_labels must be supplied if point_coords is supplied." + point_coords = self.transform.apply_coords(point_coords, self.original_size) + coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device) + labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) + coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :] + if box is not None: + box = self.transform.apply_boxes(box, self.original_size) + box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) + box_torch = box_torch[None, :] + if mask_input is not None: + mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device) + mask_input_torch = mask_input_torch[None, :, :, :] + + masks, iou_predictions, low_res_masks = self.predict_torch( + coords_torch, + labels_torch, + box_torch, + mask_input_torch, + multimask_output, + return_logits=return_logits, + ) + + masks = masks[0].detach().cpu().numpy() + iou_predictions = iou_predictions[0].detach().cpu().numpy() + low_res_masks = low_res_masks[0].detach().cpu().numpy() + return masks, iou_predictions, low_res_masks + + @torch.no_grad() + def predict_torch( + self, + point_coords: Optional[torch.Tensor], + point_labels: Optional[torch.Tensor], + boxes: Optional[torch.Tensor] = None, + mask_input: Optional[torch.Tensor] = None, + multimask_output: bool = True, + return_logits: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + Input prompts are batched torch tensors and are expected to already be + transformed to the input frame using ResizeLongestSide. + + Arguments: + point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (torch.Tensor or None): A BxN array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A Bx4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form Bx1xHxW, where + for SAM, H=W=256. Masks returned by a previous iteration of the + predict method do not need further transformation. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (torch.Tensor): The output masks in BxCxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (torch.Tensor): An array of shape BxC containing the model's + predictions for the quality of each mask. + (torch.Tensor): An array of shape BxCxHxW, where C is the number + of masks and H=W=256. These low res logits can be passed to + a subsequent iteration as mask input. 
+ """ + if not self.is_image_set: + raise RuntimeError("An image must be set with .set_image(...) before mask prediction.") + + if point_coords is not None: + points = (point_coords, point_labels) + else: + points = None + + # Embed prompts + sparse_embeddings, dense_embeddings = self.model.prompt_encoder( + points=points, + boxes=boxes, + masks=mask_input, + ) + + # Predict masks + low_res_masks, iou_predictions = self.model.mask_decoder( + image_embeddings=self.features, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + + # Upscale the masks to the original image resolution + masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size) + + if not return_logits: + masks = masks > self.model.mask_threshold + + return masks, iou_predictions, low_res_masks + + def get_image_embedding(self) -> torch.Tensor: + """ + Returns the image embeddings for the currently set image, with + shape 1xCxHxW, where C is the embedding dimension and (H,W) are + the embedding spatial dimension of SAM (typically C=256, H=W=64). + """ + if not self.is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) to generate an embedding." + ) + assert self.features is not None, "Features must exist if an image has been set." + return self.features + + @property + def device(self) -> torch.device: + return self.model.device + + def reset_image(self) -> None: + """Resets the currently set image.""" + self.is_image_set = False + self.features = None + self.orig_h = None + self.orig_w = None + self.input_h = None + self.input_w = None diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/__init__.py b/AllinonSAM/prompt_adapted_segment_anything/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/__init__.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6827950a0d5fbd21f221f73d178a829bb1dc914 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/__init__.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/__init__.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f8964a2fa72097d886bd6276848b96a7ac88330 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/__init__.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/__init__.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cee6c4db640fbc0d27fdc7d32708c17028fa22b9 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/__init__.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/amg.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/amg.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..986d1dd6d4b6ca7d0d0f8ac04412d58bb98cd06d Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/amg.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/amg.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/amg.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3851c6d61adb06af60c55dd745479267c9e6650b Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/amg.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/amg.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/amg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d6e14816da1d700bdb6059f1a6e4d4aa5f14024 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/amg.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/transforms.cpython-312.pyc b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/transforms.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e25c88d98c60f58abe822d29dd04bbe561fed7cf Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/transforms.cpython-312.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/transforms.cpython-38.pyc b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/transforms.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93cfcb3eba7a8381cabed8b0bc16dfbe788a4d76 Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/transforms.cpython-38.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/transforms.cpython-39.pyc b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/transforms.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4fc752da649542cb52ec51601ac7ea493ab5c59a Binary files /dev/null and b/AllinonSAM/prompt_adapted_segment_anything/utils/__pycache__/transforms.cpython-39.pyc differ diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/amg.py b/AllinonSAM/prompt_adapted_segment_anything/utils/amg.py new file mode 100644 index 0000000000000000000000000000000000000000..3a137778e45c464c079658ecb87ec53270e789f7 --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/utils/amg.py @@ -0,0 +1,346 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch + +import math +from copy import deepcopy +from itertools import product +from typing import Any, Dict, Generator, ItemsView, List, Tuple + + +class MaskData: + """ + A structure for storing masks and their related data in batched format. + Implements basic filtering and concatenation. + """ + + def __init__(self, **kwargs) -> None: + for v in kwargs.values(): + assert isinstance( + v, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." + self._stats = dict(**kwargs) + + def __setitem__(self, key: str, item: Any) -> None: + assert isinstance( + item, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." + self._stats[key] = item + + def __delitem__(self, key: str) -> None: + del self._stats[key] + + def __getitem__(self, key: str) -> Any: + return self._stats[key] + + def items(self) -> ItemsView[str, Any]: + return self._stats.items() + + def filter(self, keep: torch.Tensor) -> None: + for k, v in self._stats.items(): + if v is None: + self._stats[k] = None + elif isinstance(v, torch.Tensor): + self._stats[k] = v[torch.as_tensor(keep, device=v.device)] + elif isinstance(v, np.ndarray): + self._stats[k] = v[keep.detach().cpu().numpy()] + elif isinstance(v, list) and keep.dtype == torch.bool: + self._stats[k] = [a for i, a in enumerate(v) if keep[i]] + elif isinstance(v, list): + self._stats[k] = [v[i] for i in keep] + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def cat(self, new_stats: "MaskData") -> None: + for k, v in new_stats.items(): + if k not in self._stats or self._stats[k] is None: + self._stats[k] = deepcopy(v) + elif isinstance(v, torch.Tensor): + self._stats[k] = torch.cat([self._stats[k], v], dim=0) + elif isinstance(v, np.ndarray): + self._stats[k] = np.concatenate([self._stats[k], v], axis=0) + elif isinstance(v, list): + self._stats[k] = self._stats[k] + deepcopy(v) + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def to_numpy(self) -> None: + for k, v in self._stats.items(): + if isinstance(v, torch.Tensor): + self._stats[k] = v.detach().cpu().numpy() + + +def is_box_near_crop_edge( + boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0 +) -> torch.Tensor: + """Filter masks at the edge of a crop, but not at the edge of the original image.""" + crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) + orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) + boxes = uncrop_boxes_xyxy(boxes, crop_box).float() + near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) + near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], 
atol=atol, rtol=0) + near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) + return torch.any(near_crop_edge, dim=1) + + +def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor: + box_xywh = deepcopy(box_xyxy) + box_xywh[2] = box_xywh[2] - box_xywh[0] + box_xywh[3] = box_xywh[3] - box_xywh[1] + return box_xywh + + +def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]: + assert len(args) > 0 and all( + len(a) == len(args[0]) for a in args + ), "Batched iteration must have inputs of all the same size." + n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0) + for b in range(n_batches): + yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args] + + +def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]: + """ + Encodes masks to an uncompressed RLE, in the format expected by + pycoco tools. + """ + # Put in fortran order and flatten h,w + b, h, w = tensor.shape + tensor = tensor.permute(0, 2, 1).flatten(1) + + # Compute change indices + diff = tensor[:, 1:] ^ tensor[:, :-1] + change_indices = diff.nonzero() + + # Encode run length + out = [] + for i in range(b): + cur_idxs = change_indices[change_indices[:, 0] == i, 1] + cur_idxs = torch.cat( + [ + torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device), + cur_idxs + 1, + torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), + ] + ) + btw_idxs = cur_idxs[1:] - cur_idxs[:-1] + counts = [] if tensor[i, 0] == 0 else [0] + counts.extend(btw_idxs.detach().cpu().tolist()) + out.append({"size": [h, w], "counts": counts}) + return out + + +def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: + """Compute a binary mask from an uncompressed RLE.""" + h, w = rle["size"] + mask = np.empty(h * w, dtype=bool) + idx = 0 + parity = False + for count in rle["counts"]: + mask[idx : idx + count] = parity + idx += count + parity ^= True + mask = mask.reshape(w, h) + return mask.transpose() # Put in C order + + +def area_from_rle(rle: Dict[str, Any]) -> int: + return sum(rle["counts"][1::2]) + + +def calculate_stability_score( + masks: torch.Tensor, mask_threshold: float, threshold_offset: float +) -> torch.Tensor: + """ + Computes the stability score for a batch of masks. The stability + score is the IoU between the binary masks obtained by thresholding + the predicted mask logits at high and low values. + """ + # One mask is always contained inside the other. 
+ # Save memory by preventing unnecesary cast to torch.int64 + intersections = ( + (masks > (mask_threshold + threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + unions = ( + (masks > (mask_threshold - threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + return intersections / unions + + +def build_point_grid(n_per_side: int) -> np.ndarray: + """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" + offset = 1 / (2 * n_per_side) + points_one_side = np.linspace(offset, 1 - offset, n_per_side) + points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) + points_y = np.tile(points_one_side[:, None], (1, n_per_side)) + points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) + return points + + +def build_all_layer_point_grids( + n_per_side: int, n_layers: int, scale_per_layer: int +) -> List[np.ndarray]: + """Generates point grids for all crop layers.""" + points_by_layer = [] + for i in range(n_layers + 1): + n_points = int(n_per_side / (scale_per_layer**i)) + points_by_layer.append(build_point_grid(n_points)) + return points_by_layer + + +def generate_crop_boxes( + im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float +) -> Tuple[List[List[int]], List[int]]: + """ + Generates a list of crop boxes of different sizes. Each layer + has (2**i)**2 boxes for the ith layer. + """ + crop_boxes, layer_idxs = [], [] + im_h, im_w = im_size + short_side = min(im_h, im_w) + + # Original image + crop_boxes.append([0, 0, im_w, im_h]) + layer_idxs.append(0) + + def crop_len(orig_len, n_crops, overlap): + return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)) + + for i_layer in range(n_layers): + n_crops_per_side = 2 ** (i_layer + 1) + overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) + + crop_w = crop_len(im_w, n_crops_per_side, overlap) + crop_h = crop_len(im_h, n_crops_per_side, overlap) + + crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)] + crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)] + + # Crops in XYWH format + for x0, y0 in product(crop_box_x0, crop_box_y0): + box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] + crop_boxes.append(box) + layer_idxs.append(i_layer + 1) + + return crop_boxes, layer_idxs + + +def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device) + # Check if boxes has a channel dimension + if len(boxes.shape) == 3: + offset = offset.unsqueeze(1) + return boxes + offset + + +def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0]], device=points.device) + # Check if points has a channel dimension + if len(points.shape) == 3: + offset = offset.unsqueeze(1) + return points + offset + + +def uncrop_masks( + masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int +) -> torch.Tensor: + x0, y0, x1, y1 = crop_box + if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: + return masks + # Coordinate transform masks + pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0) + pad = (x0, pad_x - x0, y0, pad_y - y0) + return torch.nn.functional.pad(masks, pad, value=0) + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. 
Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + + +def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]: + from pycocotools import mask as mask_utils # type: ignore + + h, w = uncompressed_rle["size"] + rle = mask_utils.frPyObjects(uncompressed_rle, h, w) + rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json + return rle + + +def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor: + """ + Calculates boxes in XYXY format around masks. Return [0,0,0,0] for + an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4. + """ + # torch.max below raises an error on empty inputs, just skip in this case + if torch.numel(masks) == 0: + return torch.zeros(*masks.shape[:-2], 4, device=masks.device) + + # Normalize shape to CxHxW + shape = masks.shape + h, w = shape[-2:] + if len(shape) > 2: + masks = masks.flatten(0, -3) + else: + masks = masks.unsqueeze(0) + + # Get top and bottom edges + in_height, _ = torch.max(masks, dim=-1) + in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :] + bottom_edges, _ = torch.max(in_height_coords, dim=-1) + in_height_coords = in_height_coords + h * (~in_height) + top_edges, _ = torch.min(in_height_coords, dim=-1) + + # Get left and right edges + in_width, _ = torch.max(masks, dim=-2) + in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :] + right_edges, _ = torch.max(in_width_coords, dim=-1) + in_width_coords = in_width_coords + w * (~in_width) + left_edges, _ = torch.min(in_width_coords, dim=-1) + + # If the mask is empty the right edge will be to the left of the left edge. + # Replace these boxes with [0, 0, 0, 0] + empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) + out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) + out = out * (~empty_filter).unsqueeze(-1) + + # Return to original shape + if len(shape) > 2: + out = out.reshape(*shape[:-2], 4) + else: + out = out[0] + + return out diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/onnx.py b/AllinonSAM/prompt_adapted_segment_anything/utils/onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..4297b31291e036700d6ad0b818afb7dd72da3054 --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/utils/onnx.py @@ -0,0 +1,144 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
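The `amg.py` helpers above (uncompressed RLE encoding and box extraction) can be exercised standalone. A small round-trip sketch with invented toy masks follows; the import path mirrors the file added in this diff, while the sample tensors are illustrative.

```python
# Round trip through the amg.py utilities shown above (toy masks are assumptions).
import torch
from AllinonSAM.prompt_adapted_segment_anything.utils.amg import (
    mask_to_rle_pytorch,
    rle_to_mask,
    batched_mask_to_box,
)

masks = torch.zeros(2, 8, 8, dtype=torch.bool)
masks[0, 2:5, 3:7] = True          # a small filled rectangle; the second mask stays empty

rles = mask_to_rle_pytorch(masks)                  # pycocotools-style uncompressed RLE
recovered = torch.as_tensor(rle_to_mask(rles[0]))  # decode the first mask back to HxW
assert torch.equal(recovered, masks[0])

boxes = batched_mask_to_box(masks)  # XYXY boxes; empty masks map to [0, 0, 0, 0]
print(boxes)                        # -> [[3, 2, 6, 4], [0, 0, 0, 0]]
```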
+ +import torch +import torch.nn as nn +from torch.nn import functional as F + +from typing import Tuple + +from ..modeling import Sam +from .amg import calculate_stability_score + + +class SamOnnxModel(nn.Module): + """ + This model should not be called directly, but is used in ONNX export. + It combines the prompt encoder, mask decoder, and mask postprocessing of Sam, + with some functions modified to enable model tracing. Also supports extra + options controlling what information. See the ONNX export script for details. + """ + + def __init__( + self, + model: Sam, + return_single_mask: bool, + use_stability_score: bool = False, + return_extra_metrics: bool = False, + ) -> None: + super().__init__() + self.mask_decoder = model.mask_decoder + self.model = model + self.img_size = model.image_encoder.img_size + self.return_single_mask = return_single_mask + self.use_stability_score = use_stability_score + self.stability_score_offset = 1.0 + self.return_extra_metrics = return_extra_metrics + + @staticmethod + def resize_longest_image_size( + input_image_size: torch.Tensor, longest_side: int + ) -> torch.Tensor: + input_image_size = input_image_size.to(torch.float32) + scale = longest_side / torch.max(input_image_size) + transformed_size = scale * input_image_size + transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64) + return transformed_size + + def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor: + point_coords = point_coords + 0.5 + point_coords = point_coords / self.img_size + point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords) + point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding) + + point_embedding = point_embedding * (point_labels != -1) + point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * ( + point_labels == -1 + ) + + for i in range(self.model.prompt_encoder.num_point_embeddings): + point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[ + i + ].weight * (point_labels == i) + + return point_embedding + + def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor: + mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask) + mask_embedding = mask_embedding + ( + 1 - has_mask_input + ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1) + return mask_embedding + + def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor: + masks = F.interpolate( + masks, + size=(self.img_size, self.img_size), + mode="bilinear", + align_corners=False, + ) + + prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size) + masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])] + + orig_im_size = orig_im_size.to(torch.int64) + h, w = orig_im_size[0], orig_im_size[1] + masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False) + return masks + + def select_masks( + self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int + ) -> Tuple[torch.Tensor, torch.Tensor]: + # Determine if we should return the multiclick mask or not from the number of points. + # The reweighting is used to avoid control flow. 
+ score_reweight = torch.tensor( + [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)] + ).to(iou_preds.device) + score = iou_preds + (num_points - 2.5) * score_reweight + best_idx = torch.argmax(score, dim=1) + masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1) + iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1) + + return masks, iou_preds + + @torch.no_grad() + def forward( + self, + image_embeddings: torch.Tensor, + point_coords: torch.Tensor, + point_labels: torch.Tensor, + mask_input: torch.Tensor, + has_mask_input: torch.Tensor, + orig_im_size: torch.Tensor, + ): + sparse_embedding = self._embed_points(point_coords, point_labels) + dense_embedding = self._embed_masks(mask_input, has_mask_input) + + masks, scores = self.model.mask_decoder.predict_masks( + image_embeddings=image_embeddings, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embedding, + dense_prompt_embeddings=dense_embedding, + ) + + if self.use_stability_score: + scores = calculate_stability_score( + masks, self.model.mask_threshold, self.stability_score_offset + ) + + if self.return_single_mask: + masks, scores = self.select_masks(masks, scores, point_coords.shape[1]) + + upscaled_masks = self.mask_postprocessing(masks, orig_im_size) + + if self.return_extra_metrics: + stability_scores = calculate_stability_score( + upscaled_masks, self.model.mask_threshold, self.stability_score_offset + ) + areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1) + return upscaled_masks, scores, stability_scores, areas, masks + + return upscaled_masks, scores, masks diff --git a/AllinonSAM/prompt_adapted_segment_anything/utils/transforms.py b/AllinonSAM/prompt_adapted_segment_anything/utils/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..3ad346661f84b0647026e130a552c4b38b83e2ac --- /dev/null +++ b/AllinonSAM/prompt_adapted_segment_anything/utils/transforms.py @@ -0,0 +1,102 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch +from torch.nn import functional as F +from torchvision.transforms.functional import resize, to_pil_image # type: ignore + +from copy import deepcopy +from typing import Tuple + + +class ResizeLongestSide: + """ + Resizes images to longest side 'target_length', as well as provides + methods for resizing coordinates and boxes. Provides methods for + transforming both numpy array and batched torch tensors. + """ + + def __init__(self, target_length: int) -> None: + self.target_length = target_length + + def apply_image(self, image: np.ndarray) -> np.ndarray: + """ + Expects a numpy array with shape HxWxC in uint8 format. + """ + target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length) + return np.array(resize(to_pil_image(image), target_size)) + + def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: + """ + Expects a numpy array of length 2 in the final dimension. Requires the + original image size in (H, W) format. 
+ """ + old_h, old_w = original_size + new_h, new_w = self.get_preprocess_shape( + original_size[0], original_size[1], self.target_length + ) + coords = deepcopy(coords).astype(float) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords + + def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: + """ + Expects a numpy array shape Bx4. Requires the original image size + in (H, W) format. + """ + boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size) + return boxes.reshape(-1, 4) + + def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor: + """ + Expects batched images with shape BxCxHxW and float format. This + transformation may not exactly match apply_image. apply_image is + the transformation expected by the model. + """ + # Expects an image in BCHW format. May not exactly match apply_image. + target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length) + return F.interpolate( + image, target_size, mode="bilinear", align_corners=False, antialias=True + ) + + def apply_coords_torch( + self, coords: torch.Tensor, original_size: Tuple[int, ...] + ) -> torch.Tensor: + """ + Expects a torch tensor with length 2 in the last dimension. Requires the + original image size in (H, W) format. + """ + old_h, old_w = original_size + new_h, new_w = self.get_preprocess_shape( + original_size[0], original_size[1], self.target_length + ) + coords = deepcopy(coords).to(torch.float) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords + + def apply_boxes_torch( + self, boxes: torch.Tensor, original_size: Tuple[int, ...] + ) -> torch.Tensor: + """ + Expects a torch tensor with shape Bx4. Requires the original image + size in (H, W) format. + """ + boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size) + return boxes.reshape(-1, 4) + + @staticmethod + def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]: + """ + Compute the output size given input size and target long side length. 
+ """ + scale = long_side_length * 1.0 / max(oldh, oldw) + newh, neww = oldh * scale, oldw * scale + neww = int(neww + 0.5) + newh = int(newh + 0.5) + return (newh, neww) diff --git a/AllinonSAM/scratchpad.ipynb b/AllinonSAM/scratchpad.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..1a5c9ffb3b61c6184d75e45a2e765c28a2d6b960 --- /dev/null +++ b/AllinonSAM/scratchpad.ipynb @@ -0,0 +1,1034 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import torch\n", + "import os\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from PIL import Image\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "im_path = \"/media/ubuntu/New Volume/jay/fundus_images/archive/REFUGE/val/Masks/V0001.png\"\n", + "img = torch.as_tensor(np.array(Image.open(im_path).convert(\"RGB\")))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([0, 1, 2], dtype=uint8)" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.unique(img[:,:,0])" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "model_dict_path = \"./eval/cholec8k/svd_shiftscale_cholec_tal_focal075_1e-4.pth\"\n", + "model_dict = torch.load(model_dict_path, map_location='cpu')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "keys = list(model_dict.keys())" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['sam_encoder.pos_embed',\n", + " 'sam_encoder.patch_embed.proj.weight',\n", + " 'sam_encoder.patch_embed.proj.bias',\n", + " 'sam_encoder.blocks.0.norm1.weight',\n", + " 'sam_encoder.blocks.0.norm1.bias',\n", + " 'sam_encoder.blocks.0.attn.qkv.weight',\n", + " 'sam_encoder.blocks.0.attn.qkv.bias',\n", + " 'sam_encoder.blocks.0.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.0.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.0.attn.proj.weight',\n", + " 'sam_encoder.blocks.0.attn.proj.bias',\n", + " 'sam_encoder.blocks.0.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.0.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.0.norm2.weight',\n", + " 'sam_encoder.blocks.0.norm2.bias',\n", + " 'sam_encoder.blocks.0.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.0.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.0.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.0.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.0.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.0.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.0.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.0.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.1.norm1.weight',\n", + " 'sam_encoder.blocks.1.norm1.bias',\n", + " 'sam_encoder.blocks.1.attn.qkv.weight',\n", + " 'sam_encoder.blocks.1.attn.qkv.bias',\n", + " 'sam_encoder.blocks.1.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.1.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.1.attn.proj.weight',\n", + " 'sam_encoder.blocks.1.attn.proj.bias',\n", + " 
'sam_encoder.blocks.1.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.1.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.1.norm2.weight',\n", + " 'sam_encoder.blocks.1.norm2.bias',\n", + " 'sam_encoder.blocks.1.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.1.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.1.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.1.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.1.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.1.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.1.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.1.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.2.norm1.weight',\n", + " 'sam_encoder.blocks.2.norm1.bias',\n", + " 'sam_encoder.blocks.2.attn.qkv.weight',\n", + " 'sam_encoder.blocks.2.attn.qkv.bias',\n", + " 'sam_encoder.blocks.2.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.2.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.2.attn.proj.weight',\n", + " 'sam_encoder.blocks.2.attn.proj.bias',\n", + " 'sam_encoder.blocks.2.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.2.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.2.norm2.weight',\n", + " 'sam_encoder.blocks.2.norm2.bias',\n", + " 'sam_encoder.blocks.2.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.2.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.2.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.2.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.2.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.2.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.2.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.2.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.3.norm1.weight',\n", + " 'sam_encoder.blocks.3.norm1.bias',\n", + " 'sam_encoder.blocks.3.attn.qkv.weight',\n", + " 'sam_encoder.blocks.3.attn.qkv.bias',\n", + " 'sam_encoder.blocks.3.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.3.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.3.attn.proj.weight',\n", + " 'sam_encoder.blocks.3.attn.proj.bias',\n", + " 'sam_encoder.blocks.3.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.3.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.3.norm2.weight',\n", + " 'sam_encoder.blocks.3.norm2.bias',\n", + " 'sam_encoder.blocks.3.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.3.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.3.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.3.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.3.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.3.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.3.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.3.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.4.norm1.weight',\n", + " 'sam_encoder.blocks.4.norm1.bias',\n", + " 'sam_encoder.blocks.4.attn.qkv.weight',\n", + " 'sam_encoder.blocks.4.attn.qkv.bias',\n", + " 'sam_encoder.blocks.4.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.4.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.4.attn.proj.weight',\n", + " 'sam_encoder.blocks.4.attn.proj.bias',\n", + " 'sam_encoder.blocks.4.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.4.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.4.norm2.weight',\n", + " 'sam_encoder.blocks.4.norm2.bias',\n", + " 'sam_encoder.blocks.4.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.4.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.4.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.4.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.4.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.4.mlp.lin2.bias',\n", + " 
'sam_encoder.blocks.4.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.4.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.5.norm1.weight',\n", + " 'sam_encoder.blocks.5.norm1.bias',\n", + " 'sam_encoder.blocks.5.attn.qkv.weight',\n", + " 'sam_encoder.blocks.5.attn.qkv.bias',\n", + " 'sam_encoder.blocks.5.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.5.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.5.attn.proj.weight',\n", + " 'sam_encoder.blocks.5.attn.proj.bias',\n", + " 'sam_encoder.blocks.5.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.5.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.5.norm2.weight',\n", + " 'sam_encoder.blocks.5.norm2.bias',\n", + " 'sam_encoder.blocks.5.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.5.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.5.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.5.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.5.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.5.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.5.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.5.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.6.norm1.weight',\n", + " 'sam_encoder.blocks.6.norm1.bias',\n", + " 'sam_encoder.blocks.6.attn.qkv.weight',\n", + " 'sam_encoder.blocks.6.attn.qkv.bias',\n", + " 'sam_encoder.blocks.6.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.6.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.6.attn.proj.weight',\n", + " 'sam_encoder.blocks.6.attn.proj.bias',\n", + " 'sam_encoder.blocks.6.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.6.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.6.norm2.weight',\n", + " 'sam_encoder.blocks.6.norm2.bias',\n", + " 'sam_encoder.blocks.6.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.6.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.6.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.6.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.6.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.6.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.6.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.6.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.7.norm1.weight',\n", + " 'sam_encoder.blocks.7.norm1.bias',\n", + " 'sam_encoder.blocks.7.attn.qkv.weight',\n", + " 'sam_encoder.blocks.7.attn.qkv.bias',\n", + " 'sam_encoder.blocks.7.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.7.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.7.attn.proj.weight',\n", + " 'sam_encoder.blocks.7.attn.proj.bias',\n", + " 'sam_encoder.blocks.7.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.7.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.7.norm2.weight',\n", + " 'sam_encoder.blocks.7.norm2.bias',\n", + " 'sam_encoder.blocks.7.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.7.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.7.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.7.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.7.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.7.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.7.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.7.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.8.norm1.weight',\n", + " 'sam_encoder.blocks.8.norm1.bias',\n", + " 'sam_encoder.blocks.8.attn.qkv.weight',\n", + " 'sam_encoder.blocks.8.attn.qkv.bias',\n", + " 'sam_encoder.blocks.8.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.8.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.8.attn.proj.weight',\n", + " 'sam_encoder.blocks.8.attn.proj.bias',\n", + " 
'sam_encoder.blocks.8.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.8.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.8.norm2.weight',\n", + " 'sam_encoder.blocks.8.norm2.bias',\n", + " 'sam_encoder.blocks.8.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.8.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.8.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.8.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.8.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.8.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.8.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.8.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.9.norm1.weight',\n", + " 'sam_encoder.blocks.9.norm1.bias',\n", + " 'sam_encoder.blocks.9.attn.qkv.weight',\n", + " 'sam_encoder.blocks.9.attn.qkv.bias',\n", + " 'sam_encoder.blocks.9.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.9.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.9.attn.proj.weight',\n", + " 'sam_encoder.blocks.9.attn.proj.bias',\n", + " 'sam_encoder.blocks.9.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.9.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.9.norm2.weight',\n", + " 'sam_encoder.blocks.9.norm2.bias',\n", + " 'sam_encoder.blocks.9.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.9.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.9.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.9.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.9.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.9.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.9.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.9.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.10.norm1.weight',\n", + " 'sam_encoder.blocks.10.norm1.bias',\n", + " 'sam_encoder.blocks.10.attn.qkv.weight',\n", + " 'sam_encoder.blocks.10.attn.qkv.bias',\n", + " 'sam_encoder.blocks.10.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.10.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.10.attn.proj.weight',\n", + " 'sam_encoder.blocks.10.attn.proj.bias',\n", + " 'sam_encoder.blocks.10.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.10.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.10.norm2.weight',\n", + " 'sam_encoder.blocks.10.norm2.bias',\n", + " 'sam_encoder.blocks.10.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.10.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.10.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.10.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.10.mlp.lin2.weight',\n", + " 'sam_encoder.blocks.10.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.10.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.10.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.blocks.11.norm1.weight',\n", + " 'sam_encoder.blocks.11.norm1.bias',\n", + " 'sam_encoder.blocks.11.attn.qkv.weight',\n", + " 'sam_encoder.blocks.11.attn.qkv.bias',\n", + " 'sam_encoder.blocks.11.attn.qkv.trainable_scale',\n", + " 'sam_encoder.blocks.11.attn.qkv.trainable_shift',\n", + " 'sam_encoder.blocks.11.attn.proj.weight',\n", + " 'sam_encoder.blocks.11.attn.proj.bias',\n", + " 'sam_encoder.blocks.11.attn.proj.trainable_scale',\n", + " 'sam_encoder.blocks.11.attn.proj.trainable_shift',\n", + " 'sam_encoder.blocks.11.norm2.weight',\n", + " 'sam_encoder.blocks.11.norm2.bias',\n", + " 'sam_encoder.blocks.11.mlp.lin1.weight',\n", + " 'sam_encoder.blocks.11.mlp.lin1.bias',\n", + " 'sam_encoder.blocks.11.mlp.lin1.trainable_scale',\n", + " 'sam_encoder.blocks.11.mlp.lin1.trainable_shift',\n", + " 'sam_encoder.blocks.11.mlp.lin2.weight',\n", + " 
'sam_encoder.blocks.11.mlp.lin2.bias',\n", + " 'sam_encoder.blocks.11.mlp.lin2.trainable_scale',\n", + " 'sam_encoder.blocks.11.mlp.lin2.trainable_shift',\n", + " 'sam_encoder.neck.0.weight',\n", + " 'sam_encoder.neck.0.trainable_scale',\n", + " 'sam_encoder.neck.0.trainable_shift',\n", + " 'sam_encoder.neck.1.weight',\n", + " 'sam_encoder.neck.1.bias',\n", + " 'sam_encoder.neck.2.weight',\n", + " 'sam_encoder.neck.2.trainable_scale',\n", + " 'sam_encoder.neck.2.trainable_shift',\n", + " 'sam_encoder.neck.3.weight',\n", + " 'sam_encoder.neck.3.bias',\n", + " 'clip_model.positional_embedding',\n", + " 'clip_model.text_projection',\n", + " 'clip_model.logit_scale',\n", + " 'clip_model.visual.class_embedding',\n", + " 'clip_model.visual.positional_embedding',\n", + " 'clip_model.visual.proj',\n", + " 'clip_model.visual.conv1.weight',\n", + " 'clip_model.visual.ln_pre.weight',\n", + " 'clip_model.visual.ln_pre.bias',\n", + " 'clip_model.visual.transformer.resblocks.0.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.0.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.0.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.0.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.0.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.0.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.0.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.0.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.0.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.0.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.0.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.0.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.1.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.1.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.1.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.1.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.1.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.1.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.1.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.1.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.1.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.1.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.1.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.1.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.2.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.2.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.2.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.2.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.2.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.2.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.2.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.2.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.2.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.2.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.2.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.2.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.3.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.3.attn.in_proj_bias',\n", + " 
'clip_model.visual.transformer.resblocks.3.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.3.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.3.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.3.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.3.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.3.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.3.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.3.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.3.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.3.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.4.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.4.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.4.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.4.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.4.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.4.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.4.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.4.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.4.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.4.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.4.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.4.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.5.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.5.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.5.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.5.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.5.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.5.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.5.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.5.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.5.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.5.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.5.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.5.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.6.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.6.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.6.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.6.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.6.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.6.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.6.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.6.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.6.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.6.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.6.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.6.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.7.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.7.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.7.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.7.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.7.ln_1.weight',\n", + " 
'clip_model.visual.transformer.resblocks.7.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.7.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.7.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.7.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.7.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.7.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.7.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.8.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.8.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.8.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.8.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.8.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.8.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.8.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.8.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.8.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.8.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.8.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.8.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.9.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.9.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.9.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.9.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.9.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.9.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.9.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.9.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.9.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.9.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.9.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.9.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.10.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.10.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.10.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.10.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.10.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.10.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.10.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.10.mlp.c_fc.bias',\n", + " 'clip_model.visual.transformer.resblocks.10.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.10.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.10.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.10.ln_2.bias',\n", + " 'clip_model.visual.transformer.resblocks.11.attn.in_proj_weight',\n", + " 'clip_model.visual.transformer.resblocks.11.attn.in_proj_bias',\n", + " 'clip_model.visual.transformer.resblocks.11.attn.out_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.11.attn.out_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.11.ln_1.weight',\n", + " 'clip_model.visual.transformer.resblocks.11.ln_1.bias',\n", + " 'clip_model.visual.transformer.resblocks.11.mlp.c_fc.weight',\n", + " 'clip_model.visual.transformer.resblocks.11.mlp.c_fc.bias',\n", + " 
'clip_model.visual.transformer.resblocks.11.mlp.c_proj.weight',\n", + " 'clip_model.visual.transformer.resblocks.11.mlp.c_proj.bias',\n", + " 'clip_model.visual.transformer.resblocks.11.ln_2.weight',\n", + " 'clip_model.visual.transformer.resblocks.11.ln_2.bias',\n", + " 'clip_model.visual.ln_post.weight',\n", + " 'clip_model.visual.ln_post.bias',\n", + " 'clip_model.transformer.resblocks.0.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.0.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.0.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.0.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.0.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.0.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.0.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.0.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.0.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.0.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.0.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.0.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.1.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.1.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.1.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.1.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.1.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.1.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.1.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.1.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.1.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.1.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.1.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.1.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.2.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.2.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.2.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.2.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.2.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.2.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.2.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.2.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.2.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.2.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.2.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.2.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.3.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.3.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.3.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.3.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.3.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.3.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.3.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.3.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.3.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.3.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.3.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.3.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.4.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.4.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.4.attn.out_proj.weight',\n", + " 
'clip_model.transformer.resblocks.4.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.4.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.4.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.4.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.4.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.4.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.4.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.4.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.4.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.5.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.5.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.5.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.5.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.5.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.5.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.5.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.5.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.5.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.5.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.5.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.5.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.6.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.6.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.6.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.6.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.6.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.6.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.6.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.6.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.6.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.6.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.6.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.6.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.7.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.7.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.7.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.7.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.7.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.7.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.7.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.7.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.7.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.7.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.7.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.7.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.8.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.8.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.8.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.8.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.8.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.8.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.8.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.8.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.8.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.8.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.8.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.8.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.9.attn.in_proj_weight',\n", 
+ " 'clip_model.transformer.resblocks.9.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.9.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.9.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.9.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.9.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.9.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.9.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.9.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.9.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.9.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.9.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.10.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.10.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.10.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.10.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.10.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.10.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.10.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.10.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.10.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.10.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.10.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.10.ln_2.bias',\n", + " 'clip_model.transformer.resblocks.11.attn.in_proj_weight',\n", + " 'clip_model.transformer.resblocks.11.attn.in_proj_bias',\n", + " 'clip_model.transformer.resblocks.11.attn.out_proj.weight',\n", + " 'clip_model.transformer.resblocks.11.attn.out_proj.bias',\n", + " 'clip_model.transformer.resblocks.11.ln_1.weight',\n", + " 'clip_model.transformer.resblocks.11.ln_1.bias',\n", + " 'clip_model.transformer.resblocks.11.mlp.c_fc.weight',\n", + " 'clip_model.transformer.resblocks.11.mlp.c_fc.bias',\n", + " 'clip_model.transformer.resblocks.11.mlp.c_proj.weight',\n", + " 'clip_model.transformer.resblocks.11.mlp.c_proj.bias',\n", + " 'clip_model.transformer.resblocks.11.ln_2.weight',\n", + " 'clip_model.transformer.resblocks.11.ln_2.bias',\n", + " 'clip_model.token_embedding.weight',\n", + " 'clip_model.ln_final.weight',\n", + " 'clip_model.ln_final.bias',\n", + " 'prompt_encoder.pe_layer.positional_encoding_gaussian_matrix',\n", + " 'prompt_encoder.point_embeddings.0.weight',\n", + " 'prompt_encoder.point_embeddings.1.weight',\n", + " 'prompt_encoder.point_embeddings.2.weight',\n", + " 'prompt_encoder.point_embeddings.3.weight',\n", + " 'prompt_encoder.not_a_point_embed.weight',\n", + " 'prompt_encoder.mask_downscaling.0.weight',\n", + " 'prompt_encoder.mask_downscaling.0.bias',\n", + " 'prompt_encoder.mask_downscaling.1.weight',\n", + " 'prompt_encoder.mask_downscaling.1.bias',\n", + " 'prompt_encoder.mask_downscaling.3.weight',\n", + " 'prompt_encoder.mask_downscaling.3.bias',\n", + " 'prompt_encoder.mask_downscaling.4.weight',\n", + " 'prompt_encoder.mask_downscaling.4.bias',\n", + " 'prompt_encoder.mask_downscaling.6.weight',\n", + " 'prompt_encoder.mask_downscaling.6.bias',\n", + " 'prompt_encoder.no_mask_embed.weight',\n", + " 'mask_decoder.transformer.layers.0.self_attn.q_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.self_attn.q_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.self_attn.k_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.self_attn.k_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.self_attn.v_proj.weight',\n", + " 
'mask_decoder.transformer.layers.0.self_attn.v_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.self_attn.out_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.self_attn.out_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.norm1.weight',\n", + " 'mask_decoder.transformer.layers.0.norm1.bias',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_token_to_image.q_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_token_to_image.q_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_token_to_image.k_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_token_to_image.k_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_token_to_image.v_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_token_to_image.v_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_token_to_image.out_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_token_to_image.out_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.norm2.weight',\n", + " 'mask_decoder.transformer.layers.0.norm2.bias',\n", + " 'mask_decoder.transformer.layers.0.mlp.lin1.weight',\n", + " 'mask_decoder.transformer.layers.0.mlp.lin1.bias',\n", + " 'mask_decoder.transformer.layers.0.mlp.lin1.trainable_scale',\n", + " 'mask_decoder.transformer.layers.0.mlp.lin1.trainable_shift',\n", + " 'mask_decoder.transformer.layers.0.mlp.lin2.weight',\n", + " 'mask_decoder.transformer.layers.0.mlp.lin2.bias',\n", + " 'mask_decoder.transformer.layers.0.mlp.lin2.trainable_scale',\n", + " 'mask_decoder.transformer.layers.0.mlp.lin2.trainable_shift',\n", + " 'mask_decoder.transformer.layers.0.norm3.weight',\n", + " 'mask_decoder.transformer.layers.0.norm3.bias',\n", + " 'mask_decoder.transformer.layers.0.norm4.weight',\n", + " 'mask_decoder.transformer.layers.0.norm4.bias',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_image_to_token.q_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_image_to_token.q_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_image_to_token.k_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_image_to_token.k_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_image_to_token.v_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_image_to_token.v_proj.bias',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_image_to_token.out_proj.weight',\n", + " 'mask_decoder.transformer.layers.0.cross_attn_image_to_token.out_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.self_attn.q_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.self_attn.q_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.self_attn.k_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.self_attn.k_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.self_attn.v_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.self_attn.v_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.self_attn.out_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.self_attn.out_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.norm1.weight',\n", + " 'mask_decoder.transformer.layers.1.norm1.bias',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_token_to_image.q_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_token_to_image.q_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_token_to_image.k_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_token_to_image.k_proj.bias',\n", + " 
'mask_decoder.transformer.layers.1.cross_attn_token_to_image.v_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_token_to_image.v_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_token_to_image.out_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_token_to_image.out_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.norm2.weight',\n", + " 'mask_decoder.transformer.layers.1.norm2.bias',\n", + " 'mask_decoder.transformer.layers.1.mlp.lin1.weight',\n", + " 'mask_decoder.transformer.layers.1.mlp.lin1.bias',\n", + " 'mask_decoder.transformer.layers.1.mlp.lin1.trainable_scale',\n", + " 'mask_decoder.transformer.layers.1.mlp.lin1.trainable_shift',\n", + " 'mask_decoder.transformer.layers.1.mlp.lin2.weight',\n", + " 'mask_decoder.transformer.layers.1.mlp.lin2.bias',\n", + " 'mask_decoder.transformer.layers.1.mlp.lin2.trainable_scale',\n", + " 'mask_decoder.transformer.layers.1.mlp.lin2.trainable_shift',\n", + " 'mask_decoder.transformer.layers.1.norm3.weight',\n", + " 'mask_decoder.transformer.layers.1.norm3.bias',\n", + " 'mask_decoder.transformer.layers.1.norm4.weight',\n", + " 'mask_decoder.transformer.layers.1.norm4.bias',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_image_to_token.q_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_image_to_token.q_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_image_to_token.k_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_image_to_token.k_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_image_to_token.v_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_image_to_token.v_proj.bias',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_image_to_token.out_proj.weight',\n", + " 'mask_decoder.transformer.layers.1.cross_attn_image_to_token.out_proj.bias',\n", + " 'mask_decoder.transformer.final_attn_token_to_image.q_proj.weight',\n", + " 'mask_decoder.transformer.final_attn_token_to_image.q_proj.bias',\n", + " 'mask_decoder.transformer.final_attn_token_to_image.k_proj.weight',\n", + " 'mask_decoder.transformer.final_attn_token_to_image.k_proj.bias',\n", + " 'mask_decoder.transformer.final_attn_token_to_image.v_proj.weight',\n", + " 'mask_decoder.transformer.final_attn_token_to_image.v_proj.bias',\n", + " 'mask_decoder.transformer.final_attn_token_to_image.out_proj.weight',\n", + " 'mask_decoder.transformer.final_attn_token_to_image.out_proj.bias',\n", + " 'mask_decoder.transformer.norm_final_attn.weight',\n", + " 'mask_decoder.transformer.norm_final_attn.bias',\n", + " 'mask_decoder.iou_token.weight',\n", + " 'mask_decoder.mask_tokens.weight',\n", + " 'mask_decoder.output_upscaling.0.weight',\n", + " 'mask_decoder.output_upscaling.0.bias',\n", + " 'mask_decoder.output_upscaling.1.weight',\n", + " 'mask_decoder.output_upscaling.1.bias',\n", + " 'mask_decoder.output_upscaling.3.weight',\n", + " 'mask_decoder.output_upscaling.3.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.0.layers.0.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.0.layers.0.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.0.layers.1.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.0.layers.1.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.0.layers.2.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.0.layers.2.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.1.layers.0.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.1.layers.0.bias',\n", + " 
'mask_decoder.output_hypernetworks_mlps.1.layers.1.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.1.layers.1.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.1.layers.2.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.1.layers.2.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.2.layers.0.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.2.layers.0.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.2.layers.1.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.2.layers.1.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.2.layers.2.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.2.layers.2.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.3.layers.0.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.3.layers.0.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.3.layers.1.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.3.layers.1.bias',\n", + " 'mask_decoder.output_hypernetworks_mlps.3.layers.2.weight',\n", + " 'mask_decoder.output_hypernetworks_mlps.3.layers.2.bias',\n", + " 'mask_decoder.iou_prediction_head.layers.0.weight',\n", + " 'mask_decoder.iou_prediction_head.layers.0.bias',\n", + " 'mask_decoder.iou_prediction_head.layers.1.weight',\n", + " 'mask_decoder.iou_prediction_head.layers.1.bias',\n", + " 'mask_decoder.iou_prediction_head.layers.2.weight',\n", + " 'mask_decoder.iou_prediction_head.layers.2.bias',\n", + " 'Text_Embedding_Affine.0.weight',\n", + " 'Text_Embedding_Affine.0.bias',\n", + " 'Text_Embedding_Affine.2.weight',\n", + " 'Text_Embedding_Affine.2.bias',\n", + " 'Text_Embedding_Affine.2.running_mean',\n", + " 'Text_Embedding_Affine.2.running_var',\n", + " 'Text_Embedding_Affine.2.num_batches_tracked']" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "keys" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def basic_stats(key):\n", + " print('min: ', model_dict[key].min())\n", + " print('max: ', model_dict[key].max())\n", + " print('mean: ', model_dict[key].mean())\n", + " print('norm: ', np.linalg.norm(model_dict[key].numpy()))\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "min: tensor(0.)\n", + "max: tensor(0.)\n", + "mean: tensor(0.)\n", + "norm: 0.0\n" + ] + } + ], + "source": [ + "basic_stats('mask_decoder.transformer.layers.0.mlp.lin1.trainable_shift')" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'a.b.c.d'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a = \"a.b.c.d\"\n", + "a.replace(\"c\",\"e\")\n", + "a" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "import nibabel as nib\n", + "import numpy as np\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['case0005_slice018.npz',\n", + " 'case0027_slice043.npz',\n", + " 'case0027_slice044.npz',\n", + " 'case0027_slice045.npz',\n", + " 'case0027_slice046.npz',\n", + " 'case0027_slice047.npz',\n", + " 'case0027_slice048.npz',\n", + " 'case0027_slice049.npz',\n", + " 'case0027_slice050.npz',\n", + " 'case0027_slice051.npz',\n", + " 'case0027_slice052.npz',\n", + " 
'case0027_slice053.npz',\n", + " 'case0027_slice054.npz',\n", + " 'case0027_slice055.npz',\n", + " 'case0027_slice056.npz',\n", + " 'case0027_slice057.npz',\n", + " 'case0027_slice058.npz',\n", + " 'case0027_slice059.npz',\n", + " 'case0027_slice060.npz',\n", + " 'case0027_slice061.npz',\n", + " 'case0027_slice062.npz',\n", + " 'case0027_slice063.npz',\n", + " 'case0027_slice064.npz',\n", + " 'case0027_slice065.npz',\n", + " 'case0027_slice066.npz',\n", + " 'case0027_slice067.npz',\n", + " 'case0027_slice068.npz',\n", + " 'case0010_slice080.npz',\n", + " 'case0010_slice081.npz',\n", + " 'case0010_slice082.npz',\n", + " 'case0010_slice083.npz',\n", + " 'case0010_slice084.npz']" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "os.listdir('/media/ubuntu/New Volume/jay/BTCV/train_npz')[18:50]" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "# im = nib.load('/media/ubuntu/New Volume/jay/LiTS/images/1.nii')\n", + "im = np.load('/media/ubuntu/New Volume/jay/BTCV/train_npz/case0005_slice000.npz')" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(512, 512)\n" + ] + } + ], + "source": [ + "list(im.keys())\n", + "print(im['image'].shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import torch" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(tensor([0, 0, 1, 1]), tensor([0, 4, 3, 4]))" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a = torch.Tensor([\n", + " [1,0,0,0,1],\n", + " [0,0,0,1,1]\n", + "])\n", + "torch.where(a==1)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor([[315., 581.],\n", + " [339., 340.]])\n" + ] + } + ], + "source": [ + "a = [[torch.tensor(315), torch.tensor(581)], [torch.tensor(339), torch.tensor(340)]]\n", + "print(torch.Tensor(a))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.8.16 ('dassl')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.16" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "5b0d24c0401191df5ff06ef3cb04a21077c1fd7ca08d243336ea8a8a1206ff02" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/AllinonSAM/slurm-44737.out b/AllinonSAM/slurm-44737.out new file mode 100644 index 0000000000000000000000000000000000000000..097c8397996369bead8c0798355a68c94682addd --- /dev/null +++ b/AllinonSAM/slurm-44737.out @@ -0,0 +1,92083 @@ +Fri Oct 18 16:19:28 2024 ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.104.05 Driver Version: 535.104.05 CUDA Version: 12.2 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+======================+======================| +| 0 NVIDIA GeForce RTX 4090 On | 00000000:41:00.0 Off | Off | +| 0% 43C P8 20W / 450W | 7186MiB / 24564MiB | 0% Default | +| | | N/A | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| +| 0 N/A N/A 1743205 G /usr/lib/xorg/Xorg 38MiB | +| 0 N/A N/A 1952449 C .../envs/vlm-hallucinations/bin/python 7132MiB | ++---------------------------------------------------------------------------------------+ +Using cache found in /home/abdelrahman.elsayed/.cache/torch/hub/mateuszbuda_brain-segmentation-pytorch_master +wandb: Currently logged in as: abdelrahman-elsayed (dinesh_saggurthi). Use `wandb login --relogin` to force relogin +wandb: wandb version 0.18.5 is available! To upgrade, please run: +wandb: $ pip install wandb --upgrade +wandb: Tracking run with wandb version 0.14.0 +wandb: Run data is saved locally in /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_162125-i4stmvih +wandb: Run `wandb offline` to turn off syncing. +wandb: Syncing run DIAS_model_Prompt Adapted SAM +wandb: ⭐️ View project at https://wandb.ai/dinesh_saggurthi/Baselines_exp +wandb: 🚀 View run at https://wandb.ai/dinesh_saggurthi/Baselines_exp/runs/i4stmvih +HERE +Train dataset size: 20 +Val dataset size: 10 +Train dataset size: 20 +Val dataset size: 10 +number of trainable parameters: 7763041 +Total parameters: 7,763,041 +Trainable parameters: 7,763,041 +Frozen parameters: 0 + +Parameters by module: +************************************************************************************************************* + model: + Total: 7,763,041 + Trainable: 7,763,041 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + soft: + Total: 0 + Trainable: 0 + Frozen: 0 +******************************************************************************************* +./biastuning/DIAS +Training parameters: +---------- +number of trainable parameters: 7763041 +batch size: 5 +num epochs: 500 +Epoch 0/499 +---------- + train Epoch 0: 0%| | 0/4 [00:00 + main_train(data_config, model_config, args.pretrained_path, args.save_path, args.training_strategy, device=args.device) + File "/home/abdelrahman.elsayed/sarim_code/train_baselines.py", line 234, in main_train + model = train_dl( + File "/home/abdelrahman.elsayed/sarim_code/train.py", line 217, in train_dl + outputs, reg_loss = model(inputs, text) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 711, in forward + return self.soft(self._forward_impl(x)),0 + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 636, in _forward_impl + x1 = self.layer1(x) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/container.py", line 217, in 
forward + input = module(input) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 331, in forward + out = self.hight_block(out) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 167, in forward + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) +torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.50 GiB (GPU 0; 23.65 GiB total capacity; 23.03 GiB already allocated; 59.06 MiB free; 23.12 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF diff --git a/AllinonSAM/slurm-44777.out b/AllinonSAM/slurm-44777.out new file mode 100644 index 0000000000000000000000000000000000000000..6b6734718d32b0260fa34d33276eb8d60106deb6 --- /dev/null +++ b/AllinonSAM/slurm-44777.out @@ -0,0 +1,384 @@ +Fri Oct 18 21:20:13 2024 ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.104.05 Driver Version: 535.104.05 CUDA Version: 12.2 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+======================+======================| +| 0 NVIDIA GeForce RTX 4090 On | 00000000:41:00.0 Off | Off | +| 0% 43C P8 24W / 450W | 13MiB / 24564MiB | 0% Default | +| | | N/A | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| +| 0 N/A N/A 6780 G /usr/lib/xorg/Xorg 4MiB | ++---------------------------------------------------------------------------------------+ +wandb: Currently logged in as: abdelrahman-elsayed (dinesh_saggurthi). Use `wandb login --relogin` to force relogin +wandb: - Waiting for wandb.init()... wandb: \ Waiting for wandb.init()... wandb: wandb version 0.18.5 is available! To upgrade, please run: +wandb: $ pip install wandb --upgrade +wandb: Tracking run with wandb version 0.14.0 +wandb: Run data is saved locally in /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6 +wandb: Run `wandb offline` to turn off syncing. 
+wandb: Syncing run DIAS_model_MedT +wandb: ⭐️ View project at https://wandb.ai/dinesh_saggurthi/Baselines_exp +wandb: 🚀 View run at https://wandb.ai/dinesh_saggurthi/Baselines_exp/runs/p8w61ip6 +HERE +Train dataset size: 20 +Val dataset size: 10 +Train dataset size: 20 +Val dataset size: 10 +number of trainable parameters: 1572370 +Total parameters: 1,572,394 +Trainable parameters: 1,572,370 +Frozen parameters: 24 + +Parameters by module: +************************************************************************************************************* + conv1: + Total: 1,176 + Trainable: 1,176 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + conv2: + Total: 9,216 + Trainable: 9,216 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + conv3: + Total: 9,216 + Trainable: 9,216 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + bn1: + Total: 16 + Trainable: 16 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + bn2: + Total: 256 + Trainable: 256 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + bn3: + Total: 16 + Trainable: 16 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + relu: + Total: 0 + Trainable: 0 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + layer1: + Total: 6,528 + Trainable: 6,520 + Frozen: 8 +******************************************************************************************* +************************************************************************************************************* + layer2: + Total: 31,408 + Trainable: 31,392 + Frozen: 16 +******************************************************************************************* +************************************************************************************************************* + decoder4: + Total: 18,464 + Trainable: 18,464 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + decoder5: + Total: 4,624 + Trainable: 4,624 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + adjust: + Total: 17 + Trainable: 17 + Frozen: 0 +******************************************************************************************* 
+************************************************************************************************************* + soft: + Total: 0 + Trainable: 0 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + conv1_p: + Total: 9,408 + Trainable: 9,408 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + conv2_p: + Total: 73,728 + Trainable: 73,728 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + conv3_p: + Total: 73,728 + Trainable: 73,728 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + bn1_p: + Total: 128 + Trainable: 128 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + bn2_p: + Total: 256 + Trainable: 256 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + bn3_p: + Total: 128 + Trainable: 128 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + relu_p: + Total: 0 + Trainable: 0 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + layer1_p: + Total: 5,264 + Trainable: 5,264 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + layer2_p: + Total: 20,864 + Trainable: 20,864 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + layer3_p: + Total: 156,800 + Trainable: 156,800 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + layer4_p: + Total: 166,816 + Trainable: 166,816 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + decoder1_p: + Total: 590,080 + Trainable: 590,080 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + decoder2_p: + Total: 295,040 + Trainable: 295,040 + Frozen: 0 
+******************************************************************************************* +************************************************************************************************************* + decoder3_p: + Total: 73,792 + Trainable: 73,792 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + decoder4_p: + Total: 18,464 + Trainable: 18,464 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + decoder5_p: + Total: 4,624 + Trainable: 4,624 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + decoderf: + Total: 2,320 + Trainable: 2,320 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + adjust_p: + Total: 17 + Trainable: 17 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + soft_p: + Total: 0 + Trainable: 0 + Frozen: 0 +******************************************************************************************* +./biastuning/DIAS +Training parameters: +---------- +number of trainable parameters: 1572370 +batch size: 2 +num epochs: 500 +Epoch 0/499 +---------- + train Epoch 0: 0%| | 0/10 [00:00 + main_train(data_config, model_config, args.pretrained_path, args.save_path, args.training_strategy, device=args.device) + File "/home/abdelrahman.elsayed/sarim_code/train_baselines.py", line 234, in main_train + model = train_dl( + File "/home/abdelrahman.elsayed/sarim_code/train.py", line 218, in train_dl + outputs, reg_loss = model(inputs, text) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 711, in forward + return self.soft(self._forward_impl(x)),0 + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 638, in _forward_impl + x2 = self.layer2(x1) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/container.py", line 217, in forward + input = module(input) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 332, in forward + out = self.width_block(out) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 167, in forward + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, 
self.groups, H, H).sum(dim=1) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/batchnorm.py", line 171, in forward + return F.batch_norm( + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/functional.py", line 2450, in batch_norm + return torch.batch_norm( +torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 3.00 GiB (GPU 0; 23.65 GiB total capacity; 18.59 GiB already allocated; 1.82 GiB free; 21.35 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF diff --git a/AllinonSAM/slurm-44778.out b/AllinonSAM/slurm-44778.out new file mode 100644 index 0000000000000000000000000000000000000000..90324b6de97280328f4beec7e875b4453aeab393 --- /dev/null +++ b/AllinonSAM/slurm-44778.out @@ -0,0 +1,204 @@ +Fri Oct 18 21:21:40 2024 ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.104.05 Driver Version: 535.104.05 CUDA Version: 12.2 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+======================+======================| +| 0 NVIDIA GeForce RTX 4090 On | 00000000:41:00.0 Off | Off | +| 0% 44C P8 25W / 450W | 13MiB / 24564MiB | 0% Default | +| | | N/A | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| +| 0 N/A N/A 6780 G /usr/lib/xorg/Xorg 4MiB | ++---------------------------------------------------------------------------------------+ +wandb: Currently logged in as: abdelrahman-elsayed (dinesh_saggurthi). Use `wandb login --relogin` to force relogin +wandb: wandb version 0.18.5 is available! To upgrade, please run: +wandb: $ pip install wandb --upgrade +wandb: Tracking run with wandb version 0.14.0 +wandb: Run data is saved locally in /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2 +wandb: Run `wandb offline` to turn off syncing. 
+wandb: Syncing run DIAS_model_TransUNet +wandb: ⭐️ View project at https://wandb.ai/dinesh_saggurthi/Baselines_exp +wandb: 🚀 View run at https://wandb.ai/dinesh_saggurthi/Baselines_exp/runs/w102ona2 +HERE +Train dataset size: 20 +Val dataset size: 10 +Train dataset size: 20 +Val dataset size: 10 +number of trainable parameters: 107681297 +Total parameters: 107,681,297 +Trainable parameters: 107,681,297 +Frozen parameters: 0 + +Parameters by module: +************************************************************************************************************* + transformer: + Total: 100,293,952 + Trainable: 100,293,952 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + decoder: + Total: 7,387,200 + Trainable: 7,387,200 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + segmentation_head: + Total: 145 + Trainable: 145 + Frozen: 0 +******************************************************************************************* +************************************************************************************************************* + soft: + Total: 0 + Trainable: 0 + Frozen: 0 +******************************************************************************************* +./biastuning/DIAS +Training parameters: +---------- +number of trainable parameters: 107681297 +batch size: 2 +num epochs: 500 +Epoch 0/499 +---------- + train Epoch 0: 0%| | 0/10 [00:00 + main_train(data_config, model_config, args.pretrained_path, args.save_path, args.training_strategy, device=args.device) + File "/home/abdelrahman.elsayed/sarim_code/train_baselines.py", line 234, in main_train + model = train_dl( + File "/home/abdelrahman.elsayed/sarim_code/train.py", line 218, in train_dl + outputs, reg_loss = model(inputs, text) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/vit_seg_modeling.py", line 390, in forward + x = self.decoder(x, features) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/vit_seg_modeling.py", line 366, in forward + x = decoder_block(x, skip=skip) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/vit_seg_modeling.py", line 312, in forward + x = torch.cat([x, skip], dim=1) +RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 32 but got size 64 for tensor number 1 in the list. 
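The two aborted runs above (slurm-44737.out and slurm-44777.out) end with PyTorch's standard out-of-memory hint about max_split_size_mb and PYTORCH_CUDA_ALLOC_CONF. A minimal sketch of how that hint could be applied before relaunching one of these jobs (the environment variable is the documented PyTorch allocator setting; the batch-size value below is purely illustrative and not taken from any config in this diff):

import os

# The allocator option must be set before the first CUDA allocation in the
# process, i.e. before any tensor is moved to the GPU.
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128")

import torch

# The other remedy the logs imply is a smaller batch size: the failed runs
# used batch sizes 5 and 2 and still exhausted the RTX 4090's 24 GiB.
batch_size = 1  # illustrative value only, not taken from the repository configs

torch.cuda.empty_cache()  # drop cached blocks left over from an earlier attempt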
diff --git a/AllinonSAM/ssam_env.yml b/AllinonSAM/ssam_env.yml new file mode 100644 index 0000000000000000000000000000000000000000..8dca75a71bec03c602c23f660a59041ce502ea67 --- /dev/null +++ b/AllinonSAM/ssam_env.yml @@ -0,0 +1,254 @@ +name: s-sam +channels: + - pytorch + - nvidia + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - _openmp_mutex=5.1=1_gnu + - asttokens=2.0.5=pyhd3eb1b0_0 + - backcall=0.2.0=pyhd3eb1b0_0 + - ca-certificates=2023.01.10=h06a4308_0 + - certifi=2022.12.7=py38h06a4308_0 + - comm=0.1.2=py38h06a4308_0 + - cuda=11.7.1=0 + - cuda-cccl=11.7.91=0 + - cuda-command-line-tools=11.7.1=0 + - cuda-compiler=11.7.1=0 + - cuda-cudart=11.7.99=0 + - cuda-cudart-dev=11.7.99=0 + - cuda-cuobjdump=11.7.91=0 + - cuda-cupti=11.7.101=0 + - cuda-cuxxfilt=11.7.91=0 + - cuda-demo-suite=12.0.76=0 + - cuda-documentation=12.0.76=0 + - cuda-driver-dev=11.7.99=0 + - cuda-gdb=12.0.90=0 + - cuda-libraries=11.7.1=0 + - cuda-libraries-dev=11.7.1=0 + - cuda-memcheck=11.8.86=0 + - cuda-nsight=12.0.78=0 + - cuda-nsight-compute=12.0.0=0 + - cuda-nvcc=11.7.99=0 + - cuda-nvdisasm=12.0.76=0 + - cuda-nvml-dev=11.7.91=0 + - cuda-nvprof=12.0.90=0 + - cuda-nvprune=11.7.91=0 + - cuda-nvrtc=11.7.99=0 + - cuda-nvrtc-dev=11.7.99=0 + - cuda-nvtx=11.7.91=0 + - cuda-nvvp=12.0.90=0 + - cuda-runtime=11.7.1=0 + - cuda-sanitizer-api=12.0.90=0 + - cuda-toolkit=11.7.1=0 + - cuda-tools=11.7.1=0 + - cuda-visual-tools=11.7.1=0 + - cudatoolkit=11.0.221=h6bb024c_0 + - debugpy=1.5.1=py38h295c915_0 + - decorator=5.1.1=pyhd3eb1b0_0 + - executing=0.8.3=pyhd3eb1b0_0 + - flit-core=3.8.0=py38h06a4308_0 + - gds-tools=1.5.0.59=0 + - importlib_metadata=6.0.0=hd3eb1b0_0 + - ipykernel=6.19.2=py38hb070fc8_0 + - ipython=8.12.0=py38h06a4308_0 + - jedi=0.18.1=py38h06a4308_1 + - jupyter_client=8.1.0=py38h06a4308_0 + - jupyter_core=5.3.0=py38h06a4308_0 + - ld_impl_linux-64=2.38=h1181459_1 + - libcublas=11.10.3.66=0 + - libcublas-dev=11.10.3.66=0 + - libcufft=10.7.2.124=h4fbf590_0 + - libcufft-dev=10.7.2.124=h98a8f43_0 + - libcufile=1.5.0.59=0 + - libcufile-dev=1.5.0.59=0 + - libcurand=10.3.1.50=0 + - libcurand-dev=10.3.1.50=0 + - libcusolver=11.4.0.1=0 + - libcusolver-dev=11.4.0.1=0 + - libcusparse=11.7.4.91=0 + - libcusparse-dev=11.7.4.91=0 + - libffi=3.4.2=h6a678d5_6 + - libgcc-ng=11.2.0=h1234567_1 + - libgomp=11.2.0=h1234567_1 + - libnpp=11.7.4.75=0 + - libnpp-dev=11.7.4.75=0 + - libnvjpeg=11.8.0.2=0 + - libnvjpeg-dev=11.8.0.2=0 + - libsodium=1.0.18=h7b6447c_0 + - libstdcxx-ng=11.2.0=h1234567_1 + - matplotlib-inline=0.1.6=py38h06a4308_0 + - ml-collections + - ncurses=6.4=h6a678d5_0 + - nest-asyncio=1.5.6=py38h06a4308_0 + - nsight-compute=2022.4.0.15=0 + - openssl=1.1.1t=h7f8727e_0 + - parso=0.8.3=pyhd3eb1b0_0 + - pexpect=4.8.0=pyhd3eb1b0_3 + - pickleshare=0.7.5=pyhd3eb1b0_1003 + - pip=23.0.1=py38h06a4308_0 + - platformdirs=2.5.2=py38h06a4308_0 + - prompt-toolkit=3.0.36=py38h06a4308_0 + - ptyprocess=0.7.0=pyhd3eb1b0_2 + - pure_eval=0.2.2=pyhd3eb1b0_0 + - pygments=2.11.2=pyhd3eb1b0_0 + - python=3.8.16=h7a1cb2a_3 + - python-dateutil=2.8.2=pyhd3eb1b0_0 + - pytorch-cuda=11.7=h67b0de4_1 + - pyzmq=23.2.0=py38h6a678d5_0 + - readline=8.2=h5eee18b_0 + - setuptools=65.6.3=py38h06a4308_0 + - six=1.16.0=pyhd3eb1b0_1 + - sqlite=3.41.1=h5eee18b_0 + - stack_data=0.2.0=pyhd3eb1b0_0 + - tk=8.6.12=h1ccaba5_0 + - tornado=6.2=py38h5eee18b_0 + - traitlets=5.7.1=py38h06a4308_0 + - typing_extensions=4.4.0=py38h06a4308_0 + - wcwidth=0.2.5=pyhd3eb1b0_0 + - xz=5.2.10=h5eee18b_1 + - zeromq=4.3.4=h2531618_0 + - zipp=3.11.0=py38h06a4308_0 + - 
zlib=1.2.13=h5eee18b_0 + - pip: + - absl-py==1.3.0 + - addict==2.4.0 + - appdirs==1.4.4 + - argparse==1.4.0 + - batchgenerators==0.25 + - beautifulsoup4==4.11.1 + - cachetools==5.2.0 + - chardet==3.0.4 + - charset-normalizer==3.1.0 + - click==8.1.3 + - cmake==3.26.3 + - contourpy==1.0.7 + - cycler==0.11.0 + - docker-pycreds==0.4.0 + - efficientnet-pytorch==0.7.1 + - entrypoints==0.3 + - exceptiongroup==1.1.1 + - filelock==3.8.2 + - flake8==3.7.9 + - fonttools==4.39.3 + - ftfy==6.1.1 + - future==0.18.2 + - gdown==4.6.0 + - gensim==4.3.1 + - gitdb==4.0.10 + - gitpython==3.1.31 + - google-auth==2.15.0 + - google-auth-oauthlib==0.4.6 + - googletrans==3.0.0 + - grpcio==1.51.1 + - h11==0.9.0 + - h2==3.2.0 + - h5py==3.8.0 + - hpack==3.0.0 + - hstspreload==2023.1.1 + - httpcore==0.9.1 + - httpx==0.13.3 + - huggingface-hub==0.11.1 + - hyperframe==5.2.0 + - idna==2.10 + - imageio==2.28.0 + - importlib-metadata==5.2.0 + - importlib-resources==5.12.0 + - iniconfig==2.0.0 + - isort==4.3.21 + - jinja2==3.1.2 + - joblib==1.2.0 + - kiwisolver==1.4.4 + - lazy-loader==0.2 + - linecache2==1.0.0 + - lit==16.0.3 + - littleutils==0.2.2 + - markdown==3.4.1 + - markupsafe==2.1.1 + - matplotlib==3.7.1 + - mccabe==0.6.1 + - mpmath==1.3.0 + - munch==3.0.0 + - networkx==3.1 + - nibabel==5.1.0 + - nltk==3.8.1 + - numpy==1.24.2 + - nvidia-cublas-cu11==11.10.3.66 + - nvidia-cuda-cupti-cu11==11.7.101 + - nvidia-cuda-nvrtc-cu11==11.7.99 + - nvidia-cuda-runtime-cu11==11.7.99 + - nvidia-cudnn-cu11==8.5.0.96 + - nvidia-cufft-cu11==10.9.0.58 + - nvidia-curand-cu11==10.2.10.91 + - nvidia-cusolver-cu11==11.4.0.1 + - nvidia-cusparse-cu11==11.7.4.91 + - nvidia-nccl-cu11==2.14.3 + - nvidia-nvtx-cu11==11.7.91 + - oauthlib==3.2.2 + - ogb==1.3.5 + - opencv-python==4.6.0.66 + - outdated==0.2.2 + - packaging==22.0 + - pandas==1.5.2 + - pathtools==0.1.2 + - pillow==9.5.0 + - pluggy==1.0.0 + - pretrainedmodels==0.7.4 + - protobuf==3.20.3 + - psutil==5.9.4 + - pyasn1==0.4.8 + - pyasn1-modules==0.2.8 + - pycocotools==2.0.6 + - pycodestyle==2.5.0 + - pyflakes==2.1.1 + - pyparsing==3.0.9 + - pytest==7.3.1 + - pytz==2022.7 + - pywavelets==1.4.1 + - pyyaml==6.0 + - regex==2022.10.31 + - requests==2.28.2 + - requests-oauthlib==1.3.1 + - rfc3986==1.5.0 + - rsa==4.9 + - scikit-image==0.20.0 + - scikit-learn==1.2.0 + - scipy==1.9.1 + - sentry-sdk==1.18.0 + - setproctitle==1.3.2 + - simpleitk==2.2.1 + - smart-open==6.3.0 + - smmap==5.0.0 + - sniffio==1.3.0 + - soupsieve==2.3.2.post1 + - supervision==0.3.2 + - surface-distance-based-measures==0.1 + - sympy==1.12 + - tabulate==0.9.0 + - tb-nightly==2.12.0a20221225 + - tensorboard-data-server==0.6.1 + - tensorboard-plugin-wit==1.8.1 + - textaugment==1.3.4 + - textblob==0.17.1 + - threadpoolctl==3.1.0 + - tifffile==2023.4.12 + - timm==0.6.13 + - tokenizers==0.13.3 + - tomli==2.0.1 + - torch==2.0.1 + - torchaudio + - torchvision==0.15.2 + - tqdm==4.64.1 + - traceback2==1.4.0 + - transformers==4.27.4 + - triton==2.0.0 + - unittest2==1.1.0 + - urllib3==1.26.15 + - wandb==0.14.0 + - werkzeug==2.2.2 + - wget==3.2 + - wheel==0.38.4 + - wilds==1.2.2 + - yacs==0.1.8 + - yapf==0.29.0 +prefix: /home/ubuntu/anaconda3/envs/dassl diff --git a/AllinonSAM/test.py b/AllinonSAM/test.py new file mode 100644 index 0000000000000000000000000000000000000000..d80106ae052a7c4758de0d3f61487d4ec837ffdd --- /dev/null +++ b/AllinonSAM/test.py @@ -0,0 +1,66 @@ +import os +import sys +import numpy as np +from data_utils import * +from model import * +from utils import * +import torch + +def test(config_data, config_model, 
pretrained_path, test_start, test_end, device='cuda:0'): + with torch.no_grad(): + transform = Slice_Transforms(config_data) + all_label_list = config_data['data']['label_list'] + all_label_names = config_data['data']['label_names'] + all_label_dict = {} + for i,ln in enumerate(all_label_names): + all_label_dict[ln] = i + model = Prompt_Adapted_SAM(device=device,config=config_model,label_text_dict=all_label_dict) + + if pretrained_path is not None: + state_dict = torch.load(pretrained_path) + model.load_state_dict(state_dict, strict=True) + + #initialize dice scores for all labels + dices = {} + for l in all_label_names: + dices[l] = [] + + model = model.to(device) + data_dir = config_data['data']['root_path'] + for name in os.listdir(data_dir+'/images'): + print(name) + #only test for val set + if int(name[:name.find('.')])>=test_start and int(name[:name.find('.')])=0.5+0 + dice_l = dice_coef(mask_l, outputs.cpu()) + dices[all_label_names[num]].append(dice_l.numpy()) + + #take the average dice score in each label + for l in all_label_names: + dices[l] = np.mean(dices[l]) + print(dices) \ No newline at end of file diff --git a/AllinonSAM/train.py b/AllinonSAM/train.py new file mode 100644 index 0000000000000000000000000000000000000000..e6bb4defb9673637179b570ee8e31319409656e9 --- /dev/null +++ b/AllinonSAM/train.py @@ -0,0 +1,430 @@ +import torch + +import sys +import copy +import os + +from data_utils import * +from model import * +from utils import * +import yaml +from tqdm import tqdm +import wandb + + +def print_model_parameters_stats(model): + total_params = sum(p.numel() for p in model.parameters()) + trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + frozen_params = total_params - trainable_params + + print(f"Total parameters: {total_params:,}") + print(f"Trainable parameters: {trainable_params:,}") + print(f"Frozen parameters: {frozen_params:,}") + + # Print parameters by module + print("\nParameters by module:") + for name, module in model.named_children(): + total_params = sum(p.numel() for p in module.parameters()) + trainable_params = sum( + p.numel() for p in module.parameters() if p.requires_grad + ) + frozen_params = total_params - trainable_params + print( + "*************************************************************************************************************" + ) + print(f" {name}:") + print(f" Total: {total_params:,}") + print(f" Trainable: {trainable_params:,}") + print(f" Frozen: {frozen_params:,}") + print( + "*******************************************************************************************" + ) + + +def train( + model, + tr_dataset, + val_dataset, + criterion, + optimizer, + sav_path="./checkpoints/temp.pth", + num_epochs=25, + bs=32, + device="cuda:0", +): + model = model.to(device) + best_loss = 100000.0 + best_dice = 0 + best_HD95 = 1000000.0 + best_acd = float("inf") + print("Training parameters: \n----------") + print("batch size: ", bs) + print("num epochs: ", num_epochs) + for epoch in range(num_epochs): + print(f"Epoch {epoch}/{num_epochs - 1}") + print("-" * 10) + bs_count = 0 + inputs_li, labels_li, text_ids_li, text_li, slice_num_li = [], [], [], [], [] + running_loss = 0 + running_dice = 0 + count = 0 + # run training + # print("eere: ",len(tr_dataset)) + for i in range(len(tr_dataset)): + inputs, labels, _, text, slice_nums = tr_dataset[i] + inputs_li.append(inputs) + labels_li.append(labels) + text_li = text_li + [text] * (inputs.shape[0]) + slice_num_li = slice_num_li + slice_nums + bs_count += 1 + if 
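The training loop above batches whole volumes by accumulating per-volume slice tensors in Python lists and concatenating them once bs volumes (or the end of the dataset) have been seen. A minimal, self-contained sketch of that accumulate-then-flush pattern; the names iterate_in_volume_batches and process_batch are illustrative and not part of the repository:

    import torch

    def process_batch(inputs, labels):
        # stand-in for the forward/backward step performed in train()
        print(inputs.shape, labels.shape)

    def iterate_in_volume_batches(dataset, bs=2):
        inputs_li, labels_li, bs_count = [], [], 0
        for i in range(len(dataset)):
            inputs, labels = dataset[i]  # one volume: (num_slices, C, H, W) and (num_slices, H, W)
            inputs_li.append(inputs)
            labels_li.append(labels)
            bs_count += 1
            # flush once bs volumes have been accumulated, or at the end of the dataset
            if (bs_count % bs == 0) or (i == len(dataset) - 1):
                process_batch(torch.cat(inputs_li, dim=0), torch.cat(labels_li, dim=0))
                inputs_li, labels_li, bs_count = [], [], 0

    # toy "dataset": three volumes with 4, 2 and 3 slices each
    toy_dataset = [(torch.randn(n, 1, 8, 8), torch.zeros(n, 8, 8)) for n in (4, 2, 3)]
    iterate_in_volume_batches(toy_dataset, bs=2)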
(bs_count % bs == 0) or (i == len(tr_dataset) - 1): + # start training + bs_count = 0 + inputs = torch.cat(inputs_li, dim=0) + labels = torch.cat(labels_li, dim=0) + inputs = inputs.to(device) + labels = labels.to(device) + with torch.set_grad_enabled(True): + optimizer.zero_grad() + outputs, reg_loss = model(inputs, text_li, slice_num_li) + seg_loss = 0 + for c in criterion: + seg_loss += c(outputs, labels.float()) + seg_loss.backward() + optimizer.step() + running_loss += seg_loss.cpu() + + preds = outputs >= 0.5 + ri, ru = running_stats(labels, preds) + running_dice += dice_collated(ri, ru) + count += ri.shape[0] + + inputs_li = [] + labels_li = [] + text_li = [] + slice_num_li = [] + epoch_dice = running_dice / count + + print("Training loss: ", running_loss / (1 + (len(tr_dataset) // bs))) + print("Training dice: ", epoch_dice) + + # do val if epoch is a multiple of 5 + if epoch % 5 == 0: + running_dice = 0 + count = 0 + for i in range(len(val_dataset)): + inputs, labels, _, text, slice_nums = val_dataset[i] + inputs_li.append(inputs) + labels_li.append(labels) + text_li = text_li + [text] * (inputs.shape[0]) + slice_num_li = slice_num_li + slice_nums + bs_count += 1 + if bs_count % bs == 0: + # start training + bs_count = 0 + inputs = torch.cat(inputs_li, dim=0) + labels = torch.cat(labels_li, dim=0) + inputs = inputs.to(device) + labels = labels.to(device) + with torch.set_grad_enabled(False): + outputs, reg_loss = model(inputs, text_li, slice_num_li) + preds = outputs >= 0.5 + ri, ru = running_stats(labels, preds) + running_dice += dice_collated(ri, ru) + count += ri.shape[0] + + inputs_li = [] + labels_li = [] + text_li = [] + slice_num_li = [] + # epoch_dice = running_dice / (len(val_dataset)) + epoch_dice = running_dice / count + + print(f"Val Dice: {epoch_dice:.4f}") + + # deep copy the model + if epoch_dice > best_dice: + # best_loss = epoch_loss + best_dice = epoch_dice + torch.save(model.state_dict(), sav_path) + + return model + + +import torch +import wandb +from tqdm import tqdm +import os +from PIL import Image +import numpy as np +from utils import ( + running_stats, + dice_collated, + compute_hd95, + fractal_dimension, + iou_coef, + average_closest_distance, +) + +import os +from PIL import Image +import numpy as np +from pathlib import Path + +# Load configuration from data_config.yml +with open( + "/home/abdelrahman.elsayed/CVPR/AllinonSAM/config_arcade.yml", "r" +) as data_config_file: + data_config = yaml.safe_load(data_config_file) + +# Load configuration from model_svdtuning.yml +with open( + "/home/abdelrahman.elsayed/CVPR/AllinonSAM/model_svdtuning.yml", "r" +) as model_config_file: + model_config = yaml.safe_load(model_config_file) + + +def train_dl( + model, + datasets, + dataset_sizes, + criterion, + optimizer, + scheduler, + sav_path="./checkpoints/temp.pth", + num_epochs=25, + bs=32, + device="cuda:0", + iou_weight=1.0, + # Add weight for IoU loss + retain_graph=False, + neg2pos_ratio=-1, + save_dir="./validation_images", + reg_multiplier=0.01, +): + torch.cuda.empty_cache() + model = model.to(device) + best_dice = 0 + best_loss = 10000 + best_hd95 = 1000000 + best_acd = float("inf") + print_model_parameters_stats(model) + + # Create directories for saving images + print(save_dir) + # save_dir_test = Path(save_dir) + # if save_dir_test.is_dir(): + # print("The experiment was run") + # return model + label_dir = os.path.join(save_dir, "labels") + pred_dir = os.path.join(save_dir, "pred_labels") + os.makedirs(label_dir, exist_ok=True) + os.makedirs(pred_dir, 
exist_ok=True) + + run_name = f"{data_config['data']['root_path'].split('/')[-1]}_model_{model_config['arch']}" + if model_config["use_salt"]: + run_name = f"{data_config['data']['root_path'].split('/')[-1]}_model_{model_config['arch']}_ft_salt_{model_config['salt']['type']}_svd_{model_config['salt']['svd_rank_linear']}_svd_conv_{model_config['salt']['svd_rank_conv2d']}_loRA_{model_config['salt']['r_lora']}" + # Initialize wandb + wandb.init( + project="CVPR", + name=run_name, + config={ + "learning_rate": optimizer.param_groups[0]["lr"], + "batch_size": bs, + "num_epochs": num_epochs, + "reg_multiplier": reg_multiplier, + }, + ) + + + for name, param in model.named_parameters(): + print(name) + + print("Training parameters: \n----------") + print( + "number of trainable parameters: ", + sum(p.numel() for p in model.parameters() if p.requires_grad), + ) + print("batch size: ", bs) + print("num epochs: ", num_epochs) + + for epoch in range(num_epochs): + print(f"Epoch {epoch}/{num_epochs - 1}") + print("-" * 10) + dataloaders = {} + + # Each epoch has a training and validation phase + for phase in ["train", "val"]: + if phase == "train": + model.train() + if neg2pos_ratio > 0: + datasets[phase].generate_examples(neg2pos_ratio) + else: + model.eval() + + running_loss = 0.0 + running_hd95 = 0.0 + running_iou = 0.0 + running_acd = 0.0 + all_preds = [] # To store preds for fractal dimension + running_dice = 0 + count = 0 + dataloaders[phase] = torch.utils.data.DataLoader( + datasets[phase], batch_size=bs, shuffle=True, num_workers=4 + ) + + # Wrap dataloader with tqdm for progress bar + pbar = tqdm(dataloaders[phase], desc=f"{phase} Epoch {epoch}", leave=False) + + # Iterate over data + for batch_idx, (inputs, labels, text_idxs, text) in enumerate(pbar): + count += 1 + inputs = inputs.to(device) + labels = labels.to(device) + + optimizer.zero_grad() + + with torch.set_grad_enabled(phase == "train"): + outputs, reg_loss = model(inputs, text) + if len(outputs.shape) == 4: + outputs = torch.squeeze(outputs, dim=1) + loss = 0 + seg_loss = 0 + for c in criterion: + if "text" in c.__code__.co_varnames: + seg_loss += c(outputs, text, labels.float()) + else: + seg_loss += c(outputs, labels.float()) + loss += seg_loss + loss += reg_loss * reg_multiplier + iou_loss = iou_weight * (1 - iou_coef(labels, outputs > 0.5)) + loss += iou_loss + + if phase == "train": + loss.backward(retain_graph=True) + optimizer.step() + + with torch.no_grad(): + preds = outputs >= 0.5 + running_loss += loss.item() * inputs.size(0) + ri, ru = running_stats(labels, preds) + running_dice += dice_collated(ri, ru) + hd95 = compute_hd95(preds, labels) + running_hd95 += hd95.item() # Accumulate HD95 + running_iou += iou_loss.item() + + if phase == "val": + labels_np = labels.cpu().numpy() + batch_acd = [ + average_closest_distance( + preds[i].cpu().numpy(), labels_np[i] + ) + for i in range(len(preds)) + ] + # Filter out any NaN values from batch_acd + batch_acd = [d for d in batch_acd if not np.isnan(d)] + if batch_acd: + running_acd += np.mean(batch_acd) + all_preds.append(preds.cpu().numpy()) + + # Save images during validation (reduced frequency) + if ( + phase == "val" and epoch % 10 == 0 and batch_idx < 5 + ): # Save every 10 epochs, first 5 batches + for i in range( + min(2, inputs.size(0)) + ): # Save only first 2 images of the batch + img_name = f"epoch_{epoch}_batch_{batch_idx}_img_{i}.png" + + # Save true label + label_img = labels[i].cpu().numpy() * 255 + label_img = Image.fromarray(label_img.astype(np.uint8)) + 
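The objective assembled above sums the configured segmentation criteria, adds the model's regularisation term scaled by reg_multiplier, and adds an IoU penalty scaled by iou_weight. A condensed sketch of that combination, assuming AllinonSAM/utils.py (added later in this diff) is on the import path; the text-conditioned criterion branch is omitted for brevity:

    import torch
    from utils import iou_coef, dice_loss  # AllinonSAM/utils.py, added later in this diff

    def combined_objective(outputs, labels, criterion, reg_loss,
                           reg_multiplier=0.01, iou_weight=1.0):
        # sum of the segmentation criteria (text-conditioned variants omitted here)
        seg_loss = sum(c(outputs, labels.float()) for c in criterion)
        # IoU penalty on the thresholded prediction, as in train_dl()
        iou_loss = iou_weight * (1 - iou_coef(labels.float(), (outputs > 0.5).float()))
        return seg_loss + reg_multiplier * reg_loss + iou_loss

    outputs = torch.sigmoid(torch.randn(2, 64, 64))
    labels = torch.randint(0, 2, (2, 64, 64))
    print(combined_objective(outputs, labels, [dice_loss], reg_loss=torch.tensor(0.0)))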
label_img.save(os.path.join(label_dir, img_name)) + + # Save predicted label + pred_img = preds[i].cpu().numpy() * 255 + pred_img = Image.fromarray(pred_img.astype(np.uint8)) + pred_img.save(os.path.join(pred_dir, img_name)) + + # Update progress bar + pbar.set_postfix( + { + "loss": loss.item(), + "dice": running_dice / count, + "IoU Loss": running_iou / count, + "hd95": running_hd95 / count, + "ACD": running_acd / count, + } + ) + + if phase == "train": + scheduler.step() + + epoch_loss = running_loss / dataset_sizes[phase] + epoch_dice = running_dice / dataset_sizes[phase] + epoch_hd95 = running_hd95 / dataset_sizes[phase] + epoch_iou = running_iou / dataset_sizes[phase] + + if phase == "val": + epoch_acd = running_acd / count + # Calculate fractal dimension after validation + all_preds = np.concatenate(all_preds, axis=0) + fractal_dim_values = [] + for i in range(all_preds.shape[0]): + fractal_dim = fractal_dimension( + all_preds[i] + ) # Calculate for each mask + fractal_dim_values.append(fractal_dim) + + average_fractal_dim = np.mean(fractal_dim_values) + wandb.log( + { + "average_fractal_dimension": average_fractal_dim, + "Validation ACD": epoch_acd, + } + ) + + print( + f"{phase} Loss: {epoch_loss:.4f} Dice: {epoch_dice:.4f} HD95: {epoch_hd95:.4f} IOU_loss: {epoch_iou:.4f}" + ) + + # Log metrics to wandb + wandb.log( + { + f"{phase}_loss": epoch_loss, + f"{phase}_hd95": epoch_hd95, + f"{phase}_dice": epoch_dice, + f"{phase}_iou": epoch_iou, + "epoch": epoch, + } + ) + + if phase == "val" and epoch_loss < best_loss: + best_loss = epoch_loss + best_dice = epoch_dice + best_hd95 = epoch_hd95 + best_acd = epoch_acd + # torch.save(model.state_dict(), sav_path) + wandb.run.summary["best_val_loss"] = best_loss + wandb.run.summary["best_val_dice"] = best_dice + wandb.run.summary["best_val_hd95"] = best_hd95 + wandb.run.summary["best_acd"] = best_acd + + elif phase == "val" and np.isnan(epoch_loss): + print("nan loss but saving model") + torch.save(model.state_dict(), sav_path) + + print( + f"Best val loss: {best_loss:4f}, best val dice: {best_dice:2f} , best Hd95: {best_hd95:2f}" + ) + model_save_path = f"{sav_path}/final_model.pth" + model_dir = os.path.dirname(model_save_path) + if not os.path.exists(model_dir): + os.makedirs(model_dir) + torch.save(model.state_dict(), model_save_path) + print(f"Model saved at: {model_save_path}") + + # Finish wandb run + wandb.finish() + + return model diff --git a/AllinonSAM/train_baselines.py b/AllinonSAM/train_baselines.py new file mode 100644 index 0000000000000000000000000000000000000000..44711d87032c9d380357e7b2b7945b0392e12f72 --- /dev/null +++ b/AllinonSAM/train_baselines.py @@ -0,0 +1,266 @@ +import argparse +import yaml +import torch.nn as nn +import torch.optim as optim +from torch.optim import lr_scheduler +from data_utils import * +from model import * +from test import * +from train import * +from baselines import UNet, UNext, medt_net +from vit_seg_modeling import VisionTransformer +from vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg +from axialnet import MedT +import sys +source_path = os.path.join("/home/abdelrahman.elsayed/sarim_code/datasets") +sys.path.append(source_path) +from arcade import ArcadeDataset + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('--data_config', default='/home/abdelrahman.elsayed/sarim_code/config_arcade.yml', + help='data config file path') + + parser.add_argument('--model_config', default='/home/abdelrahman.elsayed/sarim_code/model_baseline.yml', + help='model config file 
path') + + parser.add_argument('--pretrained_path', default=None, + help='pretrained model path') + + parser.add_argument('--save_path', default='checkpoints/temp.pth', + help='pretrained model path') + parser.add_argument('--training_strategy', default='biastuning', help='how to train the model') + + parser.add_argument('--device', default='cuda:0', help='device to train on') + + args = parser.parse_args() + + return args + +def main_datautils(config, use_norm=True): + selected_idxs = [0,12,42,79,100] + print(config) + dataset_dict, dataset_sizes, label_dict = get_data(config, tr_folder_start=0, tr_folder_end=78000, val_folder_start=0, val_folder_end=104000, use_norm=use_norm) + print(len(dataset_dict['train'])) + for i in selected_idxs: + temp = (dataset_dict['train'][i]) + print(temp[-1]) + print(temp[-2]) + print(temp[0].shape) + print(temp[1].shape) + plt.imshow(temp[0].permute(1,2,0), cmap='gray') + plt.show() + plt.imshow(temp[1], cmap='gray') + plt.show() + +def main_model(config): + print(config) + label_dict = { + 'liver':0, + 'tumor':1 + } + model = Prompt_Adapted_SAM(config, label_dict) + for name, p in model.named_parameters(): + print(name) + return + +def main_test(data_config, model_config, pretrained_path): + test_start = 104 + test_end = 131 + test(data_config, model_config, pretrained_path, test_start, test_end, device='cuda:0') + +def main_train(data_config, model_config, pretrained_path, save_path, training_strategy='biastuning', device='cuda:0'): + #load data + if data_config['data']['name']=='LITS': + dataset_dict, dataset_sizes, label_dict = get_data(data_config, tr_folder_start=0, tr_folder_end=78, val_folder_start=78, val_folder_end=104) + elif data_config['data']['name']=='IDRID': + dataset_dict, dataset_sizes, label_dict = get_data(data_config, tr_folder_start=0, tr_folder_end=40, val_folder_start=40, val_folder_end=104) + dataloader_dict = {} + for x in ['train','val']: + dataloader_dict[x] = torch.utils.data.DataLoader(dataset_dict[x], batch_size=model_config['training']['batch_size'], shuffle=True, num_workers=4) + elif data_config['data']['name']=='ENDOVIS': + dataset_dict, dataset_sizes, label_dict = get_data(data_config, tr_folder_start=0, tr_folder_end=180, val_folder_start=180, val_folder_end=304, no_text_mode=True) + dataloader_dict = {} + for x in ['train','val']: + dataloader_dict[x] = torch.utils.data.DataLoader(dataset_dict[x], batch_size=model_config['training']['batch_size'], shuffle=True, num_workers=4) + elif data_config['data']['name']=='ENDOVIS 18': + dataset_dict, dataset_sizes, label_dict = get_data(data_config, tr_folder_start=0, tr_folder_end=18000, val_folder_start=0, val_folder_end=34444, no_text_mode=True) + dataloader_dict = {} + for x in ['train','val']: + dataloader_dict[x] = torch.utils.data.DataLoader(dataset_dict[x], batch_size=model_config['training']['batch_size'], shuffle=True, num_workers=4) + elif data_config['data']['name']=='CHESTXDET': + dataset_dict, dataset_sizes, label_dict = get_data(data_config, tr_folder_start=0, tr_folder_end=18000, val_folder_start=0, val_folder_end=34444, no_text_mode=True) + dataloader_dict = {} + for x in ['train','val']: + dataloader_dict[x] = torch.utils.data.DataLoader(dataset_dict[x], batch_size=model_config['training']['batch_size'], shuffle=True, num_workers=4) + elif data_config['data']['name']=='CHOLEC 8K': + dataset_dict, dataset_sizes, label_dict = get_data(data_config, tr_folder_start=0, tr_folder_end=18000, val_folder_start=0, val_folder_end=34444, no_text_mode=True) + 
dataloader_dict = {} + for x in ['train','val']: + dataloader_dict[x] = torch.utils.data.DataLoader(dataset_dict[x], batch_size=model_config['training']['batch_size'], shuffle=True, num_workers=4) + elif data_config['data']['name']=='ULTRASOUND': + dataset_dict, dataset_sizes, label_dict = get_data(data_config, tr_folder_start=0, tr_folder_end=18000, val_folder_start=0, val_folder_end=34444, no_text_mode=True) + dataloader_dict = {} + for x in ['train','val']: + dataloader_dict[x] = torch.utils.data.DataLoader(dataset_dict[x], batch_size=model_config['training']['batch_size'], shuffle=True, num_workers=4) + elif data_config['data']['name']=='GLAS': + dataset_dict, dataset_sizes, label_dict = get_data(data_config, tr_folder_start=0, tr_folder_end=18000, val_folder_start=0, val_folder_end=34444, no_text_mode=True) + dataloader_dict = {} + for x in ['train','val']: + dataloader_dict[x] = torch.utils.data.DataLoader(dataset_dict[x], batch_size=model_config['training']['batch_size'], shuffle=True, num_workers=4) + elif data_config["data"]["name"] == "ArcadeDataset": + print("HERE") + data_split_csv_path = data_config["data"]["data_split_csv"] + data_split = pd.read_csv(data_split_csv_path) + + dataset_dict = {} + dataloader_dict = {} + + use_norm = True + no_text_mode = False + + for split in ["train", "val"]: + # Filter the CSV for the current split + split_data = data_split[data_split["split"] == split]["imgs"].tolist() + + # Pass the filtered data to the dataset class (ArcadeDataset) + dataset_dict[split] = ArcadeDataset( + config=data_config, + file_list=split_data, # Pass file_list as (image_path, mask_path) tuples + shuffle_list=True, + is_train=(split == "train"), + apply_norm=use_norm, + no_text_mode=no_text_mode, + ) + + # Create DataLoader for each dataset + dataloader_dict[split] = torch.utils.data.DataLoader( + dataset_dict[split], + batch_size=model_config["training"]["batch_size"], + shuffle=True, + num_workers=4, + ) + + # Get dataset sizes + dataset_sizes = {split: len(dataset_dict[split]) for split in ["train", "val"]} + + # Create label dictionary + label_dict = { + name: i for i, name in enumerate(data_config["data"]["label_names"]) + } + + # Print dataset sizes + print(f"Train dataset size: {dataset_sizes['train']}") + print(f"Val dataset size: {dataset_sizes['val']}") + + # Get dataset sizes + dataset_sizes = {split: len(dataset_dict[split]) for split in ["train", "val"]} + + # Create label dictionary + label_dict = { + name: i for i, name in enumerate(data_config["data"]["label_names"]) + } + + # Print dataset sizes + print(f"Train dataset size: {dataset_sizes['train']}") + print(f"Val dataset size: {dataset_sizes['val']}") + + + #load model + #change the img size in model config according to data config + in_channels = model_config['in_channels'] + out_channels = model_config['num_classes'] + img_size = model_config['img_size'] + if model_config['arch']=='Prompt Adapted SAM': + model = Prompt_Adapted_SAM(model_config, label_dict, device, training_strategy=training_strategy) + elif model_config['arch']=='UNet': + model = UNet(in_channels=in_channels, out_channels=out_channels , pretrained=True) + elif model_config['arch']=='UNext': + model = UNext(num_classes=out_channels, input_channels=in_channels, img_size=img_size) + elif model_config['arch']=='MedT': + #TODO + model = MedT(img_size=img_size, num_classes=out_channels) + elif model_config['arch']=='TransUNet': + config_vit = CONFIGS_ViT_seg['R50-ViT-B_16'] + config_vit.n_classes = out_channels + config_vit.n_skip = 3 + 
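The ArcadeDataset branch above expects the data_split CSV to contain a "split" column (train/val) and an "imgs" column of file entries. A small sketch of building and filtering such a CSV; the paths shown are hypothetical placeholders:

    import pandas as pd

    # hypothetical entries; real values come from the Arcade dataset layout
    data_split = pd.DataFrame({
        "imgs":  ["images/1.png", "images/2.png", "images/3.png"],
        "split": ["train", "train", "val"],
    })
    data_split.to_csv("data_split.csv", index=False)

    # mirrors the filtering performed in main_train() above
    train_files = data_split[data_split["split"] == "train"]["imgs"].tolist()
    val_files = data_split[data_split["split"] == "val"]["imgs"].tolist()
    print(train_files, val_files)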
# if args.vit_name.find('R50') != -1: + # config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size)) + model = VisionTransformer(config_vit, img_size=img_size, num_classes=config_vit.n_classes) + + #load model weights + if pretrained_path is not None: + model.load_state_dict(torch.load(pretrained_path)) + + #training parameters + print('number of trainable parameters: ', sum(p.numel() for p in model.parameters() if p.requires_grad)) + training_params = model_config['training'] + if training_params['optimizer'] == 'adamw': + optimizer = optim.AdamW(model.parameters(), lr=float(training_params['lr']), weight_decay=float(training_params['weight_decay'])) + elif training_params['optimizer'] == 'sgd': + optimizer = optim.SGD(model.parameters(), lr=float(training_params['lr']), weight_decay=float(training_params['weight_decay']), momentum=0.9) + exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=training_params['schedule_step'], gamma=training_params['schedule_step_factor']) + + criterion = [] + if 'dice' in training_params['loss']: + criterion.append(dice_loss) + if 'focal' in training_params['loss']: + criterion.append(multiclass_focal_loss) + if 'weighted CE' in training_params['loss']: + criterion.append(weighted_ce_loss) + if criterion==[]: + criterion = [nn.BCELoss()] + + #train the model + if data_config['data']['name']=='LITS': + model = train(model, dataset_dict['train'], dataset_dict['val'], criterion, optimizer, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device) + elif data_config['data']['name']=='IDRID': + model = train_dl(model, dataloader_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device) + elif data_config['data']['name']=='ENDOVIS': + model = train_dl(model, dataloader_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device) + elif data_config['data']['name']=='ENDOVIS 18': + model = train_dl(model, dataloader_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device) + elif data_config['data']['name']=='CHOLEC 8K': + model = train_dl(model, dataloader_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device) + elif data_config['data']['name']=='ULTRASOUND': + model = train_dl(model, dataloader_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device) + elif data_config['data']['name']=='CHESTXDET': + model = train_dl(model, dataloader_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device) + elif data_config['data']['name']=='GLAS': + model = train_dl(model, dataset_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device) + elif data_config["data"]["name"] == "ArcadeDataset": + save_path = "./models" + data_config["data"]["root_path"].split("/")[-1] + model = train_dl( + model, + dataset_dict, + dataset_sizes, + criterion, + 
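The branches above read only a handful of keys from the model configuration. A sketch of a minimal configuration, written as the Python dict that yaml.load would produce; all values below are illustrative placeholders rather than the repository's actual settings:

    # keys read by main_train() above; values are placeholders only
    model_config = {
        "arch": "UNet",                # or "Prompt Adapted SAM", "UNext", "MedT", "TransUNet"
        "in_channels": 3,
        "num_classes": 1,
        "img_size": 256,
        "training": {
            "optimizer": "adamw",      # or "sgd"
            "lr": 1e-4,
            "weight_decay": 1e-2,
            "schedule_step": 10,
            "schedule_step_factor": 0.5,
            "loss": ["dice", "focal"], # any of: dice, focal, weighted CE
            "num_epochs": 100,
            "batch_size": 8,
        },
    }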
optimizer, + exp_lr_scheduler, + save_path, + save_dir=f"./{args.training_strategy}/{data_config['data']['root_path'].split('/')[-1]}", + num_epochs=training_params["num_epochs"], + bs=2, + device=device + ) + + +if __name__ == '__main__': + args = parse_args() + with open(args.data_config, 'r') as f: + data_config = yaml.load(f, Loader=yaml.FullLoader) + with open(args.model_config, 'r') as f: + model_config = yaml.load(f, Loader=yaml.FullLoader) + + # #for checking data_utils + # main_datautils(data_config, use_norm=False) + + # #for checking model + # main_model(model_config) + + # #for testing on the test dataset + # main_test(data_config, model_config, args.pretrained_path) + + # for training the model + main_train(data_config, model_config, args.pretrained_path, args.save_path, args.training_strategy, device=args.device) diff --git a/AllinonSAM/utils.py b/AllinonSAM/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ab5f703f7129473ceedd6e17b303d887313edc82 --- /dev/null +++ b/AllinonSAM/utils.py @@ -0,0 +1,355 @@ +import numpy as np +import torch +import torch.nn.functional as F +import argparse +import torch.nn as nn +from scipy.ndimage import distance_transform_edt +from skimage import morphology +from medpy.metric.binary import hd95 +import math +from torch.optim import Optimizer +from torch.optim.lr_scheduler import _LRScheduler +import warnings + +# boundary points and ACD functions (From BraTS) +def boundary_points(mask): + return np.argwhere(morphology.binary_erosion(mask) != mask) + +def average_closest_distance(prediction, ground_truth): + pred_boundary = boundary_points(prediction) + gt_boundary = boundary_points(ground_truth) + distances = [ + np.min(np.linalg.norm(pred - gt_boundary, axis=1)) for pred in pred_boundary + ] + acd = np.mean(distances) + return acd + +# Implementation for CosineAnnealing+warmup (Linear) LR +class CosineAnnealingWarmupScheduler(_LRScheduler): + """ + Implements Cosine Annealing with Warmup learning rate scheduler. 
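A small sketch exercising boundary_points / average_closest_distance defined above on two shifted squares, assuming AllinonSAM/utils.py is importable (it also requires scikit-image and medpy):

    import numpy as np
    from utils import average_closest_distance  # AllinonSAM/utils.py, defined above

    # two 8x8 binary masks: a square and the same square shifted by one pixel
    pred = np.zeros((8, 8), dtype=bool)
    gt = np.zeros((8, 8), dtype=bool)
    pred[2:6, 2:6] = True
    gt[3:7, 3:7] = True

    # mean, over predicted boundary pixels, of the distance to the nearest ground-truth boundary pixel
    print(average_closest_distance(pred, gt))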
+ + Args: + optimizer (Optimizer): Wrapped optimizer + warmup_epochs (int): Number of epochs for warmup + total_epochs (int): Total number of training epochs + min_lr (float): Minimum learning rate after cosine annealing + warmup_start_lr (float): Initial learning rate for warmup + verbose (bool): If True, prints a message to stdout for each update + """ + def __init__( + self, + optimizer: Optimizer, + warmup_epochs: int, + total_epochs: int, + min_lr: float = 1e-6, + warmup_start_lr: float = 1e-6, + verbose: bool = False + ): + self.warmup_epochs = warmup_epochs + self.total_epochs = total_epochs + self.min_lr = min_lr + self.warmup_start_lr = warmup_start_lr + self.max_lrs = [group['lr'] for group in optimizer.param_groups] + + super().__init__(optimizer, verbose) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + epoch = self.last_epoch + + # Warmup phase + if epoch < self.warmup_epochs: + return self._get_warmup_lr(epoch) + + # Cosine annealing phase + return self._get_cosine_lr(epoch) + + def _get_warmup_lr(self, epoch): + """Linear warmup""" + alpha = epoch / self.warmup_epochs + return [self.warmup_start_lr + alpha * (max_lr - self.warmup_start_lr) + for max_lr in self.max_lrs] + + def _get_cosine_lr(self, epoch): + """Cosine annealing after warmup""" + epoch = epoch - self.warmup_epochs + cosine_epochs = self.total_epochs - self.warmup_epochs + + alpha = epoch / cosine_epochs + cosine_factor = 0.5 * (1 + math.cos(math.pi * alpha)) + + return [self.min_lr + (max_lr - self.min_lr) * cosine_factor + for max_lr in self.max_lrs] + + +# My implementation for the HD95 Loss function from medpy +# https://loli.github.io/medpy/_modules/medpy/metric/binary.html +class HDLoss(nn.Module): + def __init__(self, threshold=0.5, max_hd95=14500): + super().__init__() + self.threshold = threshold + self.max_hd95 = max_hd95 + + def forward(self, preds: torch.Tensor, targets: torch.Tensor) -> torch.Tensor: + """ + Compute the 95th percentile of the Hausdorff Distance. + + Args: + preds: Predicted masks (B x H x W) + targets: Ground truth masks (B x H x W) + + Returns: + Mean normalized HD95 across the batch + """ + preds_binary = (preds > self.threshold).float() + targets_binary = (targets > self.threshold).float() + + hd95_values = torch.zeros(preds.size(0), device=preds.device) + + for i in range(preds.size(0)): + pred_np = preds_binary[i].cpu().numpy() + target_np = targets_binary[i].cpu().numpy() + + # Handle empty masks + if not np.any(pred_np) or not np.any(target_np): + hd95_values[i] = self.max_hd95 + continue + + try: + # medpy.metric.binary.hd95 computes symmetric HD95 + value = hd95(pred_np, target_np) + hd95_values[i] = torch.tensor(value, device=preds.device) + except Exception as e: + # Fallback to maximum distance in case of errors + hd95_values[i] = self.max_hd95 + + # Normalize to [0, 1] + return (hd95_values / self.max_hd95).mean() + +def boxcount(Z, k): + """ + returns a count of squares of size kxk in which there are both colours (black and white), ie. 
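The schedule implemented by CosineAnnealingWarmupScheduler above, written out as a standalone function for clarity: linear warmup from warmup_start_lr to the optimizer's base learning rate over warmup_epochs, then cosine decay down to min_lr over the remaining epochs. The helper name lr_at_epoch is illustrative only:

    import math

    def lr_at_epoch(epoch, max_lr, warmup_epochs=5, total_epochs=100,
                    warmup_start_lr=1e-6, min_lr=1e-6):
        if epoch < warmup_epochs:
            # linear warmup towards the optimizer's base learning rate
            alpha = epoch / warmup_epochs
            return warmup_start_lr + alpha * (max_lr - warmup_start_lr)
        # cosine annealing from max_lr down to min_lr after warmup
        alpha = (epoch - warmup_epochs) / (total_epochs - warmup_epochs)
        return min_lr + (max_lr - min_lr) * 0.5 * (1 + math.cos(math.pi * alpha))

    print([round(lr_at_epoch(e, max_lr=1e-3), 8) for e in (0, 2, 5, 50, 99)])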
the sum of numbers + in those squares is not 0 or k^2 + Z: np.array, matrix to be checked, needs to be 2D + k: int, size of a square + """ + S = np.add.reduceat( + np.add.reduceat(Z, np.arange(0, Z.shape[0], k), axis=0), + np.arange(0, Z.shape[1], k), axis=1) # jumps by powers of 2 squares + + # We count non-empty (0) and non-full boxes (k*k) + return len(np.where((S > 0) & (S < k * k))[0]) + + +def fractal_dimension(Z, threshold=0.5): + """ + calculate fractal dimension of an object in an array defined to be above certain threshold as a count of squares + with both black and white pixels for a sequence of square sizes. The dimension is the a coefficient to a poly fit + to log(count) vs log(size) as defined in the sources. + :param Z: np.array, must be 2D + :param threshold: float, a thr to distinguish background from foreground and pick up the shape, originally from + (0, 1) for a scaled arr but can be any number, generates boolean array + :return: coefficients to the poly fit, fractal dimension of a shape in the given arr + """ + # Only for 2d image + assert (len(Z.shape) == 2) + + # Transform Z into a binary array + Z = (Z < threshold) + + # Minimal dimension of image + p = min(Z.shape) + + # Greatest power of 2 less than or equal to p + n = 2 ** np.floor(np.log(p) / np.log(2)) + + # Extract the exponent + n = int(np.log(n) / np.log(2)) + + # Build successive box sizes (from 2**n down to 2**1) + sizes = 2 ** np.arange(n, 1, -1) + + # Actual box counting with decreasing size + counts = [] + for size in sizes: + counts.append(boxcount(Z, size)) + + # Fit the successive log(sizes) with log (counts) + coeffs = np.polyfit(np.log(sizes), np.log(counts), 1) + return -coeffs[0] + + +def compute_hd95(pred, target, pixel_spacing=None): + """ + Compute the 95th percentile Hausdorff Distance between binary segmentation masks. + + Args: + pred (torch.Tensor): Predicted binary segmentation mask (B, H, W) + target (torch.Tensor): Ground truth binary segmentation mask (B, H, W) + pixel_spacing (tuple, optional): Pixel spacing in (y, x) format. 
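A quick sanity check of the box-counting fractal_dimension above on a synthetic mask; for a filled square the boundary is smooth, so the estimate is expected to land near 1 (the exact value depends on resolution). AllinonSAM/utils.py is assumed importable:

    import numpy as np
    from utils import fractal_dimension  # AllinonSAM/utils.py, defined above

    mask = np.zeros((128, 128))
    mask[32:96, 32:96] = 1.0  # filled square with a smooth, roughly one-dimensional boundary

    # thresholds internally (default 0.5) and box-counts boxes straddling the boundary
    print(fractal_dimension(mask, threshold=0.5))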
Defaults to (1.0, 1.0) + + Returns: + torch.Tensor: 95th percentile Hausdorff Distance + """ + if pixel_spacing is None: + pixel_spacing = (1.0, 1.0) + + def compute_surface_distances(mask1, mask2, spacing): + """Compute surface distances between binary masks.""" + mask1 = mask1.cpu().numpy() + mask2 = mask2.cpu().numpy() + + # Convert to boolean arrays + mask1 = mask1 > 0.5 + mask2 = mask2 > 0.5 + + # Distance transforms + dist1 = distance_transform_edt(~mask1, sampling=spacing) + dist2 = distance_transform_edt(~mask2, sampling=spacing) + + # Get surface points + surface1 = np.logical_xor(mask1, morphology.binary_erosion(mask1)) + surface2 = np.logical_xor(mask2, morphology.binary_erosion(mask2)) + + # Get distances from surface points + distances1 = dist2[surface1] + distances2 = dist1[surface2] + + return distances1, distances2 + + def compute_hd95_single(pred_mask, target_mask, spacing): + """Compute HD95 for a single pair of masks.""" + distances1, distances2 = compute_surface_distances(pred_mask, target_mask, spacing) + + if len(distances1) == 0 and len(distances2) == 0: + return 0.0 # Both masks are empty + elif len(distances1) == 0 or len(distances2) == 0: + return np.inf # One mask is empty + + # Compute 95th percentile of distances + dist1_95 = np.percentile(distances1, 95) + dist2_95 = np.percentile(distances2, 95) + + return max(dist1_95, dist2_95) + + # Handle batch dimension + if len(pred.shape) == 4: # (B, C, H, W) + pred = pred.squeeze(1) + if len(target.shape) == 4: + target = target.squeeze(1) + + batch_size = pred.shape[0] + hd95_values = [] + + for i in range(batch_size): + hd95 = compute_hd95_single(pred[i], target[i], pixel_spacing) + hd95_values.append(hd95) + + return torch.tensor(np.mean(hd95_values)).to(pred.device) + +def dice_coef(y_true, y_pred, smooth=1): + # print(y_pred.shape, y_true.shape) + intersection = torch.sum(y_true * y_pred,axis=(-1,-2)) + union = torch.sum(y_true, axis=(-1,-2)) + torch.sum(y_pred, axis=(-1,-2)) + dice = ((2. * intersection + smooth)/(union + smooth)).mean() + # print(dice) + return dice + +def iou_coef(y_true, y_pred, smooth=1): + intersection = torch.sum(torch.abs(y_true * y_pred),axis=(-1,-2)) + union = torch.sum(y_true,axis=(-1,-2))+torch.sum(y_pred,axis=(-1,-2))-intersection + iou = ((intersection + smooth) / (union + smooth)).mean() + return iou + +def running_stats(y_true, y_pred, smooth = 1): + intersection = torch.sum(y_true * y_pred,axis=(-1,-2)) + union = torch.sum(y_true, axis=(-1,-2)) + torch.sum(y_pred, axis=(-1,-2)) + return intersection, union + +def dice_collated(running_intersection, running_union, smooth =1): + if len(running_intersection.size())>=2: + dice = (torch.mean((2. * running_intersection + smooth)/(running_union + smooth),dim=1)).sum() + else: + dice = ((2. * running_intersection + smooth)/(running_union + smooth)).sum() + return dice + +def dice_batchwise(running_intersection, running_union, smooth =1): + dice = ((2. 
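How the per-batch statistics above are combined in train.py: running_stats returns per-sample intersection and union sums, and dice_collated turns them into a Dice total that is later divided by the number of samples. A short sketch, assuming AllinonSAM/utils.py is importable:

    import torch
    from utils import running_stats, dice_collated  # AllinonSAM/utils.py, defined above

    y_true = torch.randint(0, 2, (4, 32, 32)).float()
    y_pred = (torch.rand(4, 32, 32) > 0.5).float()

    ri, ru = running_stats(y_true, y_pred)  # per-sample intersection and union sums, shape (4,)
    dice_sum = dice_collated(ri, ru)        # Dice summed over the batch
    print(dice_sum / y_true.shape[0])       # mean Dice, mirroring running_dice / count in train.py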
* running_intersection + smooth)/(running_union + smooth)) + return dice + +def dice_loss(y_pred, y_true): + numerator = (2 * torch.sum(y_true * y_pred)) + denominator = torch.sum(y_true + y_pred) + + return 1 - ((numerator+1) / (denominator+1)) + +def weighted_ce_loss(y_pred, y_true, alpha=64, smooth=1): + weight1 = torch.sum(y_true==1,dim=(-1,-2))+smooth + weight0 = torch.sum(y_true==0, dim=(-1,-2))+smooth + multiplier_1 = weight0/(weight1*alpha) + multiplier_1 = multiplier_1.view(-1,1,1) + # print(multiplier_1.shape) + # print(y_pred.shape) + # print(y_true.shape) + + loss = -torch.mean(torch.mean((multiplier_1*y_true*torch.log(y_pred)) + (1-y_true)*(torch.log(1-y_pred)),dim=(-1,-2))) + return loss + +def focal_loss(y_pred, y_true, alpha_def=0.75, gamma=3): + # print('going back to the default value of alpha') + alpha = alpha_def + ce_loss = F.binary_cross_entropy_with_logits(y_pred, y_true, reduction="none") + assert (ce_loss>=0).all() + p_t = y_pred * y_true + (1 - y_pred) * (1 - y_true) + # 1/0 + loss = ce_loss * ((1 - p_t) ** gamma) + alpha_t = alpha * y_true + (1 - alpha) * (1 - y_true) + loss = alpha_t * loss + loss = torch.mean(loss, dim=(-1,-2)) + return loss.mean() + +def multiclass_focal_loss(y_pred, y_true, alpha = 0.75, gamma=3): + if len(y_pred.shape)==4: + y_pred = y_pred.squeeze() + ce = y_true*(-torch.log(y_pred)) + weight = y_true * ((1-y_pred)**gamma) + fl = torch.sum(alpha*weight*ce, dim=(-1,-2)) + return torch.mean(fl) + +def str2bool(v): + if v.lower() in ['true', 1]: + return True + elif v.lower() in ['false', 0]: + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +def count_params(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + +class AverageMeter(object): + """Computes and stores the average and current value""" + + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + +class qkv_transform(nn.Conv1d): + """Conv1d for qkv_transform""" diff --git a/AllinonSAM/validation_images/labels/epoch_0_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_0_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..456941ccdcf4fa34b9a00a4cf56cea8659575b1a Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_0_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_0_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_0_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..daec4abcaaf8533671dc6ae27cdb45f1d89ceb6c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_0_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_0_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_0_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..37fa2bb615711f1582afe38b055aa0efdc214835 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_0_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_0_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_0_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..13b2c536d4ca146744af0adcf83d02c478e5b920 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_0_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_0_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_0_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..97e62df2ddbd2f2652fc5e164779107c83c4b7c6 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_0_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_0_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_0_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..9e492fd26890676b76508ab967e8390bdafef71e Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_0_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_0_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_0_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..c7aca8cedb1f0929b5fc86f1c67c9bca2302a5c3 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_0_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_0_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_0_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7d729273535e8d6ce35f93852554841f5d64d0ec Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_0_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_0_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_0_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ba4a97353cb2d94eae878a2bd07d3a09ee453fb9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_0_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_0_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_0_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..c9b6e56d5e8a188714c180501b296f789b2ad2b1 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_0_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_100_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_100_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..4fcf589f24ee6eca178179db89d73e4535962a2b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_100_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_100_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_100_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5c4220f9dfbe371dc1e41d988607674a9d486347 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_100_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_100_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_100_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ecebe8daf3dcbf2a1d87c2b8cbdab5d7417fb7f0 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_100_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_100_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_100_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..44b9a53add575644c41c0759fe5458d461fc627f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_100_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_100_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_100_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..c82586448b8630db6e406805773a941f22012f7b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_100_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_100_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_100_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..c105c38ce5d00258d328b772e459a7e459c383a6 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_100_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_100_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_100_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ba62699414bcdcda631ecb54f6339828da546e84 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_100_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_100_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_100_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..8af2ceff747327552777a02f07bc945ff6fd7fa6 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_100_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_100_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_100_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..8c1e503605c624e50442c81c6257be032a04e89a Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_100_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_100_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_100_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..9e7f5bdc7adc7c737dad2136c929b580177304c7 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_100_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_10_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_10_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..0fa90d842dfecf2dd0634c71f306bbba8313c0c9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_10_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_10_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_10_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ee949e85236731e6f725835cd7208d7e3f78ef29 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_10_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_10_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_10_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..529f219f1c2b021971a749321efd56b03ec2ba5e Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_10_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_10_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_10_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..04ca611ac451971686bb2d75d923fc8da4458267 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_10_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_10_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_10_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..7eb78affd13985ec04794c2e662bfdb55ac69c73 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_10_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_10_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_10_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..3be8949d6e488e9819e19c73c1e4565fdb97d79f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_10_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_10_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_10_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..1a9f7d04da67f8ac4e63d74968e58a5963d98cd9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_10_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_10_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_10_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..acc8873d7aedc340bd339d576f34b9fdc04efdf9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_10_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_10_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_10_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..9fb2cb23d74b29ffdb0eee586f84af6dc14c53c7 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_10_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_10_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_10_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..48480c05bba471bc775f83a484da3ba67c9c0480 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_10_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_110_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_110_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f9699a3071264db27d79b472e9e6ecaee09bbd8c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_110_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_110_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_110_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..b8af0d8377693e96c7b7d0013218995aa5aedc45 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_110_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_110_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_110_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..dfc9e236b1889f03169acd5e1001b4ab4db68b4b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_110_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_110_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_110_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e2ae2f7e5f6df76a07c3c2fca3c6ac7ea66fcdd7 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_110_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_110_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_110_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..7edc643e74c33cdf4fedcde277d6ef4526800071 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_110_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_110_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_110_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..1a89289196cd585d6a66cf4a89aef24ef9bc6111 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_110_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_110_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_110_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..34f30f88656d6cd608b5e33b15519d66bbd520ba Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_110_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_110_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_110_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..202a272d5a03f74c4e38c344bf2e9d88ee999c7a Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_110_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_110_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_110_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..cb8bcafe55b42b8db732a735d0752f8263da1078 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_110_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_110_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_110_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..609e27a9e7b01faf68e31a29e693f9ebf5f4e370 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_110_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_120_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_120_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..c141ba6e3f06b21b2528aebb805576fd5240f27c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_120_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_120_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_120_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb57d96903dc0ef5292596a3cfbaf054e2c41819 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_120_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_120_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_120_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..c45dd3af7e3bb670ac3a77bbe8c01b63dbd66eac Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_120_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_120_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_120_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..fe7395096258083040849673579edb45600a0a66 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_120_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_120_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_120_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..d59dd80f5de3fe853a68abfc85e55b8657325bb9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_120_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_120_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_120_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..c1bff9b5ab52f162d30f8afe7860f23392f516e7 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_120_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_120_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_120_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..bc38c78ceb6a374abd93f29e2d9c9b9e412ac5e0 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_120_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_120_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_120_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f9725dcf272280077569fc20c45373d79f1d1567 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_120_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_120_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_120_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..bcffa8121a3358d8aac44110246fc14ae95cfd8f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_120_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_120_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_120_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e74d578e3de8b7175c3f58f220943e1c0c26d3ac Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_120_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_130_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_130_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..bcffa8121a3358d8aac44110246fc14ae95cfd8f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_130_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_130_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_130_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..95d076c0e3e6dd403b096215c98bee17bc19f558 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_130_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_130_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_130_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..7b029792c68b6aab3b67f3c6f297f542a4affc8b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_130_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_130_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_130_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..74a70beef661ceed8af56f015333bd2541a27947 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_130_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_130_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_130_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..36bc8fe587cd5f9d4df6152cbc0a2c59d990874d Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_130_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_130_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_130_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5bc968a00cb5bc7fbdbe0fbbe28cb7bcb8e224cf Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_130_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_130_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_130_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..a535830f6ee2ae45dc35ee6cd5869f58cc6a3f0c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_130_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_130_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_130_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..4571b4ea3c36d51abfb6abc389d87b428d073fc8 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_130_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_130_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_130_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..8af2ceff747327552777a02f07bc945ff6fd7fa6 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_130_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_130_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_130_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ae92202330656c53437f40566c285c5f1acbf32b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_130_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_140_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_140_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..fe7d25eedae0e274707fcd72a981dd17e4b62775 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_140_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_140_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_140_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5ad4b3f226351aa126611baf2f429cabb56f2b5c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_140_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_140_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_140_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..2c72c3426983c5d12491a61c47132e4eced06d9e Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_140_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_140_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_140_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..a535830f6ee2ae45dc35ee6cd5869f58cc6a3f0c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_140_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_140_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_140_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..0b8af5418bb458a924e4ac74291da49b87de82ff Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_140_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_140_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_140_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..38a0cfc92a9b9dc4d042f4397a20315b1e7b599d Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_140_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_140_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_140_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..509f093909acd9ec050443a78b4b779abeb630cd Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_140_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_140_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_140_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..bb65dbc7c2ab23ee713aa66f078da351eb2afb59 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_140_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_140_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_140_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..bef002bab10fed8798491ebf8625201418a6262e Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_140_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_140_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_140_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..2ab5ff38e27f1a87290e49c2b68921c7c1a0d0ff Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_140_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_150_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_150_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..0ad55796446ce821349554643617ba94efdccfb4 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_150_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_150_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_150_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..9e0e2fa21cfd049bba3c917b9297612f908f6361 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_150_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_150_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_150_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..3e7b7a8abe9b5bebda4caed9bfa6c5a4f0ccca4d Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_150_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_150_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_150_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ab45541f736c225340b5ac1dac64b180739412ce Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_150_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_150_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_150_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..9e0e0f9c22adbd8250c75f67d71fb064ffd1d8bc Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_150_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_150_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_150_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..60ea6767f428d9ee4de36a4c0ac5ef95fd08db60 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_150_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_150_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_150_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..688e57b9755eb6aeac995aab0fb9c1118e7b875b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_150_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_150_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_150_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..d9b367f499d5b35e54d5c09bdd9752887d396d20 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_150_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_150_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_150_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..556059824d823b67c37bb2420cebee7558e54f92 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_150_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_150_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_150_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..89e151c0dfcc3a0b26e6957561bda46b8fc589e2 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_150_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_160_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_160_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..fb57e80b6339376348bb81acc2bc13cd55fa46c0 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_160_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_160_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_160_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..74a70beef661ceed8af56f015333bd2541a27947 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_160_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_160_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_160_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..48281b50bd6b00d1eb64f8bac47e25d248c79dce Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_160_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_160_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_160_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..19ea8d26a56631d48af0ca9e796a2ba06370473e Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_160_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_160_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_160_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..bd9551178c5de6c003e0a6759a8acc3fec85badb Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_160_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_160_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_160_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..3a5305e40447a85e42e982d281502812e2d5e74f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_160_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_160_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_160_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..40629c98c74a1c8729d7d476026366a02e6af36f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_160_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_160_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_160_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e19df039971d88798faf23aaafb09f725a55ade1 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_160_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_160_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_160_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..4c84fd6d239e2c8429243230d5aa3269d41dc38e Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_160_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_160_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_160_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..395b11cc0d504eeee964658394af7e46d44b23c2 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_160_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_170_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_170_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..fcc5a3ba565ef62c3103ef19cc73e04a2b995f4d Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_170_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_170_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_170_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5a9d152ff469bb13aa1145808b38ad39b1a0983c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_170_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_170_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_170_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..35e1c8cc86cdaacbea988aabc8e1f20bc522535b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_170_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_170_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_170_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..6f21dde78580c55da024651b63157b2f61855266 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_170_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_170_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_170_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..44b73d8fef407b408275859945dc89588615dae6 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_170_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_170_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_170_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..bf1c0a9d436e6e9b009f5a6b0d3ab394ac9e0fd8 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_170_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_170_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_170_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..8f35ecaa2e7e359b59e553461b0512fec88c239b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_170_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_170_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_170_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..a8b75a35d3912174330810a81bd48120f52c14f9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_170_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_170_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_170_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..e562c38c46a36ddd86f5d8ecf51535f65206f30a Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_170_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_170_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_170_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..a7eba964533497dff46948ed050cb28f1d60e550 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_170_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_180_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_180_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..8286f1deb3cf019736401f4ecc9c2a069b562729 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_180_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_180_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_180_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..44b9a53add575644c41c0759fe5458d461fc627f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_180_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_180_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_180_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..b4449181a5938cda1e080c459215a6874ae24ecc Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_180_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_180_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_180_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..796fd09bfda3b8ca1676fc72f23fb6ed84cd8dde Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_180_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_180_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_180_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..7f56b3e812c5365bf90426502891a178f7e992e4 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_180_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_180_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_180_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..b533a113d0356906b0e968808b5fd5b132f83ab9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_180_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_180_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_180_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..c7fd4ddb94e2c059e64ac049e2120663f589eb25 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_180_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_180_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_180_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..456941ccdcf4fa34b9a00a4cf56cea8659575b1a Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_180_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_180_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_180_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..35ffca3a4ba2a4302194b3ac3807fc659d9ff6a3 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_180_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_180_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_180_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5ce912074ed0c515acd0ffea2d3e9c70f44055d7 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_180_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_190_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_190_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..c82586448b8630db6e406805773a941f22012f7b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_190_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_190_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_190_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..43dd2eec6eb2ccec9ce6b4df8489c0638ba9cf43 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_190_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_190_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_190_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..a228481bbae5b7951969b9b6fa3295c94bcd6ed3 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_190_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_190_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_190_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..282e82b3c437dfd5a7f8bcc24ab21277a696fac7 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_190_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_190_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_190_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..e8deb4bff7ede1a6d4a1b3b252aec1c2d811241c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_190_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_190_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_190_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f9725dcf272280077569fc20c45373d79f1d1567 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_190_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_190_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_190_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..9a69bb8cf9f676fb43f07bfb68da220cd5073fe9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_190_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_190_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_190_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..90d8184423bee114c4d47097a01cc99de836109f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_190_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_190_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_190_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..1931bc0bd99ac44ec6e94439e6e8531e82819a8f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_190_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_190_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_190_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..3b56c0ed5e581d6d2d2d3128e699e9c4da2346ac Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_190_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_20_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_20_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..1979da369938263b07db67bc42031782721ad2ca Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_20_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_20_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_20_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..48281b50bd6b00d1eb64f8bac47e25d248c79dce Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_20_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_20_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_20_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..fba6e0dfe8bbe9c52eb1da7596d5884fd1a4903d Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_20_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_20_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_20_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..df273fbd5df4ac393a8dbf06a98a2c95c11124dc Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_20_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_20_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_20_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ac83cd68eeca9f60012327c24b80437e10cbf5a9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_20_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_20_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_20_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..466121097ff0203be94dc740726ea4d494df55c1 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_20_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_20_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_20_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..37aa00e7516c882ea5722a45151bfe84bf89de0a Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_20_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_20_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_20_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..113d52004b64ad8650658dfa02691a604785b002 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_20_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_20_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_20_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..bd6eaca2c2df4636b47bb35063bf7962728d79ee Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_20_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_20_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_20_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..674159e66bb8d5bc94ec0b9a1961a0c6ff8abd98 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_20_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_30_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_30_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..92596bef7627f8088611a25f72a08055621fd3b3 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_30_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_30_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_30_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f513bc7b868c6c3d48181e4a7c079d671684c53d Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_30_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_30_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_30_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..a0400389f732a4ee280dc7521187ec60fd36a4ed Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_30_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_30_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_30_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f2af26b74865ba42e44cf8ed3ffcb156fad1c69b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_30_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_30_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_30_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f627d102dfa7b13993cd6c0fc396b19131a2b281 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_30_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_30_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_30_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..82905334904ac80d868ab6833a8721727fc7223d Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_30_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_30_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_30_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..8c1e503605c624e50442c81c6257be032a04e89a Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_30_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_30_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_30_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..fbf935882de9d98b72bdedea4558143bb11be131 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_30_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_30_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_30_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..9f87705cb1c1e99d44f58097e8828d8d7c207650 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_30_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_30_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_30_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..4acde2aed3c736c83aafdab42591d4d63a8ae486 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_30_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_40_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_40_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..37f0cc98224a4b9d2f72c27ab3be45f006fdccdc Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_40_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_40_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_40_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cd6f8efbf2373bb5e380b40477ff3fa683c1e747 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_40_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_40_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_40_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..4d155e63ba1d1f6b02766476559900f4203f5851 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_40_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_40_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_40_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..3cc2a7df744d5e470ed85461ddf805933d8cfa2f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_40_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_40_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_40_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..d08a1f3da8fc060599e0fc9d76de99d7012b9c2e Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_40_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_40_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_40_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..92bd5bc611c0965fe0aa0ff1048ef273a5d1d2a8 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_40_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_40_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_40_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..66781bfc608438ccd7b4c3051230b72fb3e88005 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_40_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_40_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_40_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ada0aef28b12cc45e54d7b02c30eb7b03c6d1228 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_40_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_40_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_40_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..d949e46a71cb78f6b731507b169b0964177fd706 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_40_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_40_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_40_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..007964fc17cd29d1b741874193476e38891fe44e Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_40_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_50_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_50_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..54bf03a64d18b445bb15f18a9f28f1c7ab896ae9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_50_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_50_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_50_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..af5a4ecedf6e11d2fa36cab8f407fef6572aef66 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_50_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_50_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_50_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..c6864c09dec60b2a7cb97eb09aff31b93df9a65b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_50_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_50_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_50_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..937fa45f424295c3a527347c9aa55ca9cff46b97 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_50_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_50_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_50_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..c1bff9b5ab52f162d30f8afe7860f23392f516e7 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_50_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_50_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_50_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5b06cda9844dddec8d0a0a40dbc2a0d193f7ad57 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_50_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_50_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_50_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..2585f108080cf2ceeea03e17de88e3e934011ed7 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_50_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_50_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_50_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..2ebacc9214a51e6167ae247de451362cbbd18169 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_50_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_50_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_50_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..aa9f210e4b4844c949f4ab965494bd0af202f89c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_50_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_50_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_50_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e00d00fe30303546ef8ed144520ed07dcc9e9fb4 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_50_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_60_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_60_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..bb66814157655111c665359b97727372190550e6 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_60_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_60_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_60_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..64a91b1baac616ffad67967b47a6234b4f9c8e74 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_60_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_60_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_60_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..4c4476381f1ea43cea66695e7715764782277627 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_60_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_60_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_60_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..4c84fd6d239e2c8429243230d5aa3269d41dc38e Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_60_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_60_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_60_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ada0aef28b12cc45e54d7b02c30eb7b03c6d1228 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_60_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_60_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_60_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..1db94975ebbf9cee22025fb093bc7d803491cdd9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_60_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_60_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_60_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..c105c38ce5d00258d328b772e459a7e459c383a6 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_60_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_60_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_60_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..35e1c8cc86cdaacbea988aabc8e1f20bc522535b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_60_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_60_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_60_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..57b0db560ee69575bc05ff2a97cc7a4c4cee47c3 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_60_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_60_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_60_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..b4ec8f14c8ba4fe63ec57f3ed425fc4a51672db0 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_60_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_70_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_70_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f65bac9d714b33c3c6e84410da21ccacc31e607f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_70_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_70_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_70_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..dcac53ab1e492f50bcd7f1f568dbba6cbce32d9d Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_70_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_70_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_70_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..7102448c826a648d1a40eaab8cf3d515ab21efe6 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_70_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_70_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_70_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f2af26b74865ba42e44cf8ed3ffcb156fad1c69b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_70_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_70_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_70_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..dd99d4a816f567595d0f8408954ac027a9219ea9 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_70_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_70_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_70_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..efd62dc5474742428ac86955b106e3faa809bb33 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_70_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_70_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_70_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..75ba42a900f98858a06dea4ca840644499dd2a0f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_70_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_70_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_70_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..a535830f6ee2ae45dc35ee6cd5869f58cc6a3f0c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_70_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_70_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_70_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..b86e4444640db4167d674c12eedc46b75f7aa4c8 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_70_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_70_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_70_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..2611ed7ef1621fc3a7a58102be799cc74aaf585b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_70_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_80_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_80_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..7b60db665b4758be5e7242245daf4e99632939ed Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_80_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_80_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_80_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..df4e49a6fa89079363564b571249f690c9285ef1 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_80_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_80_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_80_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..45c9de5c6451ebcfe8d328fff0043c2cee4e843d Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_80_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_80_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_80_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..432012b209d331308d59117996092dea35fdb2f5 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_80_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_80_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_80_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..64c83b317683dcd292ad8793f43e50ea005ccfca Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_80_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_80_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_80_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..9f8eba88fc2ea806c28300789a6565cbd68b682d Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_80_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_80_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_80_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..9cc3d45b3681f1c289935dd0edfdf7ce73741ecf Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_80_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_80_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_80_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..72ee19adb698e1f9a8c9fe1e77aa832991e6a898 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_80_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_80_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_80_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..dbe34b4c2df6fbc2a0d75f9c08a84b94a81b3192 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_80_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_80_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_80_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7f8637da4d7e4d8d18c3d549546a63bd7436b57b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_80_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_90_batch_0_img_0.png b/AllinonSAM/validation_images/labels/epoch_90_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..6f35230e5afadd740ce22739d5d026e3445a519a Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_90_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_90_batch_0_img_1.png b/AllinonSAM/validation_images/labels/epoch_90_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f37642a85c48b4b3c5d25dfd2f389cd82d108f79 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_90_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_90_batch_1_img_0.png b/AllinonSAM/validation_images/labels/epoch_90_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..556a6b879dc4efc86a67351487e437925135325a Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_90_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_90_batch_1_img_1.png b/AllinonSAM/validation_images/labels/epoch_90_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..d2dbdc5c6e105546d385bb30e78fa32f0e82d2ef Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_90_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/labels/epoch_90_batch_2_img_0.png b/AllinonSAM/validation_images/labels/epoch_90_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..e2d76411d9770342ced97752c6efce3609e74ff4 Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_90_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_90_batch_2_img_1.png b/AllinonSAM/validation_images/labels/epoch_90_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..bfd9a06a855bcbf1bb415f6dd30b146cb1b85d1e Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_90_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_90_batch_3_img_0.png b/AllinonSAM/validation_images/labels/epoch_90_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..68ea14c68e573e2068776f53d117f5fa338fb35f Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_90_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_90_batch_3_img_1.png b/AllinonSAM/validation_images/labels/epoch_90_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..fce7559d8ed797d7b7dbe22bbca800b7eee243be Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_90_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_90_batch_4_img_0.png b/AllinonSAM/validation_images/labels/epoch_90_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..be5c6f7b96775249521883fd79afdb866244364c Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_90_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/labels/epoch_90_batch_4_img_1.png b/AllinonSAM/validation_images/labels/epoch_90_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..535e38060c47183f4206f65609de16c192cd875b Binary files /dev/null and b/AllinonSAM/validation_images/labels/epoch_90_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_0_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..1e2e0215f98db83b2057d97aefb238a2efdff953 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_0_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f42a07508fdaeb157fdd7ee64642a147d1a649ae Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_0_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..0c2d239b8b8bf35244b96d1caaa9c990c8679279 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_0_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..3825d575027c0d863de1ea57d15b16f1833ad2b2 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_1_img_1.png differ diff --git 
a/AllinonSAM/validation_images/pred_labels/epoch_0_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..383efcb81f20ecd6901f226ebbe148c943b880a1 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_0_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7389d76bfa51362edd0ee7946f910ba74c55095f Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_0_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..426512a93585264463591f261e9791238eca5e7b Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_0_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5ead3bbe5168e2bca29fc84463e954f4a89d85b1 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_0_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..adb141db63e013a2866dea23dd2970233c9022bb Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_0_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f73dca1f9a56e8ea0d1d751a6db6f1fed428ed49 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_0_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_100_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..8cdce250bcfc6c91daab86f2adfc6f70dfce0d3d Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_100_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..4c0e2c6f877f15a8ed823d0672d2315f348bc2d8 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_100_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..6cfc951bf8b48506bccf8dd9346a171635cedf03 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_100_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..c0d49228966189f43da7eda3c27943495c458f7b Binary files /dev/null and 
b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_100_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..a22059b58e158571cf61c33338a04517cdf549b6 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_100_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..a196de4cbd331087ba597a8622c9b7d0780cc658 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_100_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..e9a685a368febf418cb42c38837fb945bbc95ecb Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_100_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7c775835ad453ef978efc5f36ab0cb296b0267a1 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_100_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..d40f78073153b06d8fba59887a1da543f8a32787 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_100_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..dfa4807188743f1f026191ea5225c7db425cba4c Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_100_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_10_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..a0350586b1300eaba60153d4c9787c4a737d78ce Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_10_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..a8dba5674cc4f232ae897e6ca922fcc73ce773dd Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_10_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..0c27a0d7052c78d83b00ed4130d11c23d252eb38 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_10_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_1_img_1.png new file mode 100644 
index 0000000000000000000000000000000000000000..ddc0e42cf538868eee92d96016fe005912f3e2e1 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_10_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..633c057f544caf64788327485252c4b03335fdc3 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_10_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..1fb25971dca71c7fc493967002f13225c7bea00a Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_10_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..a4c1683ed303b7fa448baadabfe67c3c220e2293 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_10_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..501ac1eef67df596727536ad5c7061c88c3f7165 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_10_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..fbd1847c2e2d2afe7ff6fdafe6f43c02f9133d4f Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_10_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..505fdf93c2b7eeccf52bd1af5492b07c19289f2c Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_10_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_110_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..4a9615c0b894c4e151b5f882896b80d72d2f25df Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_110_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..6057078bbba0e9a8315a24b0bbcf8fd90fdfcf15 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_110_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..2ee4365b379b8163ee7548965e86a49ee93a833b Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_1_img_0.png differ diff --git 
a/AllinonSAM/validation_images/pred_labels/epoch_110_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..a34291081395e23bfed34d679d5fea0ab73e9446 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_110_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..fa4e5007dd6e4ea122c2cd7abac23bfe56c02734 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_110_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..723fce1277ea3a2996b84304e0d4eb50825f10e4 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_110_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..9437240ebb53625c3d3d238ea592db6a04bf3763 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_110_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..fdd18135a39717735efd48623f5dae783af11d42 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_110_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..6a85dd14424f592013f5a8a0ded2fed233b0d076 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_110_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7012639ad04c5ef22bead7a9acd5d4ac051605e0 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_110_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_120_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..36725d802e5fb048ab651e65ce6af089c4c78500 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_120_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..a7a30bf2cb03f28f96f2f1e6772d1747bd0689ed Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_120_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_1_img_0.png new file mode 100644 index 
0000000000000000000000000000000000000000..ea2d06f06bd5154c6fa87b7e5b2b5df219073823 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_120_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..bbb03b85afa1cffa8f0a921c339f4d0f6c74241b Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_120_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..7b48c47f922b04150527627ced9c72333cc9399b Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_120_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..10c53e9374092185982d2ad077671fd74ba73a1b Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_120_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..1450a9d44bce8d61a66bb765407d1b875a1365f6 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_120_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7f5a25b4fc3454e848c042c6c8667ae1ee6eaa7a Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_120_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..e0169eb5b7203f82803a4434ef1c72b74ed1d5e0 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_120_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..9e1493a2855a9558752a2081ce0943200ad78c6a Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_120_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_130_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..1ce9fb662563d8e8cf36e2406a901f81304ad1b8 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_130_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..d89b369e030fa7eea7f3fe352d631786d3e9ed09 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_0_img_1.png differ diff --git 
a/AllinonSAM/validation_images/pred_labels/epoch_130_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..fa6bb2aca9712cd47db50f47db505f2faea371f4 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_130_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..39a33b9352291dea71db20b461f09679e3092596 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_130_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..6d62fa434d3a3cb07d4594bb5cf679dbc9e96ed3 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_130_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e34b543d45c624fc97e156a0fe98b3893d8c5bc9 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_130_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..021cfb56c753f539d26ffaef3dba4cfcd47d099d Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_130_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..a800817562cf53647f765c5330278c4bd6840739 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_130_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..82a3d29d119c35acaec6eb8efab166d1cf5edd97 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_130_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..965cdd4d17c7578afcd4dfb29e34b99bed49ceb4 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_130_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_140_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..8620b9232058fd7d3b47a46acc2c0d76694c5cee Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_140_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_0_img_1.png new file mode 100644 index 
0000000000000000000000000000000000000000..b2b69a779961afdc767c1df26b5f31d0140d05f5 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_140_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..874a6d88ed8199edee4af95bdc9d3a59a6ce96f0 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_140_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..3d303b552c1c5c87d95be6db5f1195217aad8049 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_140_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..523b246799d346ed14f6ca242d1fe430f8a11dfb Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_140_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f502ac46eea82e302c4b751ea9496e76bf980235 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_140_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..8555aaebc14075938057b6012e83bf0d6ffa0320 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_140_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..2217fb1ef3dbbb0979757cab2968294290eca380 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_140_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..2af93161007e4062ae4ac98314afbbf2c36c4678 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_140_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..fdc1cc6dee67b7b9bcf0880f65f2f1e44f759958 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_140_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_150_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..17ef9bb8dba2e9d57e5945bea7c9d62922f87e60 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_0_img_0.png differ diff --git 
a/AllinonSAM/validation_images/pred_labels/epoch_150_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cad45e337366334de97994a99242e1f126a3c2cd Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_150_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f85123ba6c33dd29f2283cec86daf7d6e72a5cde Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_150_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..34a106b69e3c5a979edd638beaf72acf7359c605 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_150_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..698280952e4e5dcb58eb4695634f6e6f42cf68a0 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_150_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e7bfff2c28ba4b163ebe3ab881c0b51ff17f2da6 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_150_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..4ec6506412691088c6d119abd38edf87956baf03 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_150_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..3a31e94ac30a6355a0a35df6d78f13b79865a23c Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_150_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ac2d2916c96074630defc324635e6fbf5b98d6a0 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_150_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..afcd65d9d273c63d0946ae67150bfd6e7f6f86ea Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_150_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_160_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_0_img_0.png new file mode 100644 index 
0000000000000000000000000000000000000000..49ee9c0adb62b95550f894b75c62a266b4712130 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_160_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..c286f63305ab4a8e112221b35396667a9638abf0 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_160_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..91ad77bdc3d2c6c1d05fc6af6df474589e28092f Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_160_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..c756292344bdf0fcf0ab2ffa3924ac1111f02e1b Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_160_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ca63edf509d654ad2d939cf394100a7f264b5fa4 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_160_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..202790bcbe1b7c6c57995ffcdbfda2e57166791a Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_160_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..426003a6b9cbb5478b70b9d1d972c69c5065d70a Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_160_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..90d348181cde1a13dbc696a310f7e875c85c5965 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_160_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..78840c0769760617252034953747276a2a0306c4 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_160_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..d6d785b74790afa15194f1a0d9b15ff8df38f627 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_160_batch_4_img_1.png differ diff --git 
a/AllinonSAM/validation_images/pred_labels/epoch_170_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..2e65deb1d4ea7e57573e679bc4b9fcbc8db1ec16 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_170_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..29e303128abd7c1eefd02e7da2f98591ac78e469 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_170_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..adc4d2cf926d72afd6a8a315c9a123f07092f7dc Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_170_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..94e62d7d173198ea4a2c74843f67ab3e27e7429d Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_170_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..92f0e89f897b4ae8eb1fe82f48302ff3558fa790 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_170_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7ee23e77c30e3ac9fcee1c5f7ae953a68c220a12 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_170_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..9f494b65dfc03c2ee533f4b313a5373650f2722d Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_170_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..a8899b17bf50d85a12790303be127996bf2f4f5a Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_170_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..109392a4031c47230b3b400f9d855aac8a7b2c9b Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_170_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_4_img_1.png new file mode 100644 index 
0000000000000000000000000000000000000000..68418870d24832676dc5db4571f2c07e94914e9e Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_170_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_180_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..36adcce14c51ebd3c7e9fe22054d7217574ffcec Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_180_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..449083a987e2ecf66081ddd0399e979c4701f0c0 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_180_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..1d15e91d1d4a2360e8bebf486b0e142a2d63677f Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_180_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..2af98e8bc38b33a30ea15fe435046506e6afb18e Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_180_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..a7568fdc3f0eb2971bd425e28442ce71b6c475f5 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_180_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..403f23533c9d2626adfe9c3799b00bacbd8b17b2 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_180_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..111bb4588a8beb15f394f560575785124a79a40e Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_180_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..9ec6ec9513e16741e0c1205b5e022e10a0fa0174 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_180_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..80caae0f9fb7627b7518f56534848c93b76c7269 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_4_img_0.png differ diff --git 
a/AllinonSAM/validation_images/pred_labels/epoch_180_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..111c2934c06d415596cf7c1ca096bd6f3767394c Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_180_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_190_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..83d466c33434bf7e8f688108d7dbfd5ac3cc4b17 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_190_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5d83833556d307fadc555850330aa078389adaff Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_190_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..fb2df68804de7ba3def9b25a15d40ccdeaccdbb3 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_190_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..51660f778446282688ec4eb074e59b64191a6fc2 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_190_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..00e0adab54b203808cb17bbb7b4ba8f51943760f Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_190_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..89aa23f81697e00b08ca8fa9e093dd5567557fdd Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_190_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..91ff0c190fe0d775de7b0312ee26379ed4e93507 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_190_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5f79e40d45f8435609bb4b99a4b09204a22e2bc4 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_190_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_4_img_0.png new file mode 100644 index 
0000000000000000000000000000000000000000..adf0343d34cbb97d74a740c63a8e8bb1f95a6814 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_190_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..563581388446eab0cabab91fdf0d4d7f2966ebac Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_190_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_20_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..014e7b8cac193bdc6a809fea39046c19b591f29f Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_20_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..d6e33bc267fe1516ffe9fea8f0aeb5a32ed175a8 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_20_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..d5c703f26f82a78930d514c65ec3f6de4d97328f Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_20_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..90166a466b2e692de765fa6ee8d01e58c70520cf Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_20_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..e20b93f65eaeb8718d276ec2e3f076c31f58d617 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_20_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..07ae92021934c56e2c7c3196fdb4e79faf4c7cd0 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_20_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..08a1257ab363769e8bc816ff902370e0c8b4b8bf Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_20_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5018f72b0cac0c37e2fbf9e799d9ce272c633901 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_3_img_1.png differ diff --git 
a/AllinonSAM/validation_images/pred_labels/epoch_20_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..906d6d4b9e173e11db23fe785e648a9fc7f49b46 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_20_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5d460eea38e61a7f427c78ba283511f02d30be97 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_20_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_30_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..3706a4a6d3de9346896e80b79f6f6ac4b4da159d Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_30_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..72ee7ecef8a854ea468f7cd69f0584548c737407 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_30_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..9af734f741f5e3f69025655456963cf908a75291 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_30_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..51669e44f8fc10e84177999b4509e066b2099a66 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_30_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..5db762a33c3fb0ed638bf4ec03f82f9d3e02c5ba Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_30_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..111f4b007a1b552242c2f3184c34570c02ef8d46 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_30_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..0e515a4cb862ac652e970296df0ca8b3cad42d81 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_30_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..8579193c14334bf6318f93c750bf68464afadc0f Binary files 
/dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_30_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..006e7cc329da8ec5e15072de412d860106cd0235 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_30_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..d6759ead6571fe2ba9d6da074d61da1607911ce8 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_30_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_40_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..89b4765602cf45f33b260ccf814bd7fd997f250c Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_40_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..88dd576424e8dc3c525ffdb43f9b26c0dc6b3754 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_40_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..6093735309ec22abb31cc5ece7bbb2896c45e124 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_40_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e411c8043025f163828eb8e4dcd4abee1778470b Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_40_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..7066812726de3b86eaa5e4e591dfd2013692ed38 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_40_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..d15b277dbf250ae2082ff8f489545744ab61094f Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_40_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ae969358ca5a19245dc978cb10f0fd294f74a804 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_40_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_3_img_1.png new file mode 100644 index 
0000000000000000000000000000000000000000..fb7f0fbebcd18e2b3a07d86d12110a9ae01dd9de Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_40_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..d4b008d00e7cd9715349152f7fa3ee00fef6b0f1 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_40_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..6fd382d5c0b4249de690ac931c76ddfbcabfa8f4 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_40_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_50_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..a9be1ffa43b464b74eed7a11a08e8735990a96f6 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_50_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..d7f6b8aacfdcf75f091dc3252b278aad3d777ba8 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_50_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..2917a44493b0d8684157f761a5955bd013833c4f Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_50_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..720c289ec9bc4ab7e9f3b8e666063f94326e0b67 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_50_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..688765ccb82ff7fbbab82b3fa727f9946ef00616 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_50_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..8ac07d51e172341aef56f5e61749c1b8fc1e0125 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_50_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..a7a6d47debb19d8aa81b02153d2eada1ba4e50d3 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_50_batch_3_img_1.png 
b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e004e06e7a0f7b0511eed514bdd80d3401f91a8e Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_50_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..5f3d0d7e903c5afea4321c13d6960ccedab2f04b Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_50_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..ad7789f3285a5b21f35ccd66a74182aa2ca4514a Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_50_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_60_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..0aae97117b3bb84b131ce56a228d5a99198b7a50 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_60_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..aed0e134bd92c3130b5d3720a043dd63f6e00a2e Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_60_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..d1125bce9005ee14b4eed579b5035213b7d50194 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_60_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7b5c1890b899234be59ed299adc00ced68728c43 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_60_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ec61da2716f7ec0017afaa8f6d3b94d44c8f30bd Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_60_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..9a8ee59046df801b38395d252f7197b976550a56 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_60_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..d7169ca3e62fe969c4c984bc2fd0b0756f682de8 Binary files /dev/null and 
b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_60_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..97afa6e5b411a44f05655a8c1d23cd3d791f4781 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_60_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..1e2cfff3f31f55165e7c5202397d582cb4263e38 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_60_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..8ce3a52cc6e8af97de0b15b568edb871ec656d59 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_60_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_70_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..f23f4b039f5852c8ffb07fa06af7cd1aec868315 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_70_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..eb3aa8e01daa8e5c0bc392ad2edb9418202e73f5 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_70_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..d4baf9fc736d238a694224211487bf7de6b4d8ac Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_70_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..66b26db731c87ff6590ad59392bcc456a4309a4e Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_70_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..332a2d800c436cc5ddcdb6413d0b5d7d1a3ae7c9 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_70_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..66be8d1c31335b8a5282c7a1a8f463d75a2e3133 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_70_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_3_img_0.png new file mode 100644 index 
0000000000000000000000000000000000000000..2225d565e8fbdf48307413f3e1c74b7cafa3dbea Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_70_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..4e32d58edbe9a066e82b68b791a9c8a9b13e9d9a Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_70_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..60e08089a951c6a46f3c23dcb71bdd3aed5ffd47 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_70_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..339659dd8344fd2e8ed00846a15b74ff4e793782 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_70_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_80_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..9d22e1d99bb6ce09ecee16265b2175e36451ce0d Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_80_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..0c0217ad01385556da810f43fc1bb700590eb5c0 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_80_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..28584f686a1a06bd7ced9cc7e44532fafcd1fd27 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_80_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..6e08af961741b1df5b79437a0cfaeef004010d5c Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_80_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..1a7246467752e6538682c05703e0da617db3c0ed Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_80_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..4a6341e74e3a787325c144745dd7f9ff853a880f Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_80_batch_3_img_0.png 
b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..9e456676ad000250d6aaf53e7879ed6a28cbd516 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_80_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..e2d81261c3e56394e6c5a3361ff90e4615a6e958 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_80_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..6b512ec98fa7d68786079e56f5c5507f5fddf81b Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_80_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..16af6a6a80c0e10bf662bbe3c01260d7851aabc5 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_80_batch_4_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_90_batch_0_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_0_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..d66e37286204d388f55184d3aefea0b2c30335af Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_0_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_90_batch_0_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_0_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..5f3b31172ab9e6687122250507b8d06f040b9603 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_0_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_90_batch_1_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_1_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..2954481a02858e471842a778d74eb80135827ade Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_1_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_90_batch_1_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_1_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..49b9b6b3dcc411f89ce4ae528cf341aaa0300718 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_1_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_90_batch_2_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_2_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..a1fedc443d4343b0c76afb5b02c8ce0d6ca2cfe1 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_2_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_90_batch_2_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_2_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..09296330cfa4c86f09b93b92c468d373d277683d Binary files /dev/null and 
b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_2_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_90_batch_3_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_3_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..8953b55677d16dfe29ef54fb9d52a37e85d1719c Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_3_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_90_batch_3_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_3_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..3c03950d4eb65fcc608010db5baaef8c101406cc Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_3_img_1.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_90_batch_4_img_0.png b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_4_img_0.png new file mode 100644 index 0000000000000000000000000000000000000000..269b0293cf729b4eca3e7a6bbf57786770fa6d80 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_4_img_0.png differ diff --git a/AllinonSAM/validation_images/pred_labels/epoch_90_batch_4_img_1.png b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_4_img_1.png new file mode 100644 index 0000000000000000000000000000000000000000..29b6aac794c21295ba85059d238d3d64c52b9f81 Binary files /dev/null and b/AllinonSAM/validation_images/pred_labels/epoch_90_batch_4_img_1.png differ diff --git a/AllinonSAM/vit_seg_configs.py b/AllinonSAM/vit_seg_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..40a9bf3e184604d5245d7a4f4ce1127aff330eb6 --- /dev/null +++ b/AllinonSAM/vit_seg_configs.py @@ -0,0 +1,130 @@ +import ml_collections + +def get_b16_config(): + """Returns the ViT-B/16 configuration.""" + config = ml_collections.ConfigDict() + config.patches = ml_collections.ConfigDict({'size': (16, 16)}) + config.hidden_size = 768 + config.transformer = ml_collections.ConfigDict() + config.transformer.mlp_dim = 3072 + config.transformer.num_heads = 12 + config.transformer.num_layers = 12 + config.transformer.attention_dropout_rate = 0.0 + config.transformer.dropout_rate = 0.1 + + config.classifier = 'seg' + config.representation_size = None + config.resnet_pretrained_path = None + config.pretrained_path = '../model/vit_checkpoint/imagenet21k/ViT-B_16.npz' + config.patch_size = 16 + + config.decoder_channels = (256, 128, 64, 16) + config.n_classes = 2 + config.activation = 'softmax' + return config + + +def get_testing(): + """Returns a minimal configuration for testing.""" + config = ml_collections.ConfigDict() + config.patches = ml_collections.ConfigDict({'size': (16, 16)}) + config.hidden_size = 1 + config.transformer = ml_collections.ConfigDict() + config.transformer.mlp_dim = 1 + config.transformer.num_heads = 1 + config.transformer.num_layers = 1 + config.transformer.attention_dropout_rate = 0.0 + config.transformer.dropout_rate = 0.1 + config.classifier = 'token' + config.representation_size = None + return config + +def get_r50_b16_config(): + """Returns the Resnet50 + ViT-B/16 configuration.""" + config = get_b16_config() + config.patches.grid = (16, 16) + config.resnet = ml_collections.ConfigDict() + config.resnet.num_layers = (3, 4, 9) + config.resnet.width_factor = 1 + + config.classifier = 'seg' + config.pretrained_path = '../model/vit_checkpoint/imagenet21k/R50+ViT-B_16.npz' + config.decoder_channels = (256, 128, 64, 16) + 
config.skip_channels = [512, 256, 64, 16] + config.n_classes = 2 + config.n_skip = 3 + config.activation = 'softmax' + + return config + + +def get_b32_config(): + """Returns the ViT-B/32 configuration.""" + config = get_b16_config() + config.patches.size = (32, 32) + config.pretrained_path = '../model/vit_checkpoint/imagenet21k/ViT-B_32.npz' + return config + + +def get_l16_config(): + """Returns the ViT-L/16 configuration.""" + config = ml_collections.ConfigDict() + config.patches = ml_collections.ConfigDict({'size': (16, 16)}) + config.hidden_size = 1024 + config.transformer = ml_collections.ConfigDict() + config.transformer.mlp_dim = 4096 + config.transformer.num_heads = 16 + config.transformer.num_layers = 24 + config.transformer.attention_dropout_rate = 0.0 + config.transformer.dropout_rate = 0.1 + config.representation_size = None + + # custom + config.classifier = 'seg' + config.resnet_pretrained_path = None + config.pretrained_path = '../model/vit_checkpoint/imagenet21k/ViT-L_16.npz' + config.decoder_channels = (256, 128, 64, 16) + config.n_classes = 2 + config.activation = 'softmax' + return config + + +def get_r50_l16_config(): + """Returns the Resnet50 + ViT-L/16 configuration. customized """ + config = get_l16_config() + config.patches.grid = (16, 16) + config.resnet = ml_collections.ConfigDict() + config.resnet.num_layers = (3, 4, 9) + config.resnet.width_factor = 1 + + config.classifier = 'seg' + config.resnet_pretrained_path = '../model/vit_checkpoint/imagenet21k/R50+ViT-B_16.npz' + config.decoder_channels = (256, 128, 64, 16) + config.skip_channels = [512, 256, 64, 16] + config.n_classes = 2 + config.activation = 'softmax' + return config + + +def get_l32_config(): + """Returns the ViT-L/32 configuration.""" + config = get_l16_config() + config.patches.size = (32, 32) + return config + + +def get_h14_config(): + """Returns the ViT-L/16 configuration.""" + config = ml_collections.ConfigDict() + config.patches = ml_collections.ConfigDict({'size': (14, 14)}) + config.hidden_size = 1280 + config.transformer = ml_collections.ConfigDict() + config.transformer.mlp_dim = 5120 + config.transformer.num_heads = 16 + config.transformer.num_layers = 32 + config.transformer.attention_dropout_rate = 0.0 + config.transformer.dropout_rate = 0.1 + config.classifier = 'token' + config.representation_size = None + + return config \ No newline at end of file diff --git a/AllinonSAM/vit_seg_modeling.py b/AllinonSAM/vit_seg_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..38517479851622aa5e4f7102c7fdd1e95d9472d2 --- /dev/null +++ b/AllinonSAM/vit_seg_modeling.py @@ -0,0 +1,453 @@ +# coding=utf-8 +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import logging +import math + +from os.path import join as pjoin + +import torch +import torch.nn as nn +import numpy as np + +from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm +from torch.nn.modules.utils import _pair +from scipy import ndimage +import vit_seg_configs as configs +from vit_seg_modeling_resnet_skip import ResNetV2 + + +logger = logging.getLogger(__name__) + + +ATTENTION_Q = "MultiHeadDotProductAttention_1/query" +ATTENTION_K = "MultiHeadDotProductAttention_1/key" +ATTENTION_V = "MultiHeadDotProductAttention_1/value" +ATTENTION_OUT = "MultiHeadDotProductAttention_1/out" +FC_0 = "MlpBlock_3/Dense_0" +FC_1 = "MlpBlock_3/Dense_1" +ATTENTION_NORM = "LayerNorm_0" +MLP_NORM = "LayerNorm_2" + + +def 
np2th(weights, conv=False): + """Possibly convert HWIO to OIHW.""" + if conv: + weights = weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(weights) + + +def swish(x): + return x * torch.sigmoid(x) + + +ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish} + + +class Attention(nn.Module): + def __init__(self, config, vis): + super(Attention, self).__init__() + self.vis = vis + self.num_attention_heads = config.transformer["num_heads"] + self.attention_head_size = int(config.hidden_size / self.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = Linear(config.hidden_size, self.all_head_size) + self.key = Linear(config.hidden_size, self.all_head_size) + self.value = Linear(config.hidden_size, self.all_head_size) + + self.out = Linear(config.hidden_size, config.hidden_size) + self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"]) + self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"]) + + self.softmax = Softmax(dim=-1) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + attention_probs = self.softmax(attention_scores) + weights = attention_probs if self.vis else None + attention_probs = self.attn_dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + attention_output = self.out(context_layer) + attention_output = self.proj_dropout(attention_output) + return attention_output, weights + + +class Mlp(nn.Module): + def __init__(self, config): + super(Mlp, self).__init__() + self.fc1 = Linear(config.hidden_size, config.transformer["mlp_dim"]) + self.fc2 = Linear(config.transformer["mlp_dim"], config.hidden_size) + self.act_fn = ACT2FN["gelu"] + self.dropout = Dropout(config.transformer["dropout_rate"]) + + self._init_weights() + + def _init_weights(self): + nn.init.xavier_uniform_(self.fc1.weight) + nn.init.xavier_uniform_(self.fc2.weight) + nn.init.normal_(self.fc1.bias, std=1e-6) + nn.init.normal_(self.fc2.bias, std=1e-6) + + def forward(self, x): + x = self.fc1(x) + x = self.act_fn(x) + x = self.dropout(x) + x = self.fc2(x) + x = self.dropout(x) + return x + + +class Embeddings(nn.Module): + """Construct the embeddings from patch, position embeddings. 
+ """ + def __init__(self, config, img_size, in_channels=3): + super(Embeddings, self).__init__() + self.hybrid = None + self.config = config + img_size = _pair(img_size) + + if config.patches.get("grid") is not None: # ResNet + grid_size = config.patches["grid"] + patch_size = (img_size[0] // 16 // grid_size[0], img_size[1] // 16 // grid_size[1]) + patch_size_real = (patch_size[0] * 16, patch_size[1] * 16) + n_patches = (img_size[0] // patch_size_real[0]) * (img_size[1] // patch_size_real[1]) + self.hybrid = True + else: + patch_size = _pair(config.patches["size"]) + n_patches = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1]) + self.hybrid = False + + if self.hybrid: + self.hybrid_model = ResNetV2(block_units=config.resnet.num_layers, width_factor=config.resnet.width_factor) + in_channels = self.hybrid_model.width * 16 + self.patch_embeddings = Conv2d(in_channels=in_channels, + out_channels=config.hidden_size, + kernel_size=patch_size, + stride=patch_size) + self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches, config.hidden_size)) + + self.dropout = Dropout(config.transformer["dropout_rate"]) + + + def forward(self, x): + if self.hybrid: + x, features = self.hybrid_model(x) + else: + features = None + x = self.patch_embeddings(x) # (B, hidden. n_patches^(1/2), n_patches^(1/2)) + x = x.flatten(2) + x = x.transpose(-1, -2) # (B, n_patches, hidden) + + embeddings = x + self.position_embeddings + embeddings = self.dropout(embeddings) + return embeddings, features + + +class Block(nn.Module): + def __init__(self, config, vis): + super(Block, self).__init__() + self.hidden_size = config.hidden_size + self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6) + self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6) + self.ffn = Mlp(config) + self.attn = Attention(config, vis) + + def forward(self, x): + h = x + x = self.attention_norm(x) + x, weights = self.attn(x) + x = x + h + + h = x + x = self.ffn_norm(x) + x = self.ffn(x) + x = x + h + return x, weights + + def load_from(self, weights, n_block): + ROOT = f"Transformer/encoderblock_{n_block}" + with torch.no_grad(): + query_weight = np2th(weights[pjoin(ROOT, ATTENTION_Q, "kernel")]).view(self.hidden_size, self.hidden_size).t() + key_weight = np2th(weights[pjoin(ROOT, ATTENTION_K, "kernel")]).view(self.hidden_size, self.hidden_size).t() + value_weight = np2th(weights[pjoin(ROOT, ATTENTION_V, "kernel")]).view(self.hidden_size, self.hidden_size).t() + out_weight = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "kernel")]).view(self.hidden_size, self.hidden_size).t() + + query_bias = np2th(weights[pjoin(ROOT, ATTENTION_Q, "bias")]).view(-1) + key_bias = np2th(weights[pjoin(ROOT, ATTENTION_K, "bias")]).view(-1) + value_bias = np2th(weights[pjoin(ROOT, ATTENTION_V, "bias")]).view(-1) + out_bias = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "bias")]).view(-1) + + self.attn.query.weight.copy_(query_weight) + self.attn.key.weight.copy_(key_weight) + self.attn.value.weight.copy_(value_weight) + self.attn.out.weight.copy_(out_weight) + self.attn.query.bias.copy_(query_bias) + self.attn.key.bias.copy_(key_bias) + self.attn.value.bias.copy_(value_bias) + self.attn.out.bias.copy_(out_bias) + + mlp_weight_0 = np2th(weights[pjoin(ROOT, FC_0, "kernel")]).t() + mlp_weight_1 = np2th(weights[pjoin(ROOT, FC_1, "kernel")]).t() + mlp_bias_0 = np2th(weights[pjoin(ROOT, FC_0, "bias")]).t() + mlp_bias_1 = np2th(weights[pjoin(ROOT, FC_1, "bias")]).t() + + self.ffn.fc1.weight.copy_(mlp_weight_0) + self.ffn.fc2.weight.copy_(mlp_weight_1) + 
self.ffn.fc1.bias.copy_(mlp_bias_0) + self.ffn.fc2.bias.copy_(mlp_bias_1) + + self.attention_norm.weight.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "scale")])) + self.attention_norm.bias.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "bias")])) + self.ffn_norm.weight.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "scale")])) + self.ffn_norm.bias.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "bias")])) + + +class Encoder(nn.Module): + def __init__(self, config, vis): + super(Encoder, self).__init__() + self.vis = vis + self.layer = nn.ModuleList() + self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6) + for _ in range(config.transformer["num_layers"]): + layer = Block(config, vis) + self.layer.append(copy.deepcopy(layer)) + + def forward(self, hidden_states): + attn_weights = [] + for layer_block in self.layer: + hidden_states, weights = layer_block(hidden_states) + if self.vis: + attn_weights.append(weights) + encoded = self.encoder_norm(hidden_states) + return encoded, attn_weights + + +class Transformer(nn.Module): + def __init__(self, config, img_size, vis): + super(Transformer, self).__init__() + self.embeddings = Embeddings(config, img_size=img_size) + self.encoder = Encoder(config, vis) + + def forward(self, input_ids): + embedding_output, features = self.embeddings(input_ids) + encoded, attn_weights = self.encoder(embedding_output) # (B, n_patch, hidden) + return encoded, attn_weights, features + + +class Conv2dReLU(nn.Sequential): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + padding=0, + stride=1, + use_batchnorm=True, + ): + conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + bias=not (use_batchnorm), + ) + relu = nn.ReLU(inplace=True) + + bn = nn.BatchNorm2d(out_channels) + + super(Conv2dReLU, self).__init__(conv, bn, relu) + + +class DecoderBlock(nn.Module): + def __init__( + self, + in_channels, + out_channels, + skip_channels=0, + use_batchnorm=True, + ): + super().__init__() + self.conv1 = Conv2dReLU( + in_channels + skip_channels, + out_channels, + kernel_size=3, + padding=1, + use_batchnorm=use_batchnorm, + ) + self.conv2 = Conv2dReLU( + out_channels, + out_channels, + kernel_size=3, + padding=1, + use_batchnorm=use_batchnorm, + ) + self.up = nn.UpsamplingBilinear2d(scale_factor=2) + + def forward(self, x, skip=None): + x = self.up(x) + if skip is not None: + x = torch.cat([x, skip], dim=1) + x = self.conv1(x) + x = self.conv2(x) + return x + + +class SegmentationHead(nn.Sequential): + + def __init__(self, in_channels, out_channels, kernel_size=3, upsampling=1): + conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2) + upsampling = nn.UpsamplingBilinear2d(scale_factor=upsampling) if upsampling > 1 else nn.Identity() + super().__init__(conv2d, upsampling) + + +class DecoderCup(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + head_channels = 512 + self.conv_more = Conv2dReLU( + config.hidden_size, + head_channels, + kernel_size=3, + padding=1, + use_batchnorm=True, + ) + decoder_channels = config.decoder_channels + in_channels = [head_channels] + list(decoder_channels[:-1]) + out_channels = decoder_channels + + if self.config.n_skip != 0: + skip_channels = self.config.skip_channels + for i in range(4-self.config.n_skip): # re-select the skip channels according to n_skip + skip_channels[3-i]=0 + + else: + skip_channels=[0,0,0,0] + + blocks = [ + DecoderBlock(in_ch, out_ch, sk_ch) for in_ch, out_ch, sk_ch in 
zip(in_channels, out_channels, skip_channels) + ] + self.blocks = nn.ModuleList(blocks) + + def forward(self, hidden_states, features=None): + B, n_patch, hidden = hidden_states.size() # reshape from (B, n_patch, hidden) to (B, h, w, hidden) + h, w = int(np.sqrt(n_patch)), int(np.sqrt(n_patch)) + x = hidden_states.permute(0, 2, 1) + x = x.contiguous().view(B, hidden, h, w) + x = self.conv_more(x) + for i, decoder_block in enumerate(self.blocks): + if features is not None: + skip = features[i] if (i < self.config.n_skip) else None + else: + skip = None + x = decoder_block(x, skip=skip) + return x + + +class VisionTransformer(nn.Module): + def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False): + super(VisionTransformer, self).__init__() + self.num_classes = num_classes + self.zero_head = zero_head + self.classifier = config.classifier + self.transformer = Transformer(config, img_size, vis) + self.decoder = DecoderCup(config) + self.segmentation_head = SegmentationHead( + in_channels=config['decoder_channels'][-1], + out_channels=config['n_classes'], + kernel_size=3, + ) + self.config = config + self.soft = nn.Softmax(dim=1) + + def forward(self, x, text_dummy): + if x.size()[1] == 1: + x = x.repeat(1,3,1,1) + x, attn_weights, features = self.transformer(x) # (B, n_patch, hidden) + x = self.decoder(x, features) + logits = self.segmentation_head(x) + return self.soft(logits),0 + + def load_from(self, weights): + with torch.no_grad(): + + res_weight = weights + self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights["embedding/kernel"], conv=True)) + self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights["embedding/bias"])) + + self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights["Transformer/encoder_norm/scale"])) + self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights["Transformer/encoder_norm/bias"])) + + posemb = np2th(weights["Transformer/posembed_input/pos_embedding"]) + + posemb_new = self.transformer.embeddings.position_embeddings + if posemb.size() == posemb_new.size(): + self.transformer.embeddings.position_embeddings.copy_(posemb) + elif posemb.size()[1]-1 == posemb_new.size()[1]: + posemb = posemb[:, 1:] + self.transformer.embeddings.position_embeddings.copy_(posemb) + else: + logger.info("load_pretrained: resized variant: %s to %s" % (posemb.size(), posemb_new.size())) + ntok_new = posemb_new.size(1) + if self.classifier == "seg": + _, posemb_grid = posemb[:, :1], posemb[0, 1:] + gs_old = int(np.sqrt(len(posemb_grid))) + gs_new = int(np.sqrt(ntok_new)) + print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new)) + posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1) + zoom = (gs_new / gs_old, gs_new / gs_old, 1) + posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1) # th2np + posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1) + posemb = posemb_grid + self.transformer.embeddings.position_embeddings.copy_(np2th(posemb)) + + # Encoder whole + for bname, block in self.transformer.encoder.named_children(): + for uname, unit in block.named_children(): + unit.load_from(weights, n_block=uname) + + if self.transformer.embeddings.hybrid: + self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(res_weight["conv_root/kernel"], conv=True)) + gn_weight = np2th(res_weight["gn_root/scale"]).view(-1) + gn_bias = np2th(res_weight["gn_root/bias"]).view(-1) + self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight) + 
self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias) + + for bname, block in self.transformer.embeddings.hybrid_model.body.named_children(): + for uname, unit in block.named_children(): + unit.load_from(res_weight, n_block=bname, n_unit=uname) + +CONFIGS = { + 'ViT-B_16': configs.get_b16_config(), + 'ViT-B_32': configs.get_b32_config(), + 'ViT-L_16': configs.get_l16_config(), + 'ViT-L_32': configs.get_l32_config(), + 'ViT-H_14': configs.get_h14_config(), + 'R50-ViT-B_16': configs.get_r50_b16_config(), + 'R50-ViT-L_16': configs.get_r50_l16_config(), + 'testing': configs.get_testing(), +} + diff --git a/AllinonSAM/vit_seg_modeling_resnet_skip.py b/AllinonSAM/vit_seg_modeling_resnet_skip.py new file mode 100644 index 0000000000000000000000000000000000000000..0ae80ef7d96c46cb91bda849901aef34008e62ed --- /dev/null +++ b/AllinonSAM/vit_seg_modeling_resnet_skip.py @@ -0,0 +1,160 @@ +import math + +from os.path import join as pjoin +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def np2th(weights, conv=False): + """Possibly convert HWIO to OIHW.""" + if conv: + weights = weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(weights) + + +class StdConv2d(nn.Conv2d): + + def forward(self, x): + w = self.weight + v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) + w = (w - m) / torch.sqrt(v + 1e-5) + return F.conv2d(x, w, self.bias, self.stride, self.padding, + self.dilation, self.groups) + + +def conv3x3(cin, cout, stride=1, groups=1, bias=False): + return StdConv2d(cin, cout, kernel_size=3, stride=stride, + padding=1, bias=bias, groups=groups) + + +def conv1x1(cin, cout, stride=1, bias=False): + return StdConv2d(cin, cout, kernel_size=1, stride=stride, + padding=0, bias=bias) + + +class PreActBottleneck(nn.Module): + """Pre-activation (v2) bottleneck block. + """ + + def __init__(self, cin, cout=None, cmid=None, stride=1): + super().__init__() + cout = cout or cin + cmid = cmid or cout//4 + + self.gn1 = nn.GroupNorm(32, cmid, eps=1e-6) + self.conv1 = conv1x1(cin, cmid, bias=False) + self.gn2 = nn.GroupNorm(32, cmid, eps=1e-6) + self.conv2 = conv3x3(cmid, cmid, stride, bias=False) # Original code has it on conv1!! + self.gn3 = nn.GroupNorm(32, cout, eps=1e-6) + self.conv3 = conv1x1(cmid, cout, bias=False) + self.relu = nn.ReLU(inplace=True) + + if (stride != 1 or cin != cout): + # Projection also with pre-activation according to paper. 
+ self.downsample = conv1x1(cin, cout, stride, bias=False) + self.gn_proj = nn.GroupNorm(cout, cout) + + def forward(self, x): + + # Residual branch + residual = x + if hasattr(self, 'downsample'): + residual = self.downsample(x) + residual = self.gn_proj(residual) + + # Unit's branch + y = self.relu(self.gn1(self.conv1(x))) + y = self.relu(self.gn2(self.conv2(y))) + y = self.gn3(self.conv3(y)) + + y = self.relu(residual + y) + return y + + def load_from(self, weights, n_block, n_unit): + conv1_weight = np2th(weights[pjoin(n_block, n_unit, "conv1/kernel")], conv=True) + conv2_weight = np2th(weights[pjoin(n_block, n_unit, "conv2/kernel")], conv=True) + conv3_weight = np2th(weights[pjoin(n_block, n_unit, "conv3/kernel")], conv=True) + + gn1_weight = np2th(weights[pjoin(n_block, n_unit, "gn1/scale")]) + gn1_bias = np2th(weights[pjoin(n_block, n_unit, "gn1/bias")]) + + gn2_weight = np2th(weights[pjoin(n_block, n_unit, "gn2/scale")]) + gn2_bias = np2th(weights[pjoin(n_block, n_unit, "gn2/bias")]) + + gn3_weight = np2th(weights[pjoin(n_block, n_unit, "gn3/scale")]) + gn3_bias = np2th(weights[pjoin(n_block, n_unit, "gn3/bias")]) + + self.conv1.weight.copy_(conv1_weight) + self.conv2.weight.copy_(conv2_weight) + self.conv3.weight.copy_(conv3_weight) + + self.gn1.weight.copy_(gn1_weight.view(-1)) + self.gn1.bias.copy_(gn1_bias.view(-1)) + + self.gn2.weight.copy_(gn2_weight.view(-1)) + self.gn2.bias.copy_(gn2_bias.view(-1)) + + self.gn3.weight.copy_(gn3_weight.view(-1)) + self.gn3.bias.copy_(gn3_bias.view(-1)) + + if hasattr(self, 'downsample'): + proj_conv_weight = np2th(weights[pjoin(n_block, n_unit, "conv_proj/kernel")], conv=True) + proj_gn_weight = np2th(weights[pjoin(n_block, n_unit, "gn_proj/scale")]) + proj_gn_bias = np2th(weights[pjoin(n_block, n_unit, "gn_proj/bias")]) + + self.downsample.weight.copy_(proj_conv_weight) + self.gn_proj.weight.copy_(proj_gn_weight.view(-1)) + self.gn_proj.bias.copy_(proj_gn_bias.view(-1)) + +class ResNetV2(nn.Module): + """Implementation of Pre-activation (v2) ResNet mode.""" + + def __init__(self, block_units, width_factor): + super().__init__() + width = int(64 * width_factor) + self.width = width + + self.root = nn.Sequential(OrderedDict([ + ('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)), + ('gn', nn.GroupNorm(32, width, eps=1e-6)), + ('relu', nn.ReLU(inplace=True)), + # ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)) + ])) + + self.body = nn.Sequential(OrderedDict([ + ('block1', nn.Sequential(OrderedDict( + [('unit1', PreActBottleneck(cin=width, cout=width*4, cmid=width))] + + [(f'unit{i:d}', PreActBottleneck(cin=width*4, cout=width*4, cmid=width)) for i in range(2, block_units[0] + 1)], + ))), + ('block2', nn.Sequential(OrderedDict( + [('unit1', PreActBottleneck(cin=width*4, cout=width*8, cmid=width*2, stride=2))] + + [(f'unit{i:d}', PreActBottleneck(cin=width*8, cout=width*8, cmid=width*2)) for i in range(2, block_units[1] + 1)], + ))), + ('block3', nn.Sequential(OrderedDict( + [('unit1', PreActBottleneck(cin=width*8, cout=width*16, cmid=width*4, stride=2))] + + [(f'unit{i:d}', PreActBottleneck(cin=width*16, cout=width*16, cmid=width*4)) for i in range(2, block_units[2] + 1)], + ))), + ])) + + def forward(self, x): + features = [] + b, c, in_size, _ = x.size() + x = self.root(x) + features.append(x) + x = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)(x) + for i in range(len(self.body)-1): + x = self.body[i](x) + right_size = int(in_size / 4 / (i+1)) + if x.size()[2] != right_size: + pad = right_size - 
x.size()[2] + assert pad < 3 and pad > 0, "x {} should {}".format(x.size(), right_size) + feat = torch.zeros((b, x.size()[1], right_size, right_size), device=x.device) + feat[:, :, 0:x.size()[2], 0:x.size()[3]] = x[:] + else: + feat = x + features.append(feat) + x = self.body[-1](x) + return x, features[::-1] \ No newline at end of file diff --git a/AllinonSAM/wandb/debug-cli.abdelrahman.elsayed.log b/AllinonSAM/wandb/debug-cli.abdelrahman.elsayed.log new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/AllinonSAM/wandb/debug-cli.sarim.hashmi.log b/AllinonSAM/wandb/debug-cli.sarim.hashmi.log new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/AllinonSAM/wandb/debug-internal.log b/AllinonSAM/wandb/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..b066a9d3fd1975d59f8c20de2321ebc015bc9e96 --- /dev/null +++ b/AllinonSAM/wandb/debug-internal.log @@ -0,0 +1,181 @@ +2024-10-18 21:21:46,165 INFO StreamThr :3327812 [internal.py:wandb_internal():87] W&B internal server running at pid: 3327812, started at: 2024-10-18 21:21:46.164863 +2024-10-18 21:21:46,166 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: status +2024-10-18 21:21:46,167 INFO WriterThread:3327812 [datastore.py:open_for_write():85] open: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/run-w102ona2.wandb +2024-10-18 21:21:46,168 DEBUG SenderThread:3327812 [sender.py:send():336] send: header +2024-10-18 21:21:46,222 DEBUG SenderThread:3327812 [sender.py:send():336] send: run +2024-10-18 21:21:46,898 INFO SenderThread:3327812 [dir_watcher.py:__init__():219] watching files in: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files +2024-10-18 21:21:46,898 INFO SenderThread:3327812 [sender.py:_start_run_threads():1078] run started: w102ona2 with start time 1729272106.164478 +2024-10-18 21:21:46,898 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: summary_record +2024-10-18 21:21:46,898 INFO SenderThread:3327812 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:21:46,900 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: check_version +2024-10-18 21:21:46,900 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: check_version +2024-10-18 21:21:46,969 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: run_start +2024-10-18 21:21:47,000 DEBUG HandlerThread:3327812 [system_info.py:__init__():31] System info init +2024-10-18 21:21:47,000 DEBUG HandlerThread:3327812 [system_info.py:__init__():46] System info init done +2024-10-18 21:21:47,000 INFO HandlerThread:3327812 [system_monitor.py:start():183] Starting system monitor +2024-10-18 21:21:47,000 INFO SystemMonitor:3327812 [system_monitor.py:_start():147] Starting system asset monitoring threads +2024-10-18 21:21:47,000 INFO HandlerThread:3327812 [system_monitor.py:probe():204] Collecting system info +2024-10-18 21:21:47,000 INFO SystemMonitor:3327812 [interfaces.py:start():187] Started cpu monitoring +2024-10-18 21:21:47,001 INFO SystemMonitor:3327812 [interfaces.py:start():187] Started disk monitoring +2024-10-18 21:21:47,002 INFO SystemMonitor:3327812 [interfaces.py:start():187] Started gpu monitoring +2024-10-18 21:21:47,002 INFO SystemMonitor:3327812 [interfaces.py:start():187] Started memory monitoring 
+2024-10-18 21:21:47,002 INFO SystemMonitor:3327812 [interfaces.py:start():187] Started network monitoring +2024-10-18 21:21:47,040 DEBUG HandlerThread:3327812 [system_info.py:probe():195] Probing system +2024-10-18 21:21:47,045 DEBUG HandlerThread:3327812 [system_info.py:_probe_git():180] Probing git +2024-10-18 21:21:47,059 DEBUG HandlerThread:3327812 [system_info.py:_probe_git():188] Probing git done +2024-10-18 21:21:47,060 DEBUG HandlerThread:3327812 [system_info.py:probe():240] Probing system done +2024-10-18 21:21:47,060 DEBUG HandlerThread:3327812 [system_monitor.py:probe():213] {'os': 'Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.17', 'python': '3.8.16', 'heartbeatAt': '2024-10-18T17:21:47.040398', 'startedAt': '2024-10-18T17:21:46.154298', 'docker': None, 'cuda': None, 'args': (), 'state': 'running', 'program': '/home/abdelrahman.elsayed/sarim_code/train_baselines.py', 'codePath': 'train_baselines.py', 'git': {'remote': 'https://github.com/JayParanjape/SVDSAM.git', 'commit': '5936d0eff64d84fbefed6ecfe4bcc841459c2fc3'}, 'email': 'amra51548@gmail.com', 'root': '/home/abdelrahman.elsayed/sarim_code', 'host': 'ws-l6-014', 'username': 'abdelrahman.elsayed', 'executable': '/home/abdelrahman.elsayed/.conda/envs/s-sam/bin/python', 'cpu_count': 16, 'cpu_count_logical': 32, 'cpu_freq': {'current': 3881.4324687499998, 'min': 2200.0, 'max': 3900.0}, 'cpu_freq_per_core': [{'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3613.224, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}], 'disk': {'total': 1.0, 'used': 0.042255401611328125}, 'gpu': 'NVIDIA GeForce RTX 4090', 'gpu_count': 1, 'gpu_devices': [{'name': 'NVIDIA GeForce RTX 4090', 'memory_total': 25757220864}], 'memory': {'total': 62.65230178833008}} +2024-10-18 21:21:47,060 INFO HandlerThread:3327812 [system_monitor.py:probe():214] Finished collecting system info +2024-10-18 21:21:47,060 INFO HandlerThread:3327812 [system_monitor.py:probe():217] Publishing system info +2024-10-18 21:21:47,060 DEBUG HandlerThread:3327812 
[system_info.py:_save_pip():51] Saving list of pip packages installed into the current environment +2024-10-18 21:21:47,061 DEBUG HandlerThread:3327812 [system_info.py:_save_pip():67] Saving pip packages done +2024-10-18 21:21:47,061 DEBUG HandlerThread:3327812 [system_info.py:_save_conda():74] Saving list of conda packages installed into the current environment +2024-10-18 21:21:47,899 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json +2024-10-18 21:21:47,899 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml +2024-10-18 21:21:47,899 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/requirements.txt +2024-10-18 21:21:49,899 DEBUG HandlerThread:3327812 [system_info.py:_save_conda():86] Saving conda packages done +2024-10-18 21:21:49,900 INFO HandlerThread:3327812 [system_monitor.py:probe():219] Finished publishing system info +2024-10-18 21:21:49,901 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml +2024-10-18 21:21:49,901 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-metadata.json +2024-10-18 21:21:49,908 DEBUG SenderThread:3327812 [sender.py:send():336] send: files +2024-10-18 21:21:49,908 INFO SenderThread:3327812 [sender.py:_save_file():1332] saving file wandb-metadata.json with policy now +2024-10-18 21:21:49,912 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: stop_status +2024-10-18 21:21:49,913 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: stop_status +2024-10-18 21:21:50,224 DEBUG SenderThread:3327812 [sender.py:send():336] send: telemetry +2024-10-18 21:21:50,813 INFO wandb-upload_0:3327812 [upload_job.py:push():138] Uploaded file /tmp/slurm-abdelrahman.elsayed-44778/tmps66kwbdjwandb/ai9lj76l-wandb-metadata.json +2024-10-18 21:21:50,901 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/output.log +2024-10-18 21:21:51,162 DEBUG SenderThread:3327812 [sender.py:send():336] send: exit +2024-10-18 21:21:51,162 INFO SenderThread:3327812 [sender.py:send_exit():559] handling exit code: 1 +2024-10-18 21:21:51,162 INFO SenderThread:3327812 [sender.py:send_exit():561] handling runtime: 4 +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:send_exit():567] send defer +2024-10-18 21:21:51,164 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,164 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 0 +2024-10-18 21:21:51,164 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 0 +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:transition_state():587] send 
defer: 1 +2024-10-18 21:21:51,164 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,164 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 1 +2024-10-18 21:21:51,164 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 1 +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 2 +2024-10-18 21:21:51,164 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,164 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 2 +2024-10-18 21:21:51,165 INFO HandlerThread:3327812 [system_monitor.py:finish():193] Stopping system monitor +2024-10-18 21:21:51,165 DEBUG SystemMonitor:3327812 [system_monitor.py:_start():161] Starting system metrics aggregation loop +2024-10-18 21:21:51,165 DEBUG SystemMonitor:3327812 [system_monitor.py:_start():168] Finished system metrics aggregation loop +2024-10-18 21:21:51,165 DEBUG SystemMonitor:3327812 [system_monitor.py:_start():172] Publishing last batch of metrics +2024-10-18 21:21:51,165 INFO HandlerThread:3327812 [interfaces.py:finish():199] Joined cpu monitor +2024-10-18 21:21:51,166 INFO HandlerThread:3327812 [interfaces.py:finish():199] Joined disk monitor +2024-10-18 21:21:51,200 INFO HandlerThread:3327812 [interfaces.py:finish():199] Joined gpu monitor +2024-10-18 21:21:51,201 INFO HandlerThread:3327812 [interfaces.py:finish():199] Joined memory monitor +2024-10-18 21:21:51,201 INFO HandlerThread:3327812 [interfaces.py:finish():199] Joined network monitor +2024-10-18 21:21:51,201 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,201 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 2 +2024-10-18 21:21:51,201 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 3 +2024-10-18 21:21:51,201 DEBUG SenderThread:3327812 [sender.py:send():336] send: stats +2024-10-18 21:21:51,201 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,202 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 3 +2024-10-18 21:21:51,202 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:21:51,202 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,202 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 3 +2024-10-18 21:21:51,202 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 4 +2024-10-18 21:21:51,202 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,202 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 4 +2024-10-18 21:21:51,202 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,202 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 4 +2024-10-18 21:21:51,202 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 5 +2024-10-18 21:21:51,202 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,202 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 5 
+2024-10-18 21:21:51,202 DEBUG SenderThread:3327812 [sender.py:send():336] send: summary +2024-10-18 21:21:51,203 INFO SenderThread:3327812 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:21:51,203 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,203 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 5 +2024-10-18 21:21:51,203 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 6 +2024-10-18 21:21:51,203 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,203 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 6 +2024-10-18 21:21:51,203 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,203 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 6 +2024-10-18 21:21:51,206 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:21:51,483 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 7 +2024-10-18 21:21:51,483 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,483 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 7 +2024-10-18 21:21:51,483 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,483 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 7 +2024-10-18 21:21:51,902 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json +2024-10-18 21:21:51,903 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/config.yaml +2024-10-18 21:21:52,163 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: poll_exit +2024-10-18 21:21:52,904 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/output.log +2024-10-18 21:21:54,233 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 8 +2024-10-18 21:21:54,233 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: poll_exit +2024-10-18 21:21:54,233 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:54,233 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 8 +2024-10-18 21:21:54,234 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:54,234 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 8 +2024-10-18 21:21:54,246 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 9 +2024-10-18 21:21:54,246 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:54,246 DEBUG SenderThread:3327812 [sender.py:send():336] send: artifact +2024-10-18 21:21:54,247 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 9 +2024-10-18 21:21:54,906 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_modified():295] file/dir modified: 
/home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/output.log +2024-10-18 21:21:55,665 INFO SenderThread:3327812 [sender.py:send_artifact():1428] sent artifact job-https___github.com_JayParanjape_SVDSAM.git_train_baselines.py - {'id': 'QXJ0aWZhY3Q6MTI4NTc5Njg5Ng==', 'digest': '5f8b773fe3bdf0ea6bd47c21f45cc4fb', 'state': 'COMMITTED', 'aliases': [{'artifactCollectionName': 'job-https___github.com_JayParanjape_SVDSAM.git_train_baselines.py', 'alias': 'latest'}, {'artifactCollectionName': 'job-https___github.com_JayParanjape_SVDSAM.git_train_baselines.py', 'alias': 'v1'}], 'artifactSequence': {'id': 'QXJ0aWZhY3RDb2xsZWN0aW9uOjQ3ODg1ODkyMQ==', 'latestArtifact': {'id': 'QXJ0aWZhY3Q6MTI4NTc5Njg5Ng==', 'versionIndex': 1}}, 'version': 'v1'} +2024-10-18 21:21:55,665 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:55,665 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 9 +2024-10-18 21:21:55,665 INFO SenderThread:3327812 [dir_watcher.py:finish():365] shutting down directory watcher +2024-10-18 21:21:55,907 INFO SenderThread:3327812 [dir_watcher.py:finish():395] scan: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files +2024-10-18 21:21:55,908 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/output.log output.log +2024-10-18 21:21:55,908 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-metadata.json wandb-metadata.json +2024-10-18 21:21:55,908 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml conda-environment.yaml +2024-10-18 21:21:55,908 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json wandb-summary.json +2024-10-18 21:21:55,910 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/config.yaml config.yaml +2024-10-18 21:21:55,910 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/requirements.txt requirements.txt +2024-10-18 21:21:55,910 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 10 +2024-10-18 21:21:55,911 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:55,911 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 10 +2024-10-18 21:21:55,914 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:55,914 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 10 +2024-10-18 21:21:55,914 INFO SenderThread:3327812 [file_pusher.py:finish():164] shutting down file pusher +2024-10-18 21:21:56,529 INFO wandb-upload_0:3327812 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/output.log +2024-10-18 21:21:56,680 INFO wandb-upload_3:3327812 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/config.yaml +2024-10-18 21:21:56,741 INFO wandb-upload_2:3327812 
[upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json +2024-10-18 21:21:56,790 INFO wandb-upload_4:3327812 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/requirements.txt +2024-10-18 21:21:56,801 INFO wandb-upload_1:3327812 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml +2024-10-18 21:21:56,915 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:21:57,002 INFO Thread-12 :3327812 [sender.py:transition_state():587] send defer: 11 +2024-10-18 21:21:57,002 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:57,002 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 11 +2024-10-18 21:21:57,003 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:57,003 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 11 +2024-10-18 21:21:57,003 INFO SenderThread:3327812 [file_pusher.py:join():169] waiting for file pusher +2024-10-18 21:21:57,003 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 12 +2024-10-18 21:21:57,003 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:57,003 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 12 +2024-10-18 21:21:57,003 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:57,003 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 12 +2024-10-18 21:21:57,166 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: keepalive +2024-10-18 21:21:57,365 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 13 +2024-10-18 21:21:57,365 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:57,365 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 13 +2024-10-18 21:21:57,365 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:57,365 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 13 +2024-10-18 21:21:57,365 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 14 +2024-10-18 21:21:57,366 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:57,366 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 14 +2024-10-18 21:21:57,366 DEBUG SenderThread:3327812 [sender.py:send():336] send: final +2024-10-18 21:21:57,366 DEBUG SenderThread:3327812 [sender.py:send():336] send: footer +2024-10-18 21:21:57,366 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:57,366 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 14 +2024-10-18 21:21:57,366 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: poll_exit +2024-10-18 21:21:57,366 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: poll_exit +2024-10-18 21:21:57,367 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: server_info +2024-10-18 21:21:57,367 DEBUG SenderThread:3327812 
[sender.py:send_request():363] send_request: server_info +2024-10-18 21:21:57,368 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: get_summary +2024-10-18 21:21:57,369 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: sampled_history +2024-10-18 21:21:57,621 INFO MainThread:3327812 [wandb_run.py:_footer_history_summary_info():3422] rendering history +2024-10-18 21:21:57,621 INFO MainThread:3327812 [wandb_run.py:_footer_history_summary_info():3454] rendering summary +2024-10-18 21:21:57,621 INFO MainThread:3327812 [wandb_run.py:_footer_sync_info():3380] logging synced files +2024-10-18 21:21:57,621 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: shutdown +2024-10-18 21:21:57,621 INFO HandlerThread:3327812 [handler.py:finish():842] shutting down handler +2024-10-18 21:21:58,367 INFO WriterThread:3327812 [datastore.py:close():298] close: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/run-w102ona2.wandb +2024-10-18 21:21:58,621 INFO SenderThread:3327812 [sender.py:finish():1504] shutting down sender +2024-10-18 21:21:58,622 INFO SenderThread:3327812 [file_pusher.py:finish():164] shutting down file pusher +2024-10-18 21:21:58,622 INFO SenderThread:3327812 [file_pusher.py:join():169] waiting for file pusher diff --git a/AllinonSAM/wandb/debug.log b/AllinonSAM/wandb/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..4919901d195e1239334fb54bc5f35836d23b5254 --- /dev/null +++ b/AllinonSAM/wandb/debug.log @@ -0,0 +1,27 @@ +2024-10-18 21:21:46,160 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Configure stats pid to 3327679 +2024-10-18 21:21:46,160 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Loading settings from /home/abdelrahman.elsayed/.config/wandb/settings +2024-10-18 21:21:46,160 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Loading settings from /home/abdelrahman.elsayed/sarim_code/wandb/settings +2024-10-18 21:21:46,160 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-10-18 21:21:46,160 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': 'train_baselines.py', 'program': '/home/abdelrahman.elsayed/sarim_code/train_baselines.py'} +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:_log_setup():506] Logging user logs to /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/logs/debug.log +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:_log_setup():507] Logging internal logs to /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/logs/debug-internal.log +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:init():546] calling init triggers +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:init():552] wandb.init called with sweep_config: {} +config: {'learning_rate': 0.0001, 'batch_size': 2, 'num_epochs': 500, 'reg_multiplier': 0.01} +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:init():602] starting backend +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:init():606] setting up manager +2024-10-18 21:21:46,162 INFO MainThread:3327679 [backend.py:_multiprocessing_setup():106] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-10-18 21:21:46,164 
INFO MainThread:3327679 [wandb_init.py:init():613] backend started and connected +2024-10-18 21:21:46,166 INFO MainThread:3327679 [wandb_init.py:init():701] updated telemetry +2024-10-18 21:21:46,222 INFO MainThread:3327679 [wandb_init.py:init():741] communicating run to backend with 60.0 second timeout +2024-10-18 21:21:46,900 INFO MainThread:3327679 [wandb_run.py:_on_init():2133] communicating current version +2024-10-18 21:21:46,965 INFO MainThread:3327679 [wandb_run.py:_on_init():2142] got version response upgrade_message: "wandb version 0.18.5 is available! To upgrade, please run:\n $ pip install wandb --upgrade" + +2024-10-18 21:21:46,965 INFO MainThread:3327679 [wandb_init.py:init():789] starting run threads in backend +2024-10-18 21:21:49,912 INFO MainThread:3327679 [wandb_run.py:_console_start():2114] atexit reg +2024-10-18 21:21:49,912 INFO MainThread:3327679 [wandb_run.py:_redirect():1969] redirect: SettingsConsole.WRAP_RAW +2024-10-18 21:21:49,913 INFO MainThread:3327679 [wandb_run.py:_redirect():2034] Wrapping output streams. +2024-10-18 21:21:49,913 INFO MainThread:3327679 [wandb_run.py:_redirect():2059] Redirects installed. +2024-10-18 21:21:49,913 INFO MainThread:3327679 [wandb_init.py:init():831] run started, returning control to user process +2024-10-18 21:21:58,625 WARNING MsgRouterThr:3327679 [router.py:message_loop():77] message_loop has been closed diff --git a/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/conda-environment.yaml b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/conda-environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/config.yaml b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2149eaae40aa1a875e91e82e67ee93c599424598 --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +learning_rate: + desc: null + value: 0.0001 +batch_size: + desc: null + value: 32 +num_epochs: + desc: null + value: 1000 +reg_multiplier: + desc: null + value: 0 +_wandb: + desc: null + value: + python_version: 3.12.1 + cli_version: 0.17.5 + framework: torch + is_jupyter_run: false + is_kaggle_kernel: true + start_time: 1726417516 + t: + 1: + - 1 + - 41 + - 55 + - 105 + 2: + - 1 + - 41 + - 55 + - 105 + 3: + - 16 + - 23 + 4: 3.12.1 + 5: 0.17.5 + 8: + - 2 + - 5 + 13: linux-x86_64 diff --git a/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/output.log b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..6d7857c465e13c34ecac1fab9d21494feaee599b --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/output.log @@ -0,0 +1,53 @@ +Training parameters: +---------- +number of trainable parameters: 1034496 +batch size: 32 +num epochs: 1000 +Epoch 0/999 +---------- +Traceback (most recent call last): + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py", line 382, in + main_train(data_config, model_config, args.pretrained_path, args.save_path, args.training_strategy, device=args.device) + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py", line 361, in main_train + model = train_dl(model, dataset_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], 
bs=training_params['batch_size'], device=device, retain_graph=retain_graph, neg2pos_ratio=data_config['data']['negative_to_positive_ratio'], reg_multiplier=model_config['training']['reg_multiplier']) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/train.py", line 182, in train_dl + outputs, reg_loss = model(inputs, text) + ^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/model.py", line 162, in forward + image_embeddings, reg_loss = self.sam_encoder(x_img) + ^^^^^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/prompt_adapted_segment_anything/modeling/image_encoder.py", line 178, in forward + x, loss = blk(x) + ^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/prompt_adapted_segment_anything/modeling/image_encoder.py", line 281, in forward + x, reg_loss1 = self.attn(x) + ^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/prompt_adapted_segment_anything/modeling/image_encoder.py", line 345, in forward + attn = (q * self.scale) @ k.transpose(-2, -1) + ~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~ +torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1.50 GiB. GPU 0 has a total capacity of 23.65 GiB of which 1.25 GiB is free. Including non-PyTorch memory, this process has 21.14 GiB memory in use. Of the allocated memory 20.60 GiB is allocated by PyTorch, and 61.92 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/requirements.txt b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..4f9a3909a6c3c0ad41a7c480c097a5cffd1b26cb --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/requirements.txt @@ -0,0 +1,507 @@ +Babel==2.14.0 +Brotli==1.1.0 +CoLT5-attention==0.11.1 +Deprecated==1.2.14 +GitPython==3.1.43 +Jinja2==3.1.3 +Mako==1.3.5 +Markdown==3.6 +MarkupSafe==2.1.4 +PTable==0.9.2 +PuLP==2.9.0 +PyGithub==1.59.1 +PyJWT==2.9.0 +PyNaCl==1.5.0 +PyPika==0.48.9 +PySocks==1.7.1 +PyYAML==6.0.1 +Pygments==2.15.1 +Pygments==2.17.2 +SQLAlchemy==2.0.32 +Send2Trash==1.8.2 +SimpleITK==2.3.1 +TorchFix==0.5.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.33.0 +aiohttp==3.9.3 +aiosignal==1.3.1 +albucore==0.0.13 +albumentations==1.4.13 +alembic==1.13.2 +annotated-types==0.6.0 +anyio==4.2.0 +anytree==2.12.1 +appdirs==1.4.4 +arel==0.3.0 +argon2-cffi-bindings==21.2.0 +argon2-cffi==23.1.0 +argparse==1.4.0 +arrow==1.3.0 +asgiref==3.8.1 +asttokens==2.0.5 +async-asgi-testclient==1.4.11 +async-lru==2.0.4 +asyncio==3.4.3 +attrs==23.2.0 +autocommand==2.2.2 +backcall==0.2.0 +backoff==2.2.1 +backports.tarfile==1.2.0 +bcrypt==4.2.0 +beartype==0.18.5 +beautifulsoup4==4.12.3 +bidict==0.23.1 +bitsandbytes==0.43.3 +black==24.8.0 +bleach==6.1.0 +blis==0.7.11 +boto3==1.35.10 +botocore==1.35.10 +build==1.2.1 +cacheout==0.14.1 +cachetools==4.2.4 +catalogue==2.0.10 +certifi==2024.2.2 +cffi==1.16.0 +charset-normalizer==3.3.2 +chex==0.1.86 +chroma-hnswlib==0.7.3 +chromadb==0.4.24 +clarifai-grpc==10.7.3 +clarifai==10.7.0 +click==8.1.7 +clip==1.0 +cloudpathlib==0.16.0 +cohere==5.6.2 +colorama==0.4.6 +coloredlogs==15.0.1 +comm==0.2.1 +confection==0.1.4 +contextlib2==21.6.0 +contourpy==1.2.1 +crewai-tools==0.1.6 +crewai==0.28.8 +cryptography==42.0.8 +cycler==0.12.1 +cymem==2.0.8 +dataclasses-json==0.6.5 +datasets==2.19.1 +debugpy==1.6.7 +decorator==5.1.1 +defusedxml==0.7.1 +deprecation==2.1.0 +dill==0.3.8 +distinctipy==1.3.4 +distro==1.9.0 +docker-pycreds==0.4.0 +docstring_parser==0.16 +efficientnet-pytorch==0.7.1 +einops-exts==0.0.4 +einops==0.8.0 +einx==0.3.0 +embedchain==0.1.113 +en-core-web-sm==3.7.1 +etils==1.9.3 +eval_type_backport==0.2.0 +executing==2.1.0 +faiss-cpu==1.8.0.post1 +fastapi==0.109.0 +fastavro==1.9.5 +fastjsonschema==2.19.1 +ffmpeg-python==0.2.0 +filelock==3.13.1 +flake8==7.1.1 +flatbuffers==24.3.25 +flax==0.9.0 +flwr-datasets==0.2.0 +flwr==1.10.0 +fonttools==4.50.0 +fqdn==1.5.1 +fr-core-news-sm==3.7.0 +frozendict==2.4.4 +frozenlist==1.4.1 +fsspec==2023.12.2 +ftfy==6.2.3 +future==1.0.0 +gitdb==4.0.11 +giturlparse==0.12.0 +google-ai-generativelanguage==0.6.1 +google-api-core==2.18.0 +google-api-python-client==2.125.0 +google-auth-httplib2==0.2.0 +google-auth==2.29.0 +google-cloud-aiplatform==1.48.0 +google-cloud-bigquery==3.20.1 +google-cloud-core==2.4.1 +google-cloud-resource-manager==1.12.3 +google-cloud-storage==2.16.0 +google-crc32c==1.5.0 +google-generativeai==0.5.0 +google-resumable-media==2.7.0 +google==3.0.0 +googleapis-common-protos==1.63.0 +gptcache==0.1.44 +gpytorch==1.12 +graphviz==0.20.1 +greenlet==3.0.3 +groq==0.5.0 +grpc-google-iam-v1==0.13.0 +grpcio-status==1.62.1 
+grpcio-tools==1.62.3 +grpcio==1.64.3 +h11==0.14.0 +h2==4.1.0 +h5py==3.11.0 +hpack==4.0.0 +httpcore==1.0.5 +httplib2==0.22.0 +httptools==0.6.1 +httpx-sse==0.4.0 +httpx==0.27.0 +huggingface-hub==0.24.5 +humanfriendly==10.0 +humanize==4.10.0 +hyperframe==6.0.1 +idna==3.6 +imagecodecs==2024.1.1 +imageio==2.34.0 +importlib_metadata==8.0.0 +importlib_metadata==8.4.0 +importlib_resources==6.4.0 +importlib_resources==6.4.4 +imutils==0.5.4 +inflect==7.3.1 +iniconfig==2.0.0 +inquirerpy==0.3.4 +instructor==0.5.2 +iopath==0.1.10 +ipykernel==5.5.6 +ipykernel==6.28.0 +ipython-genutils==0.2.0 +ipython==7.16.1 +ipython==8.20.0 +isoduration==20.11.0 +iterators==0.0.2 +jaraco.context==5.3.0 +jaraco.functools==4.0.1 +jaraco.text==3.12.1 +jax==0.4.31 +jaxlib==0.4.31 +jaxtyping==0.2.19 +jedi==0.18.1 +jedi==0.19.1 +jiter==0.4.2 +jmespath==1.0.1 +joblib==1.4.2 +json5==0.9.14 +json_repair==0.25.3 +jsonpatch==1.33 +jsonpointer==2.4 +jsonref==1.1.0 +jsonschema-specifications==2023.12.1 +jsonschema==4.20.0 +jupyter-events==0.9.0 +jupyter-lsp==2.2.2 +jupyter_client==8.6.0 +jupyter_core==5.5.0 +jupyter_core==5.7.1 +jupyter_server==2.12.5 +jupyter_server_terminals==0.5.2 +jupyterlab==4.0.12 +jupyterlab_pygments==0.3.0 +jupyterlab_server==2.25.2 +jupyterplot==0.0.3 +kiwisolver==1.4.5 +kubernetes==30.1.0 +lancedb==0.5.7 +langchain-cohere==0.1.5 +langchain-community==0.0.29 +langchain-core==0.1.52 +langchain-experimental==0.0.55 +langchain-groq==0.1.3 +langchain-openai==0.1.7 +langchain-text-splitters==0.0.2 +langchain==0.1.13 +langcodes==3.4.0 +langsmith==0.1.108 +language_data==1.2.0 +lazy_loader==0.4 +lib==4.0.0 +libcst==1.1.0 +lightning-utilities==0.11.2 +linear-operator==0.5.3 +local-attention==1.9.15 +loguru==0.7.2 +lrcurve==1.1.0 +lxml==5.2.2 +marisa-trie==1.1.0 +markdown-it-py==3.0.0 +marshmallow==3.21.2 +matplotlib-inline==0.1.6 +matplotlib==3.8.4 +mccabe==0.7.0 +mdurl==0.1.2 +mem0ai==0.0.20 +mistune==3.0.2 +ml-dtypes==0.4.0 +mmh3==4.1.0 +monai==1.3.1 +monotonic==1.6 +more-itertools==10.3.0 +mpmath==1.3.0 +msgpack==1.0.8 +multidict==6.0.5 +multiprocess==0.70.16 +munch==4.0.0 +murmurhash==1.0.10 +mutagen==1.47.0 +mypy-extensions==1.0.0 +nbclient==0.9.0 +nbconvert==7.14.2 +nbformat==5.9.2 +nest-asyncio==1.6.0 +networkx==3.2.1 +nibabel==5.2.1 +nilearn==0.10.4 +nltk==3.5 +nodeenv==1.9.1 +notebook==7.0.7 +notebook_shim==0.2.3 +numerize==0.12 +numpy==1.26.3 +nvidia-cublas-cu12==12.1.3.1 +nvidia-cuda-cupti-cu12==12.1.105 +nvidia-cuda-nvrtc-cu12==12.1.105 +nvidia-cuda-runtime-cu12==12.1.105 +nvidia-cudnn-cu12==9.1.0.70 +nvidia-cufft-cu12==11.0.2.54 +nvidia-curand-cu12==10.3.2.106 +nvidia-cusolver-cu12==11.4.5.107 +nvidia-cusparse-cu12==12.1.0.106 +nvidia-nccl-cu12==2.20.5 +nvidia-nvjitlink-cu12==12.3.101 +nvidia-nvtx-cu12==12.1.105 +oauthlib==3.2.2 +onnxruntime==1.19.0 +openai==1.43.0 +opencv-python-headless==4.10.0.84 +opencv-python==4.9.0.80 +opentelemetry-api==1.27.0 +opentelemetry-exporter-otlp-proto-common==1.27.0 +opentelemetry-exporter-otlp-proto-grpc==1.27.0 +opentelemetry-exporter-otlp-proto-http==1.27.0 +opentelemetry-instrumentation-asgi==0.48b0 +opentelemetry-instrumentation-fastapi==0.48b0 +opentelemetry-instrumentation==0.48b0 +opentelemetry-proto==1.27.0 +opentelemetry-sdk==1.27.0 +opentelemetry-semantic-conventions==0.48b0 +opentelemetry-util-http==0.48b0 +opt-einsum==3.3.0 +optax==0.2.3 +orbax-checkpoint==0.6.1 +orjson==3.10.3 +outcome==1.3.0.post0 +overrides==7.7.0 +packaging==23.2 +packaging==24.1 +pandas==2.1.4 +pandocfilters==1.5.1 +parameterized==0.9.0 +parso==0.8.3 +pathspec==0.12.1 +peft==0.12.0 
+pendulum==3.0.0 +pexpect==4.8.0 +pexpect==4.9.0 +pfzy==0.3.4 +pickleshare==0.7.5 +pillow==10.2.0 +pinecone-plugin-inference==1.0.3 +pinecone-plugin-interface==0.0.7 +pinecone==5.1.0 +pip==24.2 +platformdirs==3.10.0 +platformdirs==4.2.0 +platformdirs==4.2.2 +pluggy==1.5.0 +portalocker==2.10.1 +posthog==3.6.0 +preshed==3.0.9 +pretrainedmodels==0.7.4 +prettytable==3.11.0 +prometheus-client==0.19.0 +prompt-toolkit==3.0.43 +proto-plus==1.23.0 +protobuf==3.20.3 +psutil==5.9.0 +ptflops==0.7.3 +ptyprocess==0.7.0 +pulsar-client==3.5.0 +pure-eval==0.2.2 +py==1.11.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pycodestyle==2.12.1 +pycparser==2.21 +pycryptodome==3.20.0 +pycryptodomex==3.20.0 +pydantic==2.8.2 +pydantic_core==2.20.1 +pydicom==2.4.4 +pyflakes==3.2.0 +pylance==0.9.18 +pynrrd==0.4.3 +pyparsing==3.1.2 +pypdf==4.3.1 +pyproject_hooks==1.1.0 +pyright==1.1.378 +pysbd==0.3.4 +pytest==8.3.2 +python-dateutil==2.8.2 +python-dotenv==1.0.0 +python-json-logger==2.0.7 +python-magic==0.4.27 +python-multipart==0.0.5 +python-rapidjson==1.20 +pytorch-lightning==2.2.1 +pytorch-metric-learning==2.6.0 +pytube==15.0.0 +pytz==2024.1 +pyzmq==25.1.2 +qdrant-client==1.11.1 +rarfile==4.2 +ratelimit==2.2.1 +ratelimiter==1.2.0.post0 +referencing==0.33.0 +regex==2023.12.25 +requests-oauthlib==2.0.0 +requests-toolbelt==1.0.0 +requests==2.31.0 +retry==0.9.2 +rfc3339-validator==0.1.4 +rfc3986-validator==0.1.1 +rich==13.7.1 +rpds-py==0.17.1 +rsa==4.9 +s3transfer==0.10.2 +safetensors==0.4.2 +schedulefree==1.2.7 +schema==0.7.5 +scikit-image==0.23.1 +scikit-learn==1.5.0 +scipy==1.13.0 +seaborn==0.13.2 +segmentation-models-pytorch==0.3.3 +selenium==4.24.0 +semver==3.0.2 +sentence-transformers==3.0.1 +sentencepiece==0.2.0 +sentry-sdk==2.12.0 +setproctitle==1.3.3 +setuptools==74.1.1 +shapely==2.0.2 +shellingham==1.5.4 +shortuuid==1.0.13 +six==1.16.0 +skypilot==0.6.1 +smart-open==6.4.0 +smmap==5.0.1 +sniffio==1.3.0 +sortedcontainers==2.4.0 +soupsieve==2.5 +spacy-legacy==3.0.12 +spacy-loggers==1.0.5 +spacy==3.7.4 +srsly==2.4.8 +sse-starlette==2.1.0 +stack-data==0.2.0 +starlette==0.35.1 +stringcase==1.2.0 +stripe==10.10.0 +supervisely==6.73.181 +swarms-cloud==0.3.7 +swarms-memory==0.1.2 +swarms==5.6.6 +sympy==1.12 +tabulate==0.9.0 +tenacity==8.5.0 +tensorboard-data-server==0.7.2 +tensorboard==2.17.0 +tensorboardX==2.6.2.2 +tensorstore==0.1.64 +termcolor==2.4.0 +terminado==0.18.0 +thinc==8.2.3 +threadpoolctl==3.5.0 +tifffile==2024.2.12 +tiktoken==0.7.0 +time-machine==2.15.0 +timm==0.9.2 +tinycss2==1.2.1 +tokenizers==0.19.1 +toml==0.10.2 +tomli==2.0.1 +tomli==2.0.1 +tomli_w==1.0.0 +toolz==0.12.1 +torch==2.4.0 +torchmetrics==1.3.2 +torchsummary==1.5.1 +torchtext==0.5.0 +torchview==0.2.6 +torchvision==0.19.0 +tornado==6.3.3 +tornado==6.4 +tqdm==4.66.5 +traitlets==5.14.1 +traitlets==5.7.1 +transformers==4.44.2 +trimesh==3.23.5 +trio-websocket==0.11.1 +trio==0.26.2 +triton==3.0.0 +tritonclient==2.49.0 +typeguard==4.3.0 +typeguard==4.3.0 +typer==0.9.4 +types-python-dateutil==2.8.19.20240106 +types-requests==2.32.0.20240712 +typing-inspect==0.9.0 +typing_extensions==4.12.2 +typing_extensions==4.12.2 +tzdata==2024.1 +uri-template==1.3.0 +uritemplate==4.1.1 +urllib3==2.2.2 +uvicorn==0.30.6 +uvloop==0.20.0 +varname==0.13.3 +vector-quantize-pytorch==1.15.6 +vertexai==1.46.0 +vision-mamba==0.1.0 +wandb==0.17.5 +wasabi==1.1.2 +watchfiles==0.24.0 +wcwidth==0.2.13 +weasel==0.3.4 +webcolors==1.13 +webencodings==0.5.1 +websocket-client==1.8.0 +websockets==10.4 +wheel==0.41.2 +wheel==0.43.0 +wrapt==1.16.0 
+wsproto==1.2.0 +xxhash==3.4.1 +yacs==0.1.8 +yarl==1.9.4 +youtube-transcript-api==0.6.2 +yt-dlp==2023.12.30 +zetascale==2.7.0 +zipp==3.19.2 +zipp==3.20.1 +zstd==1.5.5.1 \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/wandb-metadata.json b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..82f5deda05b9652eb33c9ce3bdde2a271eeedf5b --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/wandb-metadata.json @@ -0,0 +1,210 @@ +{ + "os": "Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.35", + "python": "3.12.1", + "heartbeatAt": "2024-09-15T16:25:17.663120", + "startedAt": "2024-09-15T16:25:16.799116", + "docker": null, + "cuda": null, + "args": [ + "--model_config", + "model_svdtuning.yml", + "--data_config", + "config_arcade.yml", + "--save_path", + "./temp.pth" + ], + "state": "running", + "program": "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py", + "codePathLocal": "driver_scratchpad.py", + "codePath": "driver_scratchpad.py", + "git": { + "remote": "https://github.com/JayParanjape/SVDSAM.git", + "commit": "5936d0eff64d84fbefed6ecfe4bcc841459c2fc3" + }, + "cpu_count": 16, + "cpu_count_logical": 32, + "cpu_freq": { + "current": 3.890843750000002, + "min": 2200.0, + "max": 3900.0 + }, + "cpu_freq_per_core": [ + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 4.038, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + } + ], + "disk": { + "/": { + "total": 1.0, + "used": 0.04192352294921875 + } + }, + "gpu": "NVIDIA GeForce RTX 4090", + "gpu_count": 1, + "gpu_devices": [ + { + "name": "NVIDIA GeForce RTX 4090", + 
"memory_total": 25757220864 + } + ], + "memory": { + "total": 62.65229415893555 + } +} diff --git a/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/wandb-summary.json b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..b1d4cf96d64955a2ae8b982ce021e29fde546a1a --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 4}} \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20240915_202516-fsqumi24/logs/debug-internal.log b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..d5ab75b883ca37f75b5529f5d1b1944d510287e2 --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/logs/debug-internal.log @@ -0,0 +1,156 @@ +2024-09-15 20:25:16,836 INFO StreamThr :1017431 [internal.py:wandb_internal():85] W&B internal server running at pid: 1017431, started at: 2024-09-15 20:25:16.834265 +2024-09-15 20:25:16,838 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: status +2024-09-15 20:25:16,841 INFO WriterThread:1017431 [datastore.py:open_for_write():87] open: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/run-fsqumi24.wandb +2024-09-15 20:25:16,844 DEBUG SenderThread:1017431 [sender.py:send():379] send: header +2024-09-15 20:25:16,847 DEBUG SenderThread:1017431 [sender.py:send():379] send: run +2024-09-15 20:25:17,462 INFO SenderThread:1017431 [dir_watcher.py:__init__():211] watching files in: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files +2024-09-15 20:25:17,462 INFO SenderThread:1017431 [sender.py:_start_run_threads():1188] run started: fsqumi24 with start time 1726417516.83529 +2024-09-15 20:25:17,469 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: check_version +2024-09-15 20:25:17,469 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: check_version +2024-09-15 20:25:17,560 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: run_start +2024-09-15 20:25:17,594 DEBUG HandlerThread:1017431 [system_info.py:__init__():26] System info init +2024-09-15 20:25:17,594 DEBUG HandlerThread:1017431 [system_info.py:__init__():41] System info init done +2024-09-15 20:25:17,594 INFO HandlerThread:1017431 [system_monitor.py:start():194] Starting system monitor +2024-09-15 20:25:17,594 INFO SystemMonitor:1017431 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-09-15 20:25:17,594 INFO HandlerThread:1017431 [system_monitor.py:probe():214] Collecting system info +2024-09-15 20:25:17,595 INFO SystemMonitor:1017431 [interfaces.py:start():188] Started cpu monitoring +2024-09-15 20:25:17,595 INFO SystemMonitor:1017431 [interfaces.py:start():188] Started disk monitoring +2024-09-15 20:25:17,595 INFO SystemMonitor:1017431 [interfaces.py:start():188] Started gpu monitoring +2024-09-15 20:25:17,596 INFO SystemMonitor:1017431 [interfaces.py:start():188] Started memory monitoring +2024-09-15 20:25:17,596 INFO SystemMonitor:1017431 [interfaces.py:start():188] Started network monitoring +2024-09-15 20:25:17,663 DEBUG HandlerThread:1017431 [system_info.py:probe():152] Probing system +2024-09-15 20:25:17,664 DEBUG HandlerThread:1017431 [system_info.py:_probe_git():137] Probing git +2024-09-15 20:25:17,670 DEBUG 
HandlerThread:1017431 [system_info.py:_probe_git():145] Probing git done +2024-09-15 20:25:17,670 DEBUG HandlerThread:1017431 [system_info.py:probe():200] Probing system done +2024-09-15 20:25:17,670 DEBUG HandlerThread:1017431 [system_monitor.py:probe():223] {'os': 'Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.35', 'python': '3.12.1', 'heartbeatAt': '2024-09-15T16:25:17.663120', 'startedAt': '2024-09-15T16:25:16.799116', 'docker': None, 'cuda': None, 'args': ('--model_config', 'model_svdtuning.yml', '--data_config', 'config_arcade.yml', '--save_path', './temp.pth'), 'state': 'running', 'program': '/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py', 'codePathLocal': 'driver_scratchpad.py', 'codePath': 'driver_scratchpad.py', 'git': {'remote': 'https://github.com/JayParanjape/SVDSAM.git', 'commit': '5936d0eff64d84fbefed6ecfe4bcc841459c2fc3'}, 'cpu_count': 16, 'cpu_count_logical': 32, 'cpu_freq': {'current': 3.890843750000002, 'min': 2200.0, 'max': 3900.0}, 'cpu_freq_per_core': [{'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 4.038, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}], 'disk': {'/': {'total': 1.0, 'used': 0.04192352294921875}}, 'gpu': 'NVIDIA GeForce RTX 4090', 'gpu_count': 1, 'gpu_devices': [{'name': 'NVIDIA GeForce RTX 4090', 'memory_total': 25757220864}], 'memory': {'total': 62.65229415893555}} +2024-09-15 20:25:17,670 INFO HandlerThread:1017431 [system_monitor.py:probe():224] Finished collecting system info +2024-09-15 20:25:17,670 INFO HandlerThread:1017431 [system_monitor.py:probe():227] Publishing system info +2024-09-15 20:25:17,670 DEBUG HandlerThread:1017431 [system_info.py:_save_conda():209] Saving list of conda packages installed into the current environment +2024-09-15 20:25:17,673 ERROR HandlerThread:1017431 [system_info.py:_save_conda():223] Error saving conda packages: [Errno 2] No such file or directory: 'conda' +Traceback (most recent call last): + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/wandb/sdk/internal/system/system_info.py", line 216, in _save_conda + subprocess.call( + 
File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/subprocess.py", line 389, in call + with Popen(*popenargs, **kwargs) as p: + ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/subprocess.py", line 1026, in __init__ + self._execute_child(args, executable, preexec_fn, close_fds, + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/subprocess.py", line 1950, in _execute_child + raise child_exception_type(errno_num, err_msg, err_filename) +FileNotFoundError: [Errno 2] No such file or directory: 'conda' +2024-09-15 20:25:17,675 DEBUG HandlerThread:1017431 [system_info.py:_save_conda():224] Saving conda packages done +2024-09-15 20:25:17,679 INFO HandlerThread:1017431 [system_monitor.py:probe():229] Finished publishing system info +2024-09-15 20:25:17,690 DEBUG SenderThread:1017431 [sender.py:send():379] send: files +2024-09-15 20:25:17,690 INFO SenderThread:1017431 [sender.py:_save_file():1454] saving file wandb-metadata.json with policy now +2024-09-15 20:25:18,242 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: python_packages +2024-09-15 20:25:18,242 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: stop_status +2024-09-15 20:25:18,242 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:25:18,242 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: python_packages +2024-09-15 20:25:18,248 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: stop_status +2024-09-15 20:25:18,467 INFO Thread-12 :1017431 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/wandb-metadata.json +2024-09-15 20:25:18,467 INFO Thread-12 :1017431 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/requirements.txt +2024-09-15 20:25:18,467 INFO Thread-12 :1017431 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/conda-environment.yaml +2024-09-15 20:25:18,503 DEBUG SenderThread:1017431 [sender.py:send():379] send: telemetry +2024-09-15 20:25:18,701 INFO wandb-upload_0:1017431 [upload_job.py:push():130] Uploaded file /tmp/slurm-sarim.hashmi-40491/tmpd9rujjtvwandb/nxnn2ty6-wandb-metadata.json +2024-09-15 20:25:19,241 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:25:19,468 INFO Thread-12 :1017431 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/output.log +2024-09-15 20:25:20,241 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:25:21,240 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:25:21,471 INFO Thread-12 :1017431 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/output.log +2024-09-15 20:25:21,584 DEBUG SenderThread:1017431 [sender.py:send():379] send: exit +2024-09-15 20:25:21,584 INFO SenderThread:1017431 [sender.py:send_exit():586] 
handling exit code: 1 +2024-09-15 20:25:21,584 INFO SenderThread:1017431 [sender.py:send_exit():588] handling runtime: 4 +2024-09-15 20:25:21,588 INFO SenderThread:1017431 [sender.py:_save_file():1454] saving file wandb-summary.json with policy end +2024-09-15 20:25:21,588 INFO SenderThread:1017431 [sender.py:send_exit():594] send defer +2024-09-15 20:25:21,589 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:25:21,589 INFO HandlerThread:1017431 [handler.py:handle_request_defer():184] handle defer: 0 +2024-09-15 20:25:21,589 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: defer +2024-09-15 20:25:21,589 INFO SenderThread:1017431 [sender.py:send_request_defer():610] handle sender defer: 0 +2024-09-15 20:25:21,589 INFO SenderThread:1017431 [sender.py:transition_state():614] send defer: 1 +2024-09-15 20:25:21,589 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:25:21,589 INFO HandlerThread:1017431 [handler.py:handle_request_defer():184] handle defer: 1 +2024-09-15 20:25:21,589 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: defer +2024-09-15 20:25:21,589 INFO SenderThread:1017431 [sender.py:send_request_defer():610] handle sender defer: 1 +2024-09-15 20:25:21,589 INFO SenderThread:1017431 [sender.py:transition_state():614] send defer: 2 +2024-09-15 20:25:21,589 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:25:21,589 INFO HandlerThread:1017431 [handler.py:handle_request_defer():184] handle defer: 2 +2024-09-15 20:25:21,589 INFO HandlerThread:1017431 [system_monitor.py:finish():203] Stopping system monitor +2024-09-15 20:25:21,589 DEBUG SystemMonitor:1017431 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-09-15 20:25:21,590 DEBUG SystemMonitor:1017431 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-09-15 20:25:21,590 INFO HandlerThread:1017431 [interfaces.py:finish():200] Joined cpu monitor +2024-09-15 20:25:21,590 DEBUG SystemMonitor:1017431 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-09-15 20:25:21,590 INFO HandlerThread:1017431 [interfaces.py:finish():200] Joined disk monitor +2024-09-15 20:25:21,650 INFO HandlerThread:1017431 [interfaces.py:finish():200] Joined gpu monitor +2024-09-15 20:25:21,650 INFO HandlerThread:1017431 [interfaces.py:finish():200] Joined memory monitor +2024-09-15 20:25:21,650 INFO HandlerThread:1017431 [interfaces.py:finish():200] Joined network monitor +2024-09-15 20:25:21,651 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: defer +2024-09-15 20:25:21,651 INFO SenderThread:1017431 [sender.py:send_request_defer():610] handle sender defer: 2 +2024-09-15 20:25:21,651 INFO SenderThread:1017431 [sender.py:transition_state():614] send defer: 3 +2024-09-15 20:25:21,651 DEBUG SenderThread:1017431 [sender.py:send():379] send: stats +2024-09-15 20:25:21,651 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:25:21,651 INFO HandlerThread:1017431 [handler.py:handle_request_defer():184] handle defer: 3 +2024-09-15 20:25:21,651 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: defer +2024-09-15 20:25:21,651 INFO SenderThread:1017431 [sender.py:send_request_defer():610] handle sender defer: 3 +2024-09-15 20:25:21,651 INFO SenderThread:1017431 [sender.py:transition_state():614] send defer: 4 +2024-09-15 
20:25:21,652 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:25:21,652 INFO HandlerThread:1017431 [handler.py:handle_request_defer():184] handle defer: 4 +2024-09-15 20:25:21,652 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: defer +2024-09-15 20:25:21,652 INFO SenderThread:1017431 [sender.py:send_request_defer():610] handle sender defer: 4 +2024-09-15 20:25:21,652 INFO SenderThread:1017431 [sender.py:transition_state():614] send defer: 5 +2024-09-15 20:25:21,652 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:25:21,652 INFO HandlerThread:1017431 [handler.py:handle_request_defer():184] handle defer: 5 +2024-09-15 20:25:21,652 DEBUG SenderThread:1017431 [sender.py:send():379] send: summary +2024-09-15 20:25:21,655 INFO SenderThread:1017431 [sender.py:_save_file():1454] saving file wandb-summary.json with policy end +2024-09-15 20:25:21,655 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: defer +2024-09-15 20:25:21,655 INFO SenderThread:1017431 [sender.py:send_request_defer():610] handle sender defer: 5 +2024-09-15 20:25:21,655 INFO SenderThread:1017431 [sender.py:transition_state():614] send defer: 6 +2024-09-15 20:25:21,655 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:25:21,655 INFO HandlerThread:1017431 [handler.py:handle_request_defer():184] handle defer: 6 +2024-09-15 20:25:21,655 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: defer +2024-09-15 20:25:21,655 INFO SenderThread:1017431 [sender.py:send_request_defer():610] handle sender defer: 6 +2024-09-15 20:25:21,657 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:25:21,906 INFO SenderThread:1017431 [sender.py:transition_state():614] send defer: 7 +2024-09-15 20:25:21,907 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:25:21,907 INFO HandlerThread:1017431 [handler.py:handle_request_defer():184] handle defer: 7 +2024-09-15 20:25:21,907 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: defer +2024-09-15 20:25:21,907 INFO SenderThread:1017431 [sender.py:send_request_defer():610] handle sender defer: 7 +2024-09-15 20:25:22,472 INFO Thread-12 :1017431 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/config.yaml +2024-09-15 20:25:22,473 INFO Thread-12 :1017431 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/wandb-summary.json +2024-09-15 20:25:22,523 INFO SenderThread:1017431 [sender.py:transition_state():614] send defer: 8 +2024-09-15 20:25:22,523 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:25:22,523 INFO HandlerThread:1017431 [handler.py:handle_request_defer():184] handle defer: 8 +2024-09-15 20:25:22,523 DEBUG SenderThread:1017431 [sender.py:send_request():406] send_request: defer +2024-09-15 20:25:22,523 INFO SenderThread:1017431 [sender.py:send_request_defer():610] handle sender defer: 8 +2024-09-15 20:25:22,523 INFO SenderThread:1017431 [job_builder.py:build():440] Attempting to build job artifact +2024-09-15 20:25:22,523 INFO SenderThread:1017431 [job_builder.py:_get_source_type():569] is 
repo sourced job +2024-09-15 20:25:22,540 INFO SenderThread:1017431 [job_builder.py:build():545] adding wandb-job metadata file +2024-09-15 20:25:22,563 INFO SenderThread:1017431 [sender.py:transition_state():614] send defer: 9 +2024-09-15 20:25:22,563 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:25:22,563 DEBUG SenderThread:1017431 [sender.py:send():379] send: artifact +2024-09-15 20:25:22,563 INFO HandlerThread:1017431 [handler.py:handle_request_defer():184] handle defer: 9 +2024-09-15 20:25:22,584 DEBUG HandlerThread:1017431 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:25:23,473 INFO Thread-12 :1017431 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/output.log +2024-09-15 20:25:24,300 INFO wandb-upload_1:1017431 [upload_job.py:push():88] Uploaded file /tmp/slurm-sarim.hashmi-40491/tmpzptm8lcg/wandb-job.json +2024-09-15 20:25:25,531 INFO wandb-upload_0:1017431 [upload_job.py:push():88] Uploaded file /home/sarim.hashmi/.local/share/wandb/artifacts/staging/tmpk978xhju +2024-09-15 20:25:25,839 WARNING StreamThr :1017431 [internal.py:is_dead():413] Internal process exiting, parent pid 1014314 disappeared +2024-09-15 20:25:25,839 ERROR StreamThr :1017431 [internal.py:wandb_internal():151] Internal process shutdown. +2024-09-15 20:25:26,584 INFO HandlerThread:1017431 [handler.py:finish():882] shutting down handler +2024-09-15 20:25:26,584 INFO WriterThread:1017431 [datastore.py:close():296] close: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/run-fsqumi24.wandb +2024-09-15 20:25:26,961 INFO SenderThread:1017431 [sender.py:send_artifact():1537] sent artifact job-https___github.com_JayParanjape_SVDSAM.git_driver_scratchpad.py - {'id': 'QXJ0aWZhY3Q6MTIyOTY1MDQ5Mw==', 'state': 'PENDING', 'artifactSequence': {'id': 'QXJ0aWZhY3RDb2xsZWN0aW9uOjQ1NjQ4NDk4Ng==', 'latestArtifact': None}} +2024-09-15 20:25:26,961 INFO SenderThread:1017431 [sender.py:finish():1615] shutting down sender +2024-09-15 20:25:26,961 INFO SenderThread:1017431 [dir_watcher.py:finish():358] shutting down directory watcher +2024-09-15 20:25:27,474 INFO SenderThread:1017431 [dir_watcher.py:finish():388] scan: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files +2024-09-15 20:25:27,474 INFO SenderThread:1017431 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/requirements.txt requirements.txt +2024-09-15 20:25:27,474 INFO SenderThread:1017431 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/wandb-metadata.json wandb-metadata.json +2024-09-15 20:25:27,474 INFO SenderThread:1017431 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/conda-environment.yaml conda-environment.yaml +2024-09-15 20:25:27,475 INFO SenderThread:1017431 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/output.log output.log +2024-09-15 20:25:27,475 INFO SenderThread:1017431 [dir_watcher.py:finish():402] scan save: 
/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/wandb-summary.json wandb-summary.json +2024-09-15 20:25:27,476 INFO SenderThread:1017431 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/config.yaml config.yaml +2024-09-15 20:25:27,477 INFO SenderThread:1017431 [file_pusher.py:finish():169] shutting down file pusher +2024-09-15 20:25:27,477 INFO SenderThread:1017431 [file_pusher.py:join():175] waiting for file pusher +2024-09-15 20:25:28,076 INFO wandb-upload_1:1017431 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/requirements.txt +2024-09-15 20:25:28,129 INFO wandb-upload_0:1017431 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/output.log +2024-09-15 20:25:28,278 INFO wandb-upload_3:1017431 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/config.yaml +2024-09-15 20:25:28,280 INFO wandb-upload_2:1017431 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/files/wandb-summary.json +2024-09-15 20:25:28,979 INFO SenderThread:1017431 [file_stream.py:finish():601] file stream finish called +2024-09-15 20:25:29,237 INFO SenderThread:1017431 [file_stream.py:finish():605] file stream finish is done diff --git a/AllinonSAM/wandb/run-20240915_202516-fsqumi24/logs/debug.log b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..5fda53de8fc05f75e8bf086bae99a43303a5dae9 --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/logs/debug.log @@ -0,0 +1,28 @@ +2024-09-15 20:25:16,824 INFO MainThread:1014314 [wandb_setup.py:_flush():76] Current SDK version is 0.17.5 +2024-09-15 20:25:16,825 INFO MainThread:1014314 [wandb_setup.py:_flush():76] Configure stats pid to 1014314 +2024-09-15 20:25:16,825 INFO MainThread:1014314 [wandb_setup.py:_flush():76] Loading settings from /home/sarim.hashmi/.config/wandb/settings +2024-09-15 20:25:16,825 INFO MainThread:1014314 [wandb_setup.py:_flush():76] Loading settings from /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/settings +2024-09-15 20:25:16,825 INFO MainThread:1014314 [wandb_setup.py:_flush():76] Loading settings from environment variables: {'api_key': '***REDACTED***'} +2024-09-15 20:25:16,825 INFO MainThread:1014314 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-09-15 20:25:16,825 INFO MainThread:1014314 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': 'driver_scratchpad.py', 'program_abspath': '/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py', 'program': '/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py'} +2024-09-15 20:25:16,825 INFO MainThread:1014314 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-09-15 20:25:16,826 INFO MainThread:1014314 [wandb_init.py:_log_setup():529] Logging user logs to 
/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/logs/debug.log +2024-09-15 20:25:16,826 INFO MainThread:1014314 [wandb_init.py:_log_setup():530] Logging internal logs to /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202516-fsqumi24/logs/debug-internal.log +2024-09-15 20:25:16,826 INFO MainThread:1014314 [wandb_init.py:init():569] calling init triggers +2024-09-15 20:25:16,826 INFO MainThread:1014314 [wandb_init.py:init():576] wandb.init called with sweep_config: {} +config: {'learning_rate': 0.0001, 'batch_size': 32, 'num_epochs': 1000, 'reg_multiplier': 0} +2024-09-15 20:25:16,826 INFO MainThread:1014314 [wandb_init.py:init():619] starting backend +2024-09-15 20:25:16,826 INFO MainThread:1014314 [wandb_init.py:init():623] setting up manager +2024-09-15 20:25:16,833 INFO MainThread:1014314 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-09-15 20:25:16,834 INFO MainThread:1014314 [wandb_init.py:init():631] backend started and connected +2024-09-15 20:25:16,840 INFO MainThread:1014314 [wandb_init.py:init():720] updated telemetry +2024-09-15 20:25:16,846 INFO MainThread:1014314 [wandb_init.py:init():753] communicating run to backend with 90.0 second timeout +2024-09-15 20:25:17,468 INFO MainThread:1014314 [wandb_run.py:_on_init():2435] communicating current version +2024-09-15 20:25:17,553 INFO MainThread:1014314 [wandb_run.py:_on_init():2444] got version response upgrade_message: "wandb version 0.18.0 is available! To upgrade, please run:\n $ pip install wandb --upgrade" + +2024-09-15 20:25:17,554 INFO MainThread:1014314 [wandb_init.py:init():804] starting run threads in backend +2024-09-15 20:25:18,240 INFO MainThread:1014314 [wandb_run.py:_console_start():2413] atexit reg +2024-09-15 20:25:18,240 INFO MainThread:1014314 [wandb_run.py:_redirect():2255] redirect: wrap_raw +2024-09-15 20:25:18,240 INFO MainThread:1014314 [wandb_run.py:_redirect():2320] Wrapping output streams. +2024-09-15 20:25:18,240 INFO MainThread:1014314 [wandb_run.py:_redirect():2345] Redirects installed. 
+2024-09-15 20:25:18,243 INFO MainThread:1014314 [wandb_init.py:init():847] run started, returning control to user process diff --git a/AllinonSAM/wandb/run-20240915_202516-fsqumi24/run-fsqumi24.wandb b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/run-fsqumi24.wandb new file mode 100644 index 0000000000000000000000000000000000000000..4c56d81dcd6863483ad6f2f03f00b809cc0cedd0 Binary files /dev/null and b/AllinonSAM/wandb/run-20240915_202516-fsqumi24/run-fsqumi24.wandb differ diff --git a/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/conda-environment.yaml b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/conda-environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/config.yaml b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1df599166260d954620c2436cd854a7bfec2cdc6 --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +learning_rate: + desc: null + value: 0.0001 +batch_size: + desc: null + value: 8 +num_epochs: + desc: null + value: 200 +reg_multiplier: + desc: null + value: 0 +_wandb: + desc: null + value: + python_version: 3.12.1 + cli_version: 0.17.5 + framework: torch + is_jupyter_run: false + is_kaggle_kernel: true + start_time: 1726417635 + t: + 1: + - 1 + - 41 + - 55 + - 105 + 2: + - 1 + - 41 + - 55 + - 105 + 3: + - 16 + - 23 + 4: 3.12.1 + 5: 0.17.5 + 8: + - 2 + - 5 + 13: linux-x86_64 diff --git a/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/output.log b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..865c57e57bcedd122422c84fbe8974786b5b8935 --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/output.log @@ -0,0 +1,25 @@ +Training parameters: +---------- +number of trainable parameters: 1034496 +batch size: 8 +num epochs: 200 +Epoch 0/199 +---------- + + + + + + + + +Traceback (most recent call last): + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py", line 382, in + main_train(data_config, model_config, args.pretrained_path, args.save_path, args.training_strategy, device=args.device) + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py", line 361, in main_train + model = train_dl(model, dataset_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device, retain_graph=retain_graph, neg2pos_ratio=data_config['data']['negative_to_positive_ratio'], reg_multiplier=model_config['training']['reg_multiplier']) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/train.py", line 199, in train_dl + running_loss += loss.item() * inputs.size(0) + ^^^^^^^^^^^ +KeyboardInterrupt \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/requirements.txt 
b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..4f9a3909a6c3c0ad41a7c480c097a5cffd1b26cb --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/requirements.txt @@ -0,0 +1,507 @@ +Babel==2.14.0 +Brotli==1.1.0 +CoLT5-attention==0.11.1 +Deprecated==1.2.14 +GitPython==3.1.43 +Jinja2==3.1.3 +Mako==1.3.5 +Markdown==3.6 +MarkupSafe==2.1.4 +PTable==0.9.2 +PuLP==2.9.0 +PyGithub==1.59.1 +PyJWT==2.9.0 +PyNaCl==1.5.0 +PyPika==0.48.9 +PySocks==1.7.1 +PyYAML==6.0.1 +Pygments==2.15.1 +Pygments==2.17.2 +SQLAlchemy==2.0.32 +Send2Trash==1.8.2 +SimpleITK==2.3.1 +TorchFix==0.5.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.33.0 +aiohttp==3.9.3 +aiosignal==1.3.1 +albucore==0.0.13 +albumentations==1.4.13 +alembic==1.13.2 +annotated-types==0.6.0 +anyio==4.2.0 +anytree==2.12.1 +appdirs==1.4.4 +arel==0.3.0 +argon2-cffi-bindings==21.2.0 +argon2-cffi==23.1.0 +argparse==1.4.0 +arrow==1.3.0 +asgiref==3.8.1 +asttokens==2.0.5 +async-asgi-testclient==1.4.11 +async-lru==2.0.4 +asyncio==3.4.3 +attrs==23.2.0 +autocommand==2.2.2 +backcall==0.2.0 +backoff==2.2.1 +backports.tarfile==1.2.0 +bcrypt==4.2.0 +beartype==0.18.5 +beautifulsoup4==4.12.3 +bidict==0.23.1 +bitsandbytes==0.43.3 +black==24.8.0 +bleach==6.1.0 +blis==0.7.11 +boto3==1.35.10 +botocore==1.35.10 +build==1.2.1 +cacheout==0.14.1 +cachetools==4.2.4 +catalogue==2.0.10 +certifi==2024.2.2 +cffi==1.16.0 +charset-normalizer==3.3.2 +chex==0.1.86 +chroma-hnswlib==0.7.3 +chromadb==0.4.24 +clarifai-grpc==10.7.3 +clarifai==10.7.0 +click==8.1.7 +clip==1.0 +cloudpathlib==0.16.0 +cohere==5.6.2 +colorama==0.4.6 +coloredlogs==15.0.1 +comm==0.2.1 +confection==0.1.4 +contextlib2==21.6.0 +contourpy==1.2.1 +crewai-tools==0.1.6 +crewai==0.28.8 +cryptography==42.0.8 +cycler==0.12.1 +cymem==2.0.8 +dataclasses-json==0.6.5 +datasets==2.19.1 +debugpy==1.6.7 +decorator==5.1.1 +defusedxml==0.7.1 +deprecation==2.1.0 +dill==0.3.8 +distinctipy==1.3.4 +distro==1.9.0 +docker-pycreds==0.4.0 +docstring_parser==0.16 +efficientnet-pytorch==0.7.1 +einops-exts==0.0.4 +einops==0.8.0 +einx==0.3.0 +embedchain==0.1.113 +en-core-web-sm==3.7.1 +etils==1.9.3 +eval_type_backport==0.2.0 +executing==2.1.0 +faiss-cpu==1.8.0.post1 +fastapi==0.109.0 +fastavro==1.9.5 +fastjsonschema==2.19.1 +ffmpeg-python==0.2.0 +filelock==3.13.1 +flake8==7.1.1 +flatbuffers==24.3.25 +flax==0.9.0 +flwr-datasets==0.2.0 +flwr==1.10.0 +fonttools==4.50.0 +fqdn==1.5.1 +fr-core-news-sm==3.7.0 +frozendict==2.4.4 +frozenlist==1.4.1 +fsspec==2023.12.2 +ftfy==6.2.3 +future==1.0.0 +gitdb==4.0.11 +giturlparse==0.12.0 +google-ai-generativelanguage==0.6.1 +google-api-core==2.18.0 +google-api-python-client==2.125.0 +google-auth-httplib2==0.2.0 +google-auth==2.29.0 +google-cloud-aiplatform==1.48.0 +google-cloud-bigquery==3.20.1 +google-cloud-core==2.4.1 +google-cloud-resource-manager==1.12.3 +google-cloud-storage==2.16.0 +google-crc32c==1.5.0 +google-generativeai==0.5.0 +google-resumable-media==2.7.0 +google==3.0.0 +googleapis-common-protos==1.63.0 +gptcache==0.1.44 +gpytorch==1.12 +graphviz==0.20.1 +greenlet==3.0.3 +groq==0.5.0 +grpc-google-iam-v1==0.13.0 +grpcio-status==1.62.1 +grpcio-tools==1.62.3 +grpcio==1.64.3 +h11==0.14.0 +h2==4.1.0 +h5py==3.11.0 +hpack==4.0.0 +httpcore==1.0.5 +httplib2==0.22.0 +httptools==0.6.1 +httpx-sse==0.4.0 +httpx==0.27.0 +huggingface-hub==0.24.5 +humanfriendly==10.0 +humanize==4.10.0 +hyperframe==6.0.1 +idna==3.6 +imagecodecs==2024.1.1 +imageio==2.34.0 +importlib_metadata==8.0.0 +importlib_metadata==8.4.0 
+importlib_resources==6.4.0 +importlib_resources==6.4.4 +imutils==0.5.4 +inflect==7.3.1 +iniconfig==2.0.0 +inquirerpy==0.3.4 +instructor==0.5.2 +iopath==0.1.10 +ipykernel==5.5.6 +ipykernel==6.28.0 +ipython-genutils==0.2.0 +ipython==7.16.1 +ipython==8.20.0 +isoduration==20.11.0 +iterators==0.0.2 +jaraco.context==5.3.0 +jaraco.functools==4.0.1 +jaraco.text==3.12.1 +jax==0.4.31 +jaxlib==0.4.31 +jaxtyping==0.2.19 +jedi==0.18.1 +jedi==0.19.1 +jiter==0.4.2 +jmespath==1.0.1 +joblib==1.4.2 +json5==0.9.14 +json_repair==0.25.3 +jsonpatch==1.33 +jsonpointer==2.4 +jsonref==1.1.0 +jsonschema-specifications==2023.12.1 +jsonschema==4.20.0 +jupyter-events==0.9.0 +jupyter-lsp==2.2.2 +jupyter_client==8.6.0 +jupyter_core==5.5.0 +jupyter_core==5.7.1 +jupyter_server==2.12.5 +jupyter_server_terminals==0.5.2 +jupyterlab==4.0.12 +jupyterlab_pygments==0.3.0 +jupyterlab_server==2.25.2 +jupyterplot==0.0.3 +kiwisolver==1.4.5 +kubernetes==30.1.0 +lancedb==0.5.7 +langchain-cohere==0.1.5 +langchain-community==0.0.29 +langchain-core==0.1.52 +langchain-experimental==0.0.55 +langchain-groq==0.1.3 +langchain-openai==0.1.7 +langchain-text-splitters==0.0.2 +langchain==0.1.13 +langcodes==3.4.0 +langsmith==0.1.108 +language_data==1.2.0 +lazy_loader==0.4 +lib==4.0.0 +libcst==1.1.0 +lightning-utilities==0.11.2 +linear-operator==0.5.3 +local-attention==1.9.15 +loguru==0.7.2 +lrcurve==1.1.0 +lxml==5.2.2 +marisa-trie==1.1.0 +markdown-it-py==3.0.0 +marshmallow==3.21.2 +matplotlib-inline==0.1.6 +matplotlib==3.8.4 +mccabe==0.7.0 +mdurl==0.1.2 +mem0ai==0.0.20 +mistune==3.0.2 +ml-dtypes==0.4.0 +mmh3==4.1.0 +monai==1.3.1 +monotonic==1.6 +more-itertools==10.3.0 +mpmath==1.3.0 +msgpack==1.0.8 +multidict==6.0.5 +multiprocess==0.70.16 +munch==4.0.0 +murmurhash==1.0.10 +mutagen==1.47.0 +mypy-extensions==1.0.0 +nbclient==0.9.0 +nbconvert==7.14.2 +nbformat==5.9.2 +nest-asyncio==1.6.0 +networkx==3.2.1 +nibabel==5.2.1 +nilearn==0.10.4 +nltk==3.5 +nodeenv==1.9.1 +notebook==7.0.7 +notebook_shim==0.2.3 +numerize==0.12 +numpy==1.26.3 +nvidia-cublas-cu12==12.1.3.1 +nvidia-cuda-cupti-cu12==12.1.105 +nvidia-cuda-nvrtc-cu12==12.1.105 +nvidia-cuda-runtime-cu12==12.1.105 +nvidia-cudnn-cu12==9.1.0.70 +nvidia-cufft-cu12==11.0.2.54 +nvidia-curand-cu12==10.3.2.106 +nvidia-cusolver-cu12==11.4.5.107 +nvidia-cusparse-cu12==12.1.0.106 +nvidia-nccl-cu12==2.20.5 +nvidia-nvjitlink-cu12==12.3.101 +nvidia-nvtx-cu12==12.1.105 +oauthlib==3.2.2 +onnxruntime==1.19.0 +openai==1.43.0 +opencv-python-headless==4.10.0.84 +opencv-python==4.9.0.80 +opentelemetry-api==1.27.0 +opentelemetry-exporter-otlp-proto-common==1.27.0 +opentelemetry-exporter-otlp-proto-grpc==1.27.0 +opentelemetry-exporter-otlp-proto-http==1.27.0 +opentelemetry-instrumentation-asgi==0.48b0 +opentelemetry-instrumentation-fastapi==0.48b0 +opentelemetry-instrumentation==0.48b0 +opentelemetry-proto==1.27.0 +opentelemetry-sdk==1.27.0 +opentelemetry-semantic-conventions==0.48b0 +opentelemetry-util-http==0.48b0 +opt-einsum==3.3.0 +optax==0.2.3 +orbax-checkpoint==0.6.1 +orjson==3.10.3 +outcome==1.3.0.post0 +overrides==7.7.0 +packaging==23.2 +packaging==24.1 +pandas==2.1.4 +pandocfilters==1.5.1 +parameterized==0.9.0 +parso==0.8.3 +pathspec==0.12.1 +peft==0.12.0 +pendulum==3.0.0 +pexpect==4.8.0 +pexpect==4.9.0 +pfzy==0.3.4 +pickleshare==0.7.5 +pillow==10.2.0 +pinecone-plugin-inference==1.0.3 +pinecone-plugin-interface==0.0.7 +pinecone==5.1.0 +pip==24.2 +platformdirs==3.10.0 +platformdirs==4.2.0 +platformdirs==4.2.2 +pluggy==1.5.0 +portalocker==2.10.1 +posthog==3.6.0 +preshed==3.0.9 +pretrainedmodels==0.7.4 
+prettytable==3.11.0 +prometheus-client==0.19.0 +prompt-toolkit==3.0.43 +proto-plus==1.23.0 +protobuf==3.20.3 +psutil==5.9.0 +ptflops==0.7.3 +ptyprocess==0.7.0 +pulsar-client==3.5.0 +pure-eval==0.2.2 +py==1.11.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pycodestyle==2.12.1 +pycparser==2.21 +pycryptodome==3.20.0 +pycryptodomex==3.20.0 +pydantic==2.8.2 +pydantic_core==2.20.1 +pydicom==2.4.4 +pyflakes==3.2.0 +pylance==0.9.18 +pynrrd==0.4.3 +pyparsing==3.1.2 +pypdf==4.3.1 +pyproject_hooks==1.1.0 +pyright==1.1.378 +pysbd==0.3.4 +pytest==8.3.2 +python-dateutil==2.8.2 +python-dotenv==1.0.0 +python-json-logger==2.0.7 +python-magic==0.4.27 +python-multipart==0.0.5 +python-rapidjson==1.20 +pytorch-lightning==2.2.1 +pytorch-metric-learning==2.6.0 +pytube==15.0.0 +pytz==2024.1 +pyzmq==25.1.2 +qdrant-client==1.11.1 +rarfile==4.2 +ratelimit==2.2.1 +ratelimiter==1.2.0.post0 +referencing==0.33.0 +regex==2023.12.25 +requests-oauthlib==2.0.0 +requests-toolbelt==1.0.0 +requests==2.31.0 +retry==0.9.2 +rfc3339-validator==0.1.4 +rfc3986-validator==0.1.1 +rich==13.7.1 +rpds-py==0.17.1 +rsa==4.9 +s3transfer==0.10.2 +safetensors==0.4.2 +schedulefree==1.2.7 +schema==0.7.5 +scikit-image==0.23.1 +scikit-learn==1.5.0 +scipy==1.13.0 +seaborn==0.13.2 +segmentation-models-pytorch==0.3.3 +selenium==4.24.0 +semver==3.0.2 +sentence-transformers==3.0.1 +sentencepiece==0.2.0 +sentry-sdk==2.12.0 +setproctitle==1.3.3 +setuptools==74.1.1 +shapely==2.0.2 +shellingham==1.5.4 +shortuuid==1.0.13 +six==1.16.0 +skypilot==0.6.1 +smart-open==6.4.0 +smmap==5.0.1 +sniffio==1.3.0 +sortedcontainers==2.4.0 +soupsieve==2.5 +spacy-legacy==3.0.12 +spacy-loggers==1.0.5 +spacy==3.7.4 +srsly==2.4.8 +sse-starlette==2.1.0 +stack-data==0.2.0 +starlette==0.35.1 +stringcase==1.2.0 +stripe==10.10.0 +supervisely==6.73.181 +swarms-cloud==0.3.7 +swarms-memory==0.1.2 +swarms==5.6.6 +sympy==1.12 +tabulate==0.9.0 +tenacity==8.5.0 +tensorboard-data-server==0.7.2 +tensorboard==2.17.0 +tensorboardX==2.6.2.2 +tensorstore==0.1.64 +termcolor==2.4.0 +terminado==0.18.0 +thinc==8.2.3 +threadpoolctl==3.5.0 +tifffile==2024.2.12 +tiktoken==0.7.0 +time-machine==2.15.0 +timm==0.9.2 +tinycss2==1.2.1 +tokenizers==0.19.1 +toml==0.10.2 +tomli==2.0.1 +tomli==2.0.1 +tomli_w==1.0.0 +toolz==0.12.1 +torch==2.4.0 +torchmetrics==1.3.2 +torchsummary==1.5.1 +torchtext==0.5.0 +torchview==0.2.6 +torchvision==0.19.0 +tornado==6.3.3 +tornado==6.4 +tqdm==4.66.5 +traitlets==5.14.1 +traitlets==5.7.1 +transformers==4.44.2 +trimesh==3.23.5 +trio-websocket==0.11.1 +trio==0.26.2 +triton==3.0.0 +tritonclient==2.49.0 +typeguard==4.3.0 +typeguard==4.3.0 +typer==0.9.4 +types-python-dateutil==2.8.19.20240106 +types-requests==2.32.0.20240712 +typing-inspect==0.9.0 +typing_extensions==4.12.2 +typing_extensions==4.12.2 +tzdata==2024.1 +uri-template==1.3.0 +uritemplate==4.1.1 +urllib3==2.2.2 +uvicorn==0.30.6 +uvloop==0.20.0 +varname==0.13.3 +vector-quantize-pytorch==1.15.6 +vertexai==1.46.0 +vision-mamba==0.1.0 +wandb==0.17.5 +wasabi==1.1.2 +watchfiles==0.24.0 +wcwidth==0.2.13 +weasel==0.3.4 +webcolors==1.13 +webencodings==0.5.1 +websocket-client==1.8.0 +websockets==10.4 +wheel==0.41.2 +wheel==0.43.0 +wrapt==1.16.0 +wsproto==1.2.0 +xxhash==3.4.1 +yacs==0.1.8 +yarl==1.9.4 +youtube-transcript-api==0.6.2 +yt-dlp==2023.12.30 +zetascale==2.7.0 +zipp==3.19.2 +zipp==3.20.1 +zstd==1.5.5.1 \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/wandb-metadata.json b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/wandb-metadata.json new 
file mode 100644 index 0000000000000000000000000000000000000000..c15deda710ebafef479ab9a86cd372b56d8de4cb --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/wandb-metadata.json @@ -0,0 +1,210 @@ +{ + "os": "Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.35", + "python": "3.12.1", + "heartbeatAt": "2024-09-15T16:27:16.583335", + "startedAt": "2024-09-15T16:27:15.717993", + "docker": null, + "cuda": null, + "args": [ + "--model_config", + "model_svdtuning.yml", + "--data_config", + "config_arcade.yml", + "--save_path", + "./temp.pth" + ], + "state": "running", + "program": "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py", + "codePathLocal": "driver_scratchpad.py", + "codePath": "driver_scratchpad.py", + "git": { + "remote": "https://github.com/JayParanjape/SVDSAM.git", + "commit": "5936d0eff64d84fbefed6ecfe4bcc841459c2fc3" + }, + "cpu_count": 16, + "cpu_count_logical": 32, + "cpu_freq": { + "current": 3.891031250000002, + "min": 2200.0, + "max": 3900.0 + }, + "cpu_freq_per_core": [ + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 4.025, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + } + ], + "disk": { + "/": { + "total": 1.0, + "used": 0.04192352294921875 + } + }, + "gpu": "NVIDIA GeForce RTX 4090", + "gpu_count": 1, + "gpu_devices": [ + { + "name": "NVIDIA GeForce RTX 4090", + "memory_total": 25757220864 + } + ], + "memory": { + "total": 62.65229415893555 + } +} diff --git a/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/wandb-summary.json b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8cc12bfe91c134785c665544a6f0142ff356421d --- /dev/null +++ 
b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 19}} \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20240915_202715-9m35lk87/logs/debug-internal.log b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..572687b94c815ced9f8d5b356194f1e92831268f --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/logs/debug-internal.log @@ -0,0 +1,232 @@ +2024-09-15 20:27:15,747 INFO StreamThr :1022491 [internal.py:wandb_internal():85] W&B internal server running at pid: 1022491, started at: 2024-09-15 20:27:15.744795 +2024-09-15 20:27:15,748 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: status +2024-09-15 20:27:15,749 INFO WriterThread:1022491 [datastore.py:open_for_write():87] open: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/run-9m35lk87.wandb +2024-09-15 20:27:15,752 DEBUG SenderThread:1022491 [sender.py:send():379] send: header +2024-09-15 20:27:15,752 DEBUG SenderThread:1022491 [sender.py:send():379] send: run +2024-09-15 20:27:16,371 INFO SenderThread:1022491 [dir_watcher.py:__init__():211] watching files in: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files +2024-09-15 20:27:16,371 INFO SenderThread:1022491 [sender.py:_start_run_threads():1188] run started: 9m35lk87 with start time 1726417635.745257 +2024-09-15 20:27:16,376 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: check_version +2024-09-15 20:27:16,376 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: check_version +2024-09-15 20:27:16,473 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: run_start +2024-09-15 20:27:16,504 DEBUG HandlerThread:1022491 [system_info.py:__init__():26] System info init +2024-09-15 20:27:16,504 DEBUG HandlerThread:1022491 [system_info.py:__init__():41] System info init done +2024-09-15 20:27:16,504 INFO HandlerThread:1022491 [system_monitor.py:start():194] Starting system monitor +2024-09-15 20:27:16,504 INFO SystemMonitor:1022491 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-09-15 20:27:16,505 INFO HandlerThread:1022491 [system_monitor.py:probe():214] Collecting system info +2024-09-15 20:27:16,505 INFO SystemMonitor:1022491 [interfaces.py:start():188] Started cpu monitoring +2024-09-15 20:27:16,505 INFO SystemMonitor:1022491 [interfaces.py:start():188] Started disk monitoring +2024-09-15 20:27:16,506 INFO SystemMonitor:1022491 [interfaces.py:start():188] Started gpu monitoring +2024-09-15 20:27:16,506 INFO SystemMonitor:1022491 [interfaces.py:start():188] Started memory monitoring +2024-09-15 20:27:16,506 INFO SystemMonitor:1022491 [interfaces.py:start():188] Started network monitoring +2024-09-15 20:27:16,583 DEBUG HandlerThread:1022491 [system_info.py:probe():152] Probing system +2024-09-15 20:27:16,584 DEBUG HandlerThread:1022491 [system_info.py:_probe_git():137] Probing git +2024-09-15 20:27:16,591 DEBUG HandlerThread:1022491 [system_info.py:_probe_git():145] Probing git done +2024-09-15 20:27:16,591 DEBUG HandlerThread:1022491 [system_info.py:probe():200] Probing system done +2024-09-15 20:27:16,592 DEBUG HandlerThread:1022491 [system_monitor.py:probe():223] {'os': 'Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.35', 'python': '3.12.1', 'heartbeatAt': 
'2024-09-15T16:27:16.583335', 'startedAt': '2024-09-15T16:27:15.717993', 'docker': None, 'cuda': None, 'args': ('--model_config', 'model_svdtuning.yml', '--data_config', 'config_arcade.yml', '--save_path', './temp.pth'), 'state': 'running', 'program': '/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py', 'codePathLocal': 'driver_scratchpad.py', 'codePath': 'driver_scratchpad.py', 'git': {'remote': 'https://github.com/JayParanjape/SVDSAM.git', 'commit': '5936d0eff64d84fbefed6ecfe4bcc841459c2fc3'}, 'cpu_count': 16, 'cpu_count_logical': 32, 'cpu_freq': {'current': 3.891031250000002, 'min': 2200.0, 'max': 3900.0}, 'cpu_freq_per_core': [{'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 4.025, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}], 'disk': {'/': {'total': 1.0, 'used': 0.04192352294921875}}, 'gpu': 'NVIDIA GeForce RTX 4090', 'gpu_count': 1, 'gpu_devices': [{'name': 'NVIDIA GeForce RTX 4090', 'memory_total': 25757220864}], 'memory': {'total': 62.65229415893555}} +2024-09-15 20:27:16,592 INFO HandlerThread:1022491 [system_monitor.py:probe():224] Finished collecting system info +2024-09-15 20:27:16,592 INFO HandlerThread:1022491 [system_monitor.py:probe():227] Publishing system info +2024-09-15 20:27:16,592 DEBUG HandlerThread:1022491 [system_info.py:_save_conda():209] Saving list of conda packages installed into the current environment +2024-09-15 20:27:16,595 ERROR HandlerThread:1022491 [system_info.py:_save_conda():223] Error saving conda packages: [Errno 2] No such file or directory: 'conda' +Traceback (most recent call last): + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/wandb/sdk/internal/system/system_info.py", line 216, in _save_conda + subprocess.call( + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/subprocess.py", line 389, in call + with Popen(*popenargs, **kwargs) as p: + ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/subprocess.py", line 1026, in __init__ + self._execute_child(args, executable, preexec_fn, close_fds, + File 
"/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/subprocess.py", line 1950, in _execute_child + raise child_exception_type(errno_num, err_msg, err_filename) +FileNotFoundError: [Errno 2] No such file or directory: 'conda' +2024-09-15 20:27:16,596 DEBUG HandlerThread:1022491 [system_info.py:_save_conda():224] Saving conda packages done +2024-09-15 20:27:16,600 INFO HandlerThread:1022491 [system_monitor.py:probe():229] Finished publishing system info +2024-09-15 20:27:16,610 DEBUG SenderThread:1022491 [sender.py:send():379] send: files +2024-09-15 20:27:16,610 INFO SenderThread:1022491 [sender.py:_save_file():1454] saving file wandb-metadata.json with policy now +2024-09-15 20:27:16,946 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: python_packages +2024-09-15 20:27:16,947 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: stop_status +2024-09-15 20:27:16,947 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:16,947 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: python_packages +2024-09-15 20:27:16,952 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: stop_status +2024-09-15 20:27:17,315 DEBUG SenderThread:1022491 [sender.py:send():379] send: telemetry +2024-09-15 20:27:17,375 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/conda-environment.yaml +2024-09-15 20:27:17,376 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/requirements.txt +2024-09-15 20:27:17,376 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/wandb-metadata.json +2024-09-15 20:27:17,376 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:17,682 INFO wandb-upload_0:1022491 [upload_job.py:push():130] Uploaded file /tmp/slurm-sarim.hashmi-40491/tmp6jw8ow2kwandb/8tqza9bz-wandb-metadata.json +2024-09-15 20:27:17,945 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:18,945 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:19,377 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:19,945 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:20,799 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:27:20,945 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:21,378 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:21,945 DEBUG 
HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:22,945 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:23,379 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:23,945 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:24,945 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:25,379 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:25,919 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:27:25,945 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:26,945 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:27,380 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:27,946 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:28,946 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:29,381 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:29,946 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:30,946 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:30,978 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:27:31,381 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:31,945 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: stop_status +2024-09-15 20:27:31,945 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: stop_status +2024-09-15 20:27:31,946 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:32,946 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:33,382 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:33,946 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:34,946 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:35,383 INFO Thread-12 :1022491 
[dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:35,769 DEBUG SenderThread:1022491 [sender.py:send():379] send: exit +2024-09-15 20:27:35,769 INFO SenderThread:1022491 [sender.py:send_exit():586] handling exit code: 255 +2024-09-15 20:27:35,769 INFO SenderThread:1022491 [sender.py:send_exit():588] handling runtime: 19 +2024-09-15 20:27:35,773 INFO SenderThread:1022491 [sender.py:_save_file():1454] saving file wandb-summary.json with policy end +2024-09-15 20:27:35,773 INFO SenderThread:1022491 [sender.py:send_exit():594] send defer +2024-09-15 20:27:35,773 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:35,773 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 0 +2024-09-15 20:27:35,773 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:35,773 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 0 +2024-09-15 20:27:35,773 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 1 +2024-09-15 20:27:35,773 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:35,773 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 1 +2024-09-15 20:27:35,773 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:35,773 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 1 +2024-09-15 20:27:35,773 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 2 +2024-09-15 20:27:35,774 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:35,774 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 2 +2024-09-15 20:27:35,774 INFO HandlerThread:1022491 [system_monitor.py:finish():203] Stopping system monitor +2024-09-15 20:27:35,774 INFO HandlerThread:1022491 [interfaces.py:finish():200] Joined cpu monitor +2024-09-15 20:27:35,774 DEBUG SystemMonitor:1022491 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-09-15 20:27:35,774 INFO HandlerThread:1022491 [interfaces.py:finish():200] Joined disk monitor +2024-09-15 20:27:35,774 DEBUG SystemMonitor:1022491 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-09-15 20:27:35,774 DEBUG SystemMonitor:1022491 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-09-15 20:27:35,830 INFO HandlerThread:1022491 [interfaces.py:finish():200] Joined gpu monitor +2024-09-15 20:27:35,830 INFO HandlerThread:1022491 [interfaces.py:finish():200] Joined memory monitor +2024-09-15 20:27:35,830 INFO HandlerThread:1022491 [interfaces.py:finish():200] Joined network monitor +2024-09-15 20:27:35,831 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:35,831 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 2 +2024-09-15 20:27:35,831 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 3 +2024-09-15 20:27:35,831 DEBUG SenderThread:1022491 [sender.py:send():379] send: stats +2024-09-15 20:27:35,831 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:35,831 INFO HandlerThread:1022491 
[handler.py:handle_request_defer():184] handle defer: 3 +2024-09-15 20:27:35,831 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:35,831 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 3 +2024-09-15 20:27:35,831 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 4 +2024-09-15 20:27:35,831 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:35,831 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 4 +2024-09-15 20:27:35,832 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:35,832 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 4 +2024-09-15 20:27:35,832 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 5 +2024-09-15 20:27:35,832 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:35,832 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 5 +2024-09-15 20:27:35,832 DEBUG SenderThread:1022491 [sender.py:send():379] send: summary +2024-09-15 20:27:35,838 INFO SenderThread:1022491 [sender.py:_save_file():1454] saving file wandb-summary.json with policy end +2024-09-15 20:27:35,838 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:35,838 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 5 +2024-09-15 20:27:35,838 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 6 +2024-09-15 20:27:35,838 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:35,838 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 6 +2024-09-15 20:27:35,838 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:35,838 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 6 +2024-09-15 20:27:35,840 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:27:36,081 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 7 +2024-09-15 20:27:36,081 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:36,081 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 7 +2024-09-15 20:27:36,081 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:36,081 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 7 +2024-09-15 20:27:36,386 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/config.yaml +2024-09-15 20:27:36,386 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/wandb-summary.json +2024-09-15 20:27:36,769 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:27:37,365 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 8 +2024-09-15 20:27:37,365 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:27:37,365 DEBUG 
HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:37,365 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 8 +2024-09-15 20:27:37,365 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:37,365 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 8 +2024-09-15 20:27:37,365 INFO SenderThread:1022491 [job_builder.py:build():440] Attempting to build job artifact +2024-09-15 20:27:37,365 INFO SenderThread:1022491 [job_builder.py:_get_source_type():569] is repo sourced job +2024-09-15 20:27:37,381 INFO SenderThread:1022491 [job_builder.py:build():545] adding wandb-job metadata file +2024-09-15 20:27:37,386 INFO Thread-12 :1022491 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:37,387 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 9 +2024-09-15 20:27:37,388 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:37,388 DEBUG SenderThread:1022491 [sender.py:send():379] send: artifact +2024-09-15 20:27:37,388 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 9 +2024-09-15 20:27:37,769 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:27:38,561 INFO SenderThread:1022491 [sender.py:send_artifact():1537] sent artifact job-https___github.com_JayParanjape_SVDSAM.git_driver_scratchpad.py - {'id': 'QXJ0aWZhY3Q6MTIyOTY1MDQ5Mw==', 'state': 'COMMITTED', 'artifactSequence': {'id': 'QXJ0aWZhY3RDb2xsZWN0aW9uOjQ1NjQ4NDk4Ng==', 'latestArtifact': {'id': 'QXJ0aWZhY3Q6MTIyOTY1MDQ5Mw==', 'versionIndex': 0}}} +2024-09-15 20:27:38,561 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:38,561 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 9 +2024-09-15 20:27:38,561 INFO SenderThread:1022491 [dir_watcher.py:finish():358] shutting down directory watcher +2024-09-15 20:27:39,387 INFO SenderThread:1022491 [dir_watcher.py:finish():388] scan: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files +2024-09-15 20:27:39,387 INFO SenderThread:1022491 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/config.yaml config.yaml +2024-09-15 20:27:39,387 INFO SenderThread:1022491 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/wandb-metadata.json wandb-metadata.json +2024-09-15 20:27:39,387 INFO SenderThread:1022491 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/wandb-summary.json wandb-summary.json +2024-09-15 20:27:39,389 INFO SenderThread:1022491 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log output.log +2024-09-15 20:27:39,390 INFO SenderThread:1022491 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/conda-environment.yaml 
conda-environment.yaml +2024-09-15 20:27:39,391 INFO SenderThread:1022491 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/requirements.txt requirements.txt +2024-09-15 20:27:39,391 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 10 +2024-09-15 20:27:39,391 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:27:39,391 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:39,392 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 10 +2024-09-15 20:27:39,392 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:39,392 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 10 +2024-09-15 20:27:39,392 INFO SenderThread:1022491 [file_pusher.py:finish():169] shutting down file pusher +2024-09-15 20:27:39,770 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:27:39,770 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:27:39,934 INFO wandb-upload_0:1022491 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/config.yaml +2024-09-15 20:27:40,224 INFO wandb-upload_1:1022491 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/wandb-summary.json +2024-09-15 20:27:40,244 INFO wandb-upload_2:1022491 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/output.log +2024-09-15 20:27:40,276 INFO wandb-upload_3:1022491 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/files/requirements.txt +2024-09-15 20:27:40,476 INFO Thread-11 (_thread_body):1022491 [sender.py:transition_state():614] send defer: 11 +2024-09-15 20:27:40,476 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:40,477 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 11 +2024-09-15 20:27:40,477 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:40,477 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 11 +2024-09-15 20:27:40,477 INFO SenderThread:1022491 [file_pusher.py:join():175] waiting for file pusher +2024-09-15 20:27:40,477 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 12 +2024-09-15 20:27:40,477 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:40,477 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 12 +2024-09-15 20:27:40,477 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:40,477 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 12 +2024-09-15 20:27:40,477 INFO SenderThread:1022491 [file_stream.py:finish():601] file stream finish called +2024-09-15 20:27:40,770 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:27:40,884 INFO 
SenderThread:1022491 [file_stream.py:finish():605] file stream finish is done +2024-09-15 20:27:40,884 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 13 +2024-09-15 20:27:40,884 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:27:40,884 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:40,884 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 13 +2024-09-15 20:27:40,884 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:40,884 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 13 +2024-09-15 20:27:40,884 INFO SenderThread:1022491 [sender.py:transition_state():614] send defer: 14 +2024-09-15 20:27:40,884 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:27:40,884 DEBUG SenderThread:1022491 [sender.py:send():379] send: final +2024-09-15 20:27:40,885 INFO HandlerThread:1022491 [handler.py:handle_request_defer():184] handle defer: 14 +2024-09-15 20:27:40,885 DEBUG SenderThread:1022491 [sender.py:send():379] send: footer +2024-09-15 20:27:40,885 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: defer +2024-09-15 20:27:40,885 INFO SenderThread:1022491 [sender.py:send_request_defer():610] handle sender defer: 14 +2024-09-15 20:27:40,885 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:27:40,885 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:27:40,885 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: server_info +2024-09-15 20:27:40,885 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: server_info +2024-09-15 20:27:40,885 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:27:40,886 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: get_summary +2024-09-15 20:27:40,886 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: sampled_history +2024-09-15 20:27:40,887 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:27:41,118 DEBUG SenderThread:1022491 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:27:41,118 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:27:41,119 INFO MainThread:1022491 [wandb_run.py:_footer_history_summary_info():4016] rendering history +2024-09-15 20:27:41,119 INFO MainThread:1022491 [wandb_run.py:_footer_history_summary_info():4048] rendering summary +2024-09-15 20:27:41,119 INFO MainThread:1022491 [wandb_run.py:_footer_sync_info():3975] logging synced files +2024-09-15 20:27:41,119 DEBUG HandlerThread:1022491 [handler.py:handle_request():158] handle_request: shutdown +2024-09-15 20:27:41,119 INFO HandlerThread:1022491 [handler.py:finish():882] shutting down handler +2024-09-15 20:27:42,119 INFO SenderThread:1022491 [sender.py:finish():1615] shutting down sender +2024-09-15 20:27:42,119 INFO WriterThread:1022491 [datastore.py:close():296] close: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/run-9m35lk87.wandb +2024-09-15 20:27:42,119 INFO SenderThread:1022491 [file_pusher.py:finish():169] shutting down file pusher +2024-09-15 
20:27:42,119 INFO SenderThread:1022491 [file_pusher.py:join():175] waiting for file pusher diff --git a/AllinonSAM/wandb/run-20240915_202715-9m35lk87/logs/debug.log b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..9763d7b1d92d3da06535f6a3fb7fe0bafb593abf --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/logs/debug.log @@ -0,0 +1,29 @@ +2024-09-15 20:27:15,740 INFO MainThread:1022023 [wandb_setup.py:_flush():76] Current SDK version is 0.17.5 +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_setup.py:_flush():76] Configure stats pid to 1022023 +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_setup.py:_flush():76] Loading settings from /home/sarim.hashmi/.config/wandb/settings +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_setup.py:_flush():76] Loading settings from /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/settings +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_setup.py:_flush():76] Loading settings from environment variables: {'api_key': '***REDACTED***'} +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': 'driver_scratchpad.py', 'program_abspath': '/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py', 'program': '/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py'} +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_init.py:_log_setup():529] Logging user logs to /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/logs/debug.log +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_init.py:_log_setup():530] Logging internal logs to /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_202715-9m35lk87/logs/debug-internal.log +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_init.py:init():569] calling init triggers +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_init.py:init():576] wandb.init called with sweep_config: {} +config: {'learning_rate': 0.0001, 'batch_size': 8, 'num_epochs': 200, 'reg_multiplier': 0} +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_init.py:init():619] starting backend +2024-09-15 20:27:15,741 INFO MainThread:1022023 [wandb_init.py:init():623] setting up manager +2024-09-15 20:27:15,743 INFO MainThread:1022023 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-09-15 20:27:15,744 INFO MainThread:1022023 [wandb_init.py:init():631] backend started and connected +2024-09-15 20:27:15,746 INFO MainThread:1022023 [wandb_init.py:init():720] updated telemetry +2024-09-15 20:27:15,751 INFO MainThread:1022023 [wandb_init.py:init():753] communicating run to backend with 90.0 second timeout +2024-09-15 20:27:16,375 INFO MainThread:1022023 [wandb_run.py:_on_init():2435] communicating current version +2024-09-15 20:27:16,467 INFO MainThread:1022023 [wandb_run.py:_on_init():2444] got version response upgrade_message: "wandb version 0.18.0 is available! 
To upgrade, please run:\n $ pip install wandb --upgrade" + +2024-09-15 20:27:16,468 INFO MainThread:1022023 [wandb_init.py:init():804] starting run threads in backend +2024-09-15 20:27:16,944 INFO MainThread:1022023 [wandb_run.py:_console_start():2413] atexit reg +2024-09-15 20:27:16,945 INFO MainThread:1022023 [wandb_run.py:_redirect():2255] redirect: wrap_raw +2024-09-15 20:27:16,945 INFO MainThread:1022023 [wandb_run.py:_redirect():2320] Wrapping output streams. +2024-09-15 20:27:16,945 INFO MainThread:1022023 [wandb_run.py:_redirect():2345] Redirects installed. +2024-09-15 20:27:16,947 INFO MainThread:1022023 [wandb_init.py:init():847] run started, returning control to user process +2024-09-15 20:27:42,121 WARNING MsgRouterThr:1022023 [router.py:message_loop():77] message_loop has been closed diff --git a/AllinonSAM/wandb/run-20240915_202715-9m35lk87/run-9m35lk87.wandb b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/run-9m35lk87.wandb new file mode 100644 index 0000000000000000000000000000000000000000..8a160d04565b37680bbb83554455953279770e51 Binary files /dev/null and b/AllinonSAM/wandb/run-20240915_202715-9m35lk87/run-9m35lk87.wandb differ diff --git a/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/conda-environment.yaml b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/conda-environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/config.yaml b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1141dde7dcc488e0b0fa21d97d802296203c592b --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +learning_rate: + desc: null + value: 0.0001 +batch_size: + desc: null + value: 8 +num_epochs: + desc: null + value: 200 +reg_multiplier: + desc: null + value: 0 +_wandb: + desc: null + value: + python_version: 3.12.1 + cli_version: 0.17.5 + framework: torch + is_jupyter_run: false + is_kaggle_kernel: true + start_time: 1726418573 + t: + 1: + - 1 + - 41 + - 55 + - 105 + 2: + - 1 + - 41 + - 55 + - 105 + 3: + - 16 + - 23 + 4: 3.12.1 + 5: 0.17.5 + 8: + - 2 + - 5 + 13: linux-x86_64 diff --git a/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/output.log b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..f9833e720649245c9b506d2076ab19e0a5036831 --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/output.log @@ -0,0 +1,34 @@ +Training parameters: +---------- +number of trainable parameters: 1034496 +batch size: 8 +num epochs: 200 +Epoch 0/199 +---------- + + + + + + + + +Traceback (most recent call last): + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py", line 382, in + main_train(data_config, model_config, args.pretrained_path, args.save_path, args.training_strategy, device=args.device) + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py", line 361, in main_train + model = train_dl(model, dataset_dict, dataset_sizes, criterion, optimizer, exp_lr_scheduler, save_path, num_epochs=training_params['num_epochs'], bs=training_params['batch_size'], device=device, retain_graph=retain_graph, neg2pos_ratio=data_config['data']['negative_to_positive_ratio'], reg_multiplier=model_config['training']['reg_multiplier']) + 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/train.py", line 203, in train_dl + outputs, reg_loss = model(inputs, text) + ^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/model.py", line 166, in forward + text_inputs = (clip.tokenize(x_text)).to(self.device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +KeyboardInterrupt \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/requirements.txt b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..4f9a3909a6c3c0ad41a7c480c097a5cffd1b26cb --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/requirements.txt @@ -0,0 +1,507 @@ +Babel==2.14.0 +Brotli==1.1.0 +CoLT5-attention==0.11.1 +Deprecated==1.2.14 +GitPython==3.1.43 +Jinja2==3.1.3 +Mako==1.3.5 +Markdown==3.6 +MarkupSafe==2.1.4 +PTable==0.9.2 +PuLP==2.9.0 +PyGithub==1.59.1 +PyJWT==2.9.0 +PyNaCl==1.5.0 +PyPika==0.48.9 +PySocks==1.7.1 +PyYAML==6.0.1 +Pygments==2.15.1 +Pygments==2.17.2 +SQLAlchemy==2.0.32 +Send2Trash==1.8.2 +SimpleITK==2.3.1 +TorchFix==0.5.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.33.0 +aiohttp==3.9.3 +aiosignal==1.3.1 +albucore==0.0.13 +albumentations==1.4.13 +alembic==1.13.2 +annotated-types==0.6.0 +anyio==4.2.0 +anytree==2.12.1 +appdirs==1.4.4 +arel==0.3.0 +argon2-cffi-bindings==21.2.0 +argon2-cffi==23.1.0 +argparse==1.4.0 +arrow==1.3.0 +asgiref==3.8.1 +asttokens==2.0.5 +async-asgi-testclient==1.4.11 +async-lru==2.0.4 +asyncio==3.4.3 +attrs==23.2.0 +autocommand==2.2.2 +backcall==0.2.0 +backoff==2.2.1 +backports.tarfile==1.2.0 +bcrypt==4.2.0 +beartype==0.18.5 +beautifulsoup4==4.12.3 +bidict==0.23.1 +bitsandbytes==0.43.3 +black==24.8.0 +bleach==6.1.0 +blis==0.7.11 +boto3==1.35.10 +botocore==1.35.10 +build==1.2.1 +cacheout==0.14.1 +cachetools==4.2.4 +catalogue==2.0.10 +certifi==2024.2.2 +cffi==1.16.0 +charset-normalizer==3.3.2 +chex==0.1.86 +chroma-hnswlib==0.7.3 +chromadb==0.4.24 +clarifai-grpc==10.7.3 +clarifai==10.7.0 +click==8.1.7 +clip==1.0 +cloudpathlib==0.16.0 +cohere==5.6.2 +colorama==0.4.6 +coloredlogs==15.0.1 +comm==0.2.1 +confection==0.1.4 +contextlib2==21.6.0 +contourpy==1.2.1 +crewai-tools==0.1.6 +crewai==0.28.8 +cryptography==42.0.8 +cycler==0.12.1 +cymem==2.0.8 +dataclasses-json==0.6.5 +datasets==2.19.1 +debugpy==1.6.7 +decorator==5.1.1 +defusedxml==0.7.1 +deprecation==2.1.0 +dill==0.3.8 +distinctipy==1.3.4 +distro==1.9.0 +docker-pycreds==0.4.0 +docstring_parser==0.16 +efficientnet-pytorch==0.7.1 +einops-exts==0.0.4 +einops==0.8.0 +einx==0.3.0 +embedchain==0.1.113 +en-core-web-sm==3.7.1 +etils==1.9.3 +eval_type_backport==0.2.0 +executing==2.1.0 +faiss-cpu==1.8.0.post1 +fastapi==0.109.0 
+fastavro==1.9.5 +fastjsonschema==2.19.1 +ffmpeg-python==0.2.0 +filelock==3.13.1 +flake8==7.1.1 +flatbuffers==24.3.25 +flax==0.9.0 +flwr-datasets==0.2.0 +flwr==1.10.0 +fonttools==4.50.0 +fqdn==1.5.1 +fr-core-news-sm==3.7.0 +frozendict==2.4.4 +frozenlist==1.4.1 +fsspec==2023.12.2 +ftfy==6.2.3 +future==1.0.0 +gitdb==4.0.11 +giturlparse==0.12.0 +google-ai-generativelanguage==0.6.1 +google-api-core==2.18.0 +google-api-python-client==2.125.0 +google-auth-httplib2==0.2.0 +google-auth==2.29.0 +google-cloud-aiplatform==1.48.0 +google-cloud-bigquery==3.20.1 +google-cloud-core==2.4.1 +google-cloud-resource-manager==1.12.3 +google-cloud-storage==2.16.0 +google-crc32c==1.5.0 +google-generativeai==0.5.0 +google-resumable-media==2.7.0 +google==3.0.0 +googleapis-common-protos==1.63.0 +gptcache==0.1.44 +gpytorch==1.12 +graphviz==0.20.1 +greenlet==3.0.3 +groq==0.5.0 +grpc-google-iam-v1==0.13.0 +grpcio-status==1.62.1 +grpcio-tools==1.62.3 +grpcio==1.64.3 +h11==0.14.0 +h2==4.1.0 +h5py==3.11.0 +hpack==4.0.0 +httpcore==1.0.5 +httplib2==0.22.0 +httptools==0.6.1 +httpx-sse==0.4.0 +httpx==0.27.0 +huggingface-hub==0.24.5 +humanfriendly==10.0 +humanize==4.10.0 +hyperframe==6.0.1 +idna==3.6 +imagecodecs==2024.1.1 +imageio==2.34.0 +importlib_metadata==8.0.0 +importlib_metadata==8.4.0 +importlib_resources==6.4.0 +importlib_resources==6.4.4 +imutils==0.5.4 +inflect==7.3.1 +iniconfig==2.0.0 +inquirerpy==0.3.4 +instructor==0.5.2 +iopath==0.1.10 +ipykernel==5.5.6 +ipykernel==6.28.0 +ipython-genutils==0.2.0 +ipython==7.16.1 +ipython==8.20.0 +isoduration==20.11.0 +iterators==0.0.2 +jaraco.context==5.3.0 +jaraco.functools==4.0.1 +jaraco.text==3.12.1 +jax==0.4.31 +jaxlib==0.4.31 +jaxtyping==0.2.19 +jedi==0.18.1 +jedi==0.19.1 +jiter==0.4.2 +jmespath==1.0.1 +joblib==1.4.2 +json5==0.9.14 +json_repair==0.25.3 +jsonpatch==1.33 +jsonpointer==2.4 +jsonref==1.1.0 +jsonschema-specifications==2023.12.1 +jsonschema==4.20.0 +jupyter-events==0.9.0 +jupyter-lsp==2.2.2 +jupyter_client==8.6.0 +jupyter_core==5.5.0 +jupyter_core==5.7.1 +jupyter_server==2.12.5 +jupyter_server_terminals==0.5.2 +jupyterlab==4.0.12 +jupyterlab_pygments==0.3.0 +jupyterlab_server==2.25.2 +jupyterplot==0.0.3 +kiwisolver==1.4.5 +kubernetes==30.1.0 +lancedb==0.5.7 +langchain-cohere==0.1.5 +langchain-community==0.0.29 +langchain-core==0.1.52 +langchain-experimental==0.0.55 +langchain-groq==0.1.3 +langchain-openai==0.1.7 +langchain-text-splitters==0.0.2 +langchain==0.1.13 +langcodes==3.4.0 +langsmith==0.1.108 +language_data==1.2.0 +lazy_loader==0.4 +lib==4.0.0 +libcst==1.1.0 +lightning-utilities==0.11.2 +linear-operator==0.5.3 +local-attention==1.9.15 +loguru==0.7.2 +lrcurve==1.1.0 +lxml==5.2.2 +marisa-trie==1.1.0 +markdown-it-py==3.0.0 +marshmallow==3.21.2 +matplotlib-inline==0.1.6 +matplotlib==3.8.4 +mccabe==0.7.0 +mdurl==0.1.2 +mem0ai==0.0.20 +mistune==3.0.2 +ml-dtypes==0.4.0 +mmh3==4.1.0 +monai==1.3.1 +monotonic==1.6 +more-itertools==10.3.0 +mpmath==1.3.0 +msgpack==1.0.8 +multidict==6.0.5 +multiprocess==0.70.16 +munch==4.0.0 +murmurhash==1.0.10 +mutagen==1.47.0 +mypy-extensions==1.0.0 +nbclient==0.9.0 +nbconvert==7.14.2 +nbformat==5.9.2 +nest-asyncio==1.6.0 +networkx==3.2.1 +nibabel==5.2.1 +nilearn==0.10.4 +nltk==3.5 +nodeenv==1.9.1 +notebook==7.0.7 +notebook_shim==0.2.3 +numerize==0.12 +numpy==1.26.3 +nvidia-cublas-cu12==12.1.3.1 +nvidia-cuda-cupti-cu12==12.1.105 +nvidia-cuda-nvrtc-cu12==12.1.105 +nvidia-cuda-runtime-cu12==12.1.105 +nvidia-cudnn-cu12==9.1.0.70 +nvidia-cufft-cu12==11.0.2.54 +nvidia-curand-cu12==10.3.2.106 +nvidia-cusolver-cu12==11.4.5.107 
+nvidia-cusparse-cu12==12.1.0.106 +nvidia-nccl-cu12==2.20.5 +nvidia-nvjitlink-cu12==12.3.101 +nvidia-nvtx-cu12==12.1.105 +oauthlib==3.2.2 +onnxruntime==1.19.0 +openai==1.43.0 +opencv-python-headless==4.10.0.84 +opencv-python==4.9.0.80 +opentelemetry-api==1.27.0 +opentelemetry-exporter-otlp-proto-common==1.27.0 +opentelemetry-exporter-otlp-proto-grpc==1.27.0 +opentelemetry-exporter-otlp-proto-http==1.27.0 +opentelemetry-instrumentation-asgi==0.48b0 +opentelemetry-instrumentation-fastapi==0.48b0 +opentelemetry-instrumentation==0.48b0 +opentelemetry-proto==1.27.0 +opentelemetry-sdk==1.27.0 +opentelemetry-semantic-conventions==0.48b0 +opentelemetry-util-http==0.48b0 +opt-einsum==3.3.0 +optax==0.2.3 +orbax-checkpoint==0.6.1 +orjson==3.10.3 +outcome==1.3.0.post0 +overrides==7.7.0 +packaging==23.2 +packaging==24.1 +pandas==2.1.4 +pandocfilters==1.5.1 +parameterized==0.9.0 +parso==0.8.3 +pathspec==0.12.1 +peft==0.12.0 +pendulum==3.0.0 +pexpect==4.8.0 +pexpect==4.9.0 +pfzy==0.3.4 +pickleshare==0.7.5 +pillow==10.2.0 +pinecone-plugin-inference==1.0.3 +pinecone-plugin-interface==0.0.7 +pinecone==5.1.0 +pip==24.2 +platformdirs==3.10.0 +platformdirs==4.2.0 +platformdirs==4.2.2 +pluggy==1.5.0 +portalocker==2.10.1 +posthog==3.6.0 +preshed==3.0.9 +pretrainedmodels==0.7.4 +prettytable==3.11.0 +prometheus-client==0.19.0 +prompt-toolkit==3.0.43 +proto-plus==1.23.0 +protobuf==3.20.3 +psutil==5.9.0 +ptflops==0.7.3 +ptyprocess==0.7.0 +pulsar-client==3.5.0 +pure-eval==0.2.2 +py==1.11.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pycodestyle==2.12.1 +pycparser==2.21 +pycryptodome==3.20.0 +pycryptodomex==3.20.0 +pydantic==2.8.2 +pydantic_core==2.20.1 +pydicom==2.4.4 +pyflakes==3.2.0 +pylance==0.9.18 +pynrrd==0.4.3 +pyparsing==3.1.2 +pypdf==4.3.1 +pyproject_hooks==1.1.0 +pyright==1.1.378 +pysbd==0.3.4 +pytest==8.3.2 +python-dateutil==2.8.2 +python-dotenv==1.0.0 +python-json-logger==2.0.7 +python-magic==0.4.27 +python-multipart==0.0.5 +python-rapidjson==1.20 +pytorch-lightning==2.2.1 +pytorch-metric-learning==2.6.0 +pytube==15.0.0 +pytz==2024.1 +pyzmq==25.1.2 +qdrant-client==1.11.1 +rarfile==4.2 +ratelimit==2.2.1 +ratelimiter==1.2.0.post0 +referencing==0.33.0 +regex==2023.12.25 +requests-oauthlib==2.0.0 +requests-toolbelt==1.0.0 +requests==2.31.0 +retry==0.9.2 +rfc3339-validator==0.1.4 +rfc3986-validator==0.1.1 +rich==13.7.1 +rpds-py==0.17.1 +rsa==4.9 +s3transfer==0.10.2 +safetensors==0.4.2 +schedulefree==1.2.7 +schema==0.7.5 +scikit-image==0.23.1 +scikit-learn==1.5.0 +scipy==1.13.0 +seaborn==0.13.2 +segmentation-models-pytorch==0.3.3 +selenium==4.24.0 +semver==3.0.2 +sentence-transformers==3.0.1 +sentencepiece==0.2.0 +sentry-sdk==2.12.0 +setproctitle==1.3.3 +setuptools==74.1.1 +shapely==2.0.2 +shellingham==1.5.4 +shortuuid==1.0.13 +six==1.16.0 +skypilot==0.6.1 +smart-open==6.4.0 +smmap==5.0.1 +sniffio==1.3.0 +sortedcontainers==2.4.0 +soupsieve==2.5 +spacy-legacy==3.0.12 +spacy-loggers==1.0.5 +spacy==3.7.4 +srsly==2.4.8 +sse-starlette==2.1.0 +stack-data==0.2.0 +starlette==0.35.1 +stringcase==1.2.0 +stripe==10.10.0 +supervisely==6.73.181 +swarms-cloud==0.3.7 +swarms-memory==0.1.2 +swarms==5.6.6 +sympy==1.12 +tabulate==0.9.0 +tenacity==8.5.0 +tensorboard-data-server==0.7.2 +tensorboard==2.17.0 +tensorboardX==2.6.2.2 +tensorstore==0.1.64 +termcolor==2.4.0 +terminado==0.18.0 +thinc==8.2.3 +threadpoolctl==3.5.0 +tifffile==2024.2.12 +tiktoken==0.7.0 +time-machine==2.15.0 +timm==0.9.2 +tinycss2==1.2.1 +tokenizers==0.19.1 +toml==0.10.2 +tomli==2.0.1 +tomli==2.0.1 +tomli_w==1.0.0 
+toolz==0.12.1 +torch==2.4.0 +torchmetrics==1.3.2 +torchsummary==1.5.1 +torchtext==0.5.0 +torchview==0.2.6 +torchvision==0.19.0 +tornado==6.3.3 +tornado==6.4 +tqdm==4.66.5 +traitlets==5.14.1 +traitlets==5.7.1 +transformers==4.44.2 +trimesh==3.23.5 +trio-websocket==0.11.1 +trio==0.26.2 +triton==3.0.0 +tritonclient==2.49.0 +typeguard==4.3.0 +typeguard==4.3.0 +typer==0.9.4 +types-python-dateutil==2.8.19.20240106 +types-requests==2.32.0.20240712 +typing-inspect==0.9.0 +typing_extensions==4.12.2 +typing_extensions==4.12.2 +tzdata==2024.1 +uri-template==1.3.0 +uritemplate==4.1.1 +urllib3==2.2.2 +uvicorn==0.30.6 +uvloop==0.20.0 +varname==0.13.3 +vector-quantize-pytorch==1.15.6 +vertexai==1.46.0 +vision-mamba==0.1.0 +wandb==0.17.5 +wasabi==1.1.2 +watchfiles==0.24.0 +wcwidth==0.2.13 +weasel==0.3.4 +webcolors==1.13 +webencodings==0.5.1 +websocket-client==1.8.0 +websockets==10.4 +wheel==0.41.2 +wheel==0.43.0 +wrapt==1.16.0 +wsproto==1.2.0 +xxhash==3.4.1 +yacs==0.1.8 +yarl==1.9.4 +youtube-transcript-api==0.6.2 +yt-dlp==2023.12.30 +zetascale==2.7.0 +zipp==3.19.2 +zipp==3.20.1 +zstd==1.5.5.1 \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/wandb-metadata.json b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..d823891727946b069ce133c07cc5bb67c292832e --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/wandb-metadata.json @@ -0,0 +1,210 @@ +{ + "os": "Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.35", + "python": "3.12.1", + "heartbeatAt": "2024-09-15T16:42:53.992440", + "startedAt": "2024-09-15T16:42:53.216863", + "docker": null, + "cuda": null, + "args": [ + "--model_config", + "model_svdtuning.yml", + "--data_config", + "config_arcade.yml", + "--save_path", + "./temp.pth" + ], + "state": "running", + "program": "/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py", + "codePathLocal": "driver_scratchpad.py", + "codePath": "driver_scratchpad.py", + "git": { + "remote": "https://github.com/JayParanjape/SVDSAM.git", + "commit": "5936d0eff64d84fbefed6ecfe4bcc841459c2fc3" + }, + "cpu_count": 16, + "cpu_count_logical": 32, + "cpu_freq": { + "current": 3.910500000000002, + "min": 2200.0, + "max": 3900.0 + }, + "cpu_freq_per_core": [ + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 4.322, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + 
{ + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3.9, + "min": 2200.0, + "max": 3900.0 + } + ], + "disk": { + "/": { + "total": 1.0, + "used": 0.04192352294921875 + } + }, + "gpu": "NVIDIA GeForce RTX 4090", + "gpu_count": 1, + "gpu_devices": [ + { + "name": "NVIDIA GeForce RTX 4090", + "memory_total": 25757220864 + } + ], + "memory": { + "total": 62.65229415893555 + } +} diff --git a/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/wandb-summary.json b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8cc12bfe91c134785c665544a6f0142ff356421d --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_204253-1hswztum/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 19}} \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20240915_204253-1hswztum/logs/debug-internal.log b/AllinonSAM/wandb/run-20240915_204253-1hswztum/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..ce487ca91250f6b3c3c261c84b6776d3bd258946 --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_204253-1hswztum/logs/debug-internal.log @@ -0,0 +1,231 @@ +2024-09-15 20:42:53,237 INFO StreamThr :1051206 [internal.py:wandb_internal():85] W&B internal server running at pid: 1051206, started at: 2024-09-15 20:42:53.235049 +2024-09-15 20:42:53,238 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: status +2024-09-15 20:42:53,239 INFO WriterThread:1051206 [datastore.py:open_for_write():87] open: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/run-1hswztum.wandb +2024-09-15 20:42:53,241 DEBUG SenderThread:1051206 [sender.py:send():379] send: header +2024-09-15 20:42:53,243 DEBUG SenderThread:1051206 [sender.py:send():379] send: run +2024-09-15 20:42:53,834 INFO SenderThread:1051206 [dir_watcher.py:__init__():211] watching files in: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files +2024-09-15 20:42:53,834 INFO SenderThread:1051206 [sender.py:_start_run_threads():1188] run started: 1hswztum with start time 1726418573.235347 +2024-09-15 20:42:53,838 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: check_version +2024-09-15 20:42:53,838 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: check_version +2024-09-15 20:42:53,913 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: run_start +2024-09-15 20:42:53,944 DEBUG HandlerThread:1051206 [system_info.py:__init__():26] System info init +2024-09-15 20:42:53,945 DEBUG HandlerThread:1051206 [system_info.py:__init__():41] System info init done +2024-09-15 20:42:53,945 INFO HandlerThread:1051206 [system_monitor.py:start():194] Starting system monitor +2024-09-15 20:42:53,945 INFO SystemMonitor:1051206 
[system_monitor.py:_start():158] Starting system asset monitoring threads +2024-09-15 20:42:53,945 INFO HandlerThread:1051206 [system_monitor.py:probe():214] Collecting system info +2024-09-15 20:42:53,945 INFO SystemMonitor:1051206 [interfaces.py:start():188] Started cpu monitoring +2024-09-15 20:42:53,945 INFO SystemMonitor:1051206 [interfaces.py:start():188] Started disk monitoring +2024-09-15 20:42:53,946 INFO SystemMonitor:1051206 [interfaces.py:start():188] Started gpu monitoring +2024-09-15 20:42:53,946 INFO SystemMonitor:1051206 [interfaces.py:start():188] Started memory monitoring +2024-09-15 20:42:53,946 INFO SystemMonitor:1051206 [interfaces.py:start():188] Started network monitoring +2024-09-15 20:42:53,992 DEBUG HandlerThread:1051206 [system_info.py:probe():152] Probing system +2024-09-15 20:42:53,994 DEBUG HandlerThread:1051206 [system_info.py:_probe_git():137] Probing git +2024-09-15 20:42:54,002 DEBUG HandlerThread:1051206 [system_info.py:_probe_git():145] Probing git done +2024-09-15 20:42:54,002 DEBUG HandlerThread:1051206 [system_info.py:probe():200] Probing system done +2024-09-15 20:42:54,002 DEBUG HandlerThread:1051206 [system_monitor.py:probe():223] {'os': 'Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.35', 'python': '3.12.1', 'heartbeatAt': '2024-09-15T16:42:53.992440', 'startedAt': '2024-09-15T16:42:53.216863', 'docker': None, 'cuda': None, 'args': ('--model_config', 'model_svdtuning.yml', '--data_config', 'config_arcade.yml', '--save_path', './temp.pth'), 'state': 'running', 'program': '/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py', 'codePathLocal': 'driver_scratchpad.py', 'codePath': 'driver_scratchpad.py', 'git': {'remote': 'https://github.com/JayParanjape/SVDSAM.git', 'commit': '5936d0eff64d84fbefed6ecfe4bcc841459c2fc3'}, 'cpu_count': 16, 'cpu_count_logical': 32, 'cpu_freq': {'current': 3.910500000000002, 'min': 2200.0, 'max': 3900.0}, 'cpu_freq_per_core': [{'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 4.322, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}, {'current': 3.9, 'min': 2200.0, 'max': 3900.0}], 'disk': {'/': {'total': 1.0, 'used': 
0.04192352294921875}}, 'gpu': 'NVIDIA GeForce RTX 4090', 'gpu_count': 1, 'gpu_devices': [{'name': 'NVIDIA GeForce RTX 4090', 'memory_total': 25757220864}], 'memory': {'total': 62.65229415893555}} +2024-09-15 20:42:54,002 INFO HandlerThread:1051206 [system_monitor.py:probe():224] Finished collecting system info +2024-09-15 20:42:54,002 INFO HandlerThread:1051206 [system_monitor.py:probe():227] Publishing system info +2024-09-15 20:42:54,002 DEBUG HandlerThread:1051206 [system_info.py:_save_conda():209] Saving list of conda packages installed into the current environment +2024-09-15 20:42:54,005 ERROR HandlerThread:1051206 [system_info.py:_save_conda():223] Error saving conda packages: [Errno 2] No such file or directory: 'conda' +Traceback (most recent call last): + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/site-packages/wandb/sdk/internal/system/system_info.py", line 216, in _save_conda + subprocess.call( + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/subprocess.py", line 389, in call + with Popen(*popenargs, **kwargs) as p: + ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/subprocess.py", line 1026, in __init__ + self._execute_child(args, executable, preexec_fn, close_fds, + File "/home/sarim.hashmi/anaconda3/envs/AI702/lib/python3.12/subprocess.py", line 1950, in _execute_child + raise child_exception_type(errno_num, err_msg, err_filename) +FileNotFoundError: [Errno 2] No such file or directory: 'conda' +2024-09-15 20:42:54,006 DEBUG HandlerThread:1051206 [system_info.py:_save_conda():224] Saving conda packages done +2024-09-15 20:42:54,009 INFO HandlerThread:1051206 [system_monitor.py:probe():229] Finished publishing system info +2024-09-15 20:42:54,019 DEBUG SenderThread:1051206 [sender.py:send():379] send: files +2024-09-15 20:42:54,019 INFO SenderThread:1051206 [sender.py:_save_file():1454] saving file wandb-metadata.json with policy now +2024-09-15 20:42:54,350 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: python_packages +2024-09-15 20:42:54,350 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: stop_status +2024-09-15 20:42:54,351 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:42:54,351 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: python_packages +2024-09-15 20:42:54,355 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: stop_status +2024-09-15 20:42:54,706 DEBUG SenderThread:1051206 [sender.py:send():379] send: telemetry +2024-09-15 20:42:54,838 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/wandb-metadata.json +2024-09-15 20:42:54,838 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/requirements.txt +2024-09-15 20:42:54,838 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:42:54,838 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_created():271] file/dir created: 
/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/conda-environment.yaml +2024-09-15 20:42:54,984 INFO wandb-upload_0:1051206 [upload_job.py:push():130] Uploaded file /tmp/slurm-sarim.hashmi-40491/tmp4_wubgvawandb/yk80p21x-wandb-metadata.json +2024-09-15 20:42:55,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:42:56,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:42:56,840 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:42:57,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:42:58,270 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:42:58,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:42:58,842 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:42:59,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:00,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:00,842 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:43:01,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:02,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:02,843 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:43:03,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:03,471 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:43:04,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:04,844 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:43:05,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:06,349 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:06,844 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:43:07,350 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:08,350 DEBUG HandlerThread:1051206 
[handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:08,491 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:43:08,845 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:43:09,348 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: stop_status +2024-09-15 20:43:09,349 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: stop_status +2024-09-15 20:43:09,350 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:10,350 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:10,846 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:43:11,350 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:12,350 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:12,846 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:43:13,350 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:13,629 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:43:13,768 DEBUG SenderThread:1051206 [sender.py:send():379] send: exit +2024-09-15 20:43:13,768 INFO SenderThread:1051206 [sender.py:send_exit():586] handling exit code: 255 +2024-09-15 20:43:13,768 INFO SenderThread:1051206 [sender.py:send_exit():588] handling runtime: 19 +2024-09-15 20:43:13,772 INFO SenderThread:1051206 [sender.py:_save_file():1454] saving file wandb-summary.json with policy end +2024-09-15 20:43:13,772 INFO SenderThread:1051206 [sender.py:send_exit():594] send defer +2024-09-15 20:43:13,772 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:13,772 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 0 +2024-09-15 20:43:13,772 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:13,772 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 0 +2024-09-15 20:43:13,772 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 1 +2024-09-15 20:43:13,772 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:13,772 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 1 +2024-09-15 20:43:13,772 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:13,772 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 1 +2024-09-15 20:43:13,772 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 2 +2024-09-15 20:43:13,772 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:13,772 INFO HandlerThread:1051206 
[handler.py:handle_request_defer():184] handle defer: 2 +2024-09-15 20:43:13,772 INFO HandlerThread:1051206 [system_monitor.py:finish():203] Stopping system monitor +2024-09-15 20:43:13,773 DEBUG SystemMonitor:1051206 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-09-15 20:43:13,773 INFO HandlerThread:1051206 [interfaces.py:finish():200] Joined cpu monitor +2024-09-15 20:43:13,773 DEBUG SystemMonitor:1051206 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-09-15 20:43:13,773 INFO HandlerThread:1051206 [interfaces.py:finish():200] Joined disk monitor +2024-09-15 20:43:13,773 DEBUG SystemMonitor:1051206 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-09-15 20:43:13,828 INFO HandlerThread:1051206 [interfaces.py:finish():200] Joined gpu monitor +2024-09-15 20:43:13,828 INFO HandlerThread:1051206 [interfaces.py:finish():200] Joined memory monitor +2024-09-15 20:43:13,828 INFO HandlerThread:1051206 [interfaces.py:finish():200] Joined network monitor +2024-09-15 20:43:13,829 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:13,829 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 2 +2024-09-15 20:43:13,829 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 3 +2024-09-15 20:43:13,829 DEBUG SenderThread:1051206 [sender.py:send():379] send: stats +2024-09-15 20:43:13,829 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:13,829 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 3 +2024-09-15 20:43:13,829 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:13,829 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 3 +2024-09-15 20:43:13,829 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 4 +2024-09-15 20:43:13,830 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:13,830 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 4 +2024-09-15 20:43:13,830 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:13,830 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 4 +2024-09-15 20:43:13,830 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 5 +2024-09-15 20:43:13,830 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:13,830 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 5 +2024-09-15 20:43:13,830 DEBUG SenderThread:1051206 [sender.py:send():379] send: summary +2024-09-15 20:43:13,832 INFO SenderThread:1051206 [sender.py:_save_file():1454] saving file wandb-summary.json with policy end +2024-09-15 20:43:13,833 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:13,833 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 5 +2024-09-15 20:43:13,833 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 6 +2024-09-15 20:43:13,833 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:13,833 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 6 +2024-09-15 20:43:13,833 DEBUG SenderThread:1051206 
[sender.py:send_request():406] send_request: defer +2024-09-15 20:43:13,833 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 6 +2024-09-15 20:43:13,834 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: status_report +2024-09-15 20:43:13,848 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_created():271] file/dir created: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/wandb-summary.json +2024-09-15 20:43:14,084 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 7 +2024-09-15 20:43:14,084 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:14,084 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 7 +2024-09-15 20:43:14,084 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:14,084 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 7 +2024-09-15 20:43:14,768 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:43:14,773 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 8 +2024-09-15 20:43:14,773 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:43:14,773 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:14,773 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 8 +2024-09-15 20:43:14,773 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:14,773 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 8 +2024-09-15 20:43:14,773 INFO SenderThread:1051206 [job_builder.py:build():440] Attempting to build job artifact +2024-09-15 20:43:14,774 INFO SenderThread:1051206 [job_builder.py:_get_source_type():569] is repo sourced job +2024-09-15 20:43:14,789 INFO SenderThread:1051206 [job_builder.py:build():545] adding wandb-job metadata file +2024-09-15 20:43:14,794 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 9 +2024-09-15 20:43:14,795 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:14,795 DEBUG SenderThread:1051206 [sender.py:send():379] send: artifact +2024-09-15 20:43:14,795 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 9 +2024-09-15 20:43:14,849 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/config.yaml +2024-09-15 20:43:14,849 INFO Thread-12 :1051206 [dir_watcher.py:_on_file_modified():288] file/dir modified: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:43:15,768 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:43:16,001 INFO SenderThread:1051206 [sender.py:send_artifact():1537] sent artifact job-https___github.com_JayParanjape_SVDSAM.git_driver_scratchpad.py - {'id': 'QXJ0aWZhY3Q6MTIyOTY1MDQ5Mw==', 'state': 'COMMITTED', 'artifactSequence': {'id': 'QXJ0aWZhY3RDb2xsZWN0aW9uOjQ1NjQ4NDk4Ng==', 'latestArtifact': {'id': 'QXJ0aWZhY3Q6MTIyOTY1MDQ5Mw==', 'versionIndex': 0}}} +2024-09-15 20:43:16,001 DEBUG SenderThread:1051206 
[sender.py:send_request():406] send_request: defer +2024-09-15 20:43:16,001 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 9 +2024-09-15 20:43:16,001 INFO SenderThread:1051206 [dir_watcher.py:finish():358] shutting down directory watcher +2024-09-15 20:43:16,850 INFO SenderThread:1051206 [dir_watcher.py:finish():388] scan: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files +2024-09-15 20:43:16,850 INFO SenderThread:1051206 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/wandb-summary.json wandb-summary.json +2024-09-15 20:43:16,851 INFO SenderThread:1051206 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/config.yaml config.yaml +2024-09-15 20:43:16,851 INFO SenderThread:1051206 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/requirements.txt requirements.txt +2024-09-15 20:43:16,853 INFO SenderThread:1051206 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log output.log +2024-09-15 20:43:16,854 INFO SenderThread:1051206 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/wandb-metadata.json wandb-metadata.json +2024-09-15 20:43:16,854 INFO SenderThread:1051206 [dir_watcher.py:finish():402] scan save: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/conda-environment.yaml conda-environment.yaml +2024-09-15 20:43:16,855 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 10 +2024-09-15 20:43:16,855 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:43:16,855 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:16,856 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 10 +2024-09-15 20:43:16,856 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:16,856 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 10 +2024-09-15 20:43:16,856 INFO SenderThread:1051206 [file_pusher.py:finish():169] shutting down file pusher +2024-09-15 20:43:17,397 INFO wandb-upload_0:1051206 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/wandb-summary.json +2024-09-15 20:43:17,573 INFO wandb-upload_1:1051206 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/config.yaml +2024-09-15 20:43:17,627 INFO wandb-upload_3:1051206 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/output.log +2024-09-15 20:43:17,769 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:43:17,769 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:43:17,832 INFO 
wandb-upload_2:1051206 [upload_job.py:push():130] Uploaded file /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/files/requirements.txt +2024-09-15 20:43:18,033 INFO Thread-11 (_thread_body):1051206 [sender.py:transition_state():614] send defer: 11 +2024-09-15 20:43:18,033 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:18,033 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 11 +2024-09-15 20:43:18,033 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:18,033 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 11 +2024-09-15 20:43:18,033 INFO SenderThread:1051206 [file_pusher.py:join():175] waiting for file pusher +2024-09-15 20:43:18,033 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 12 +2024-09-15 20:43:18,034 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:18,034 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 12 +2024-09-15 20:43:18,034 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:18,034 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 12 +2024-09-15 20:43:18,034 INFO SenderThread:1051206 [file_stream.py:finish():601] file stream finish called +2024-09-15 20:43:18,266 INFO SenderThread:1051206 [file_stream.py:finish():605] file stream finish is done +2024-09-15 20:43:18,266 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 13 +2024-09-15 20:43:18,266 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:18,266 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 13 +2024-09-15 20:43:18,267 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:18,267 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 13 +2024-09-15 20:43:18,267 INFO SenderThread:1051206 [sender.py:transition_state():614] send defer: 14 +2024-09-15 20:43:18,267 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: defer +2024-09-15 20:43:18,267 DEBUG SenderThread:1051206 [sender.py:send():379] send: final +2024-09-15 20:43:18,267 INFO HandlerThread:1051206 [handler.py:handle_request_defer():184] handle defer: 14 +2024-09-15 20:43:18,267 DEBUG SenderThread:1051206 [sender.py:send():379] send: footer +2024-09-15 20:43:18,267 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: defer +2024-09-15 20:43:18,267 INFO SenderThread:1051206 [sender.py:send_request_defer():610] handle sender defer: 14 +2024-09-15 20:43:18,267 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:43:18,268 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:43:18,268 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: server_info +2024-09-15 20:43:18,268 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: poll_exit +2024-09-15 20:43:18,268 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: server_info +2024-09-15 20:43:18,269 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: get_summary +2024-09-15 20:43:18,269 DEBUG 
HandlerThread:1051206 [handler.py:handle_request():158] handle_request: sampled_history +2024-09-15 20:43:18,269 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: internal_messages +2024-09-15 20:43:18,494 DEBUG SenderThread:1051206 [sender.py:send_request():406] send_request: poll_exit +2024-09-15 20:43:18,495 INFO MainThread:1051206 [wandb_run.py:_footer_history_summary_info():4016] rendering history +2024-09-15 20:43:18,495 INFO MainThread:1051206 [wandb_run.py:_footer_history_summary_info():4048] rendering summary +2024-09-15 20:43:18,495 INFO MainThread:1051206 [wandb_run.py:_footer_sync_info():3975] logging synced files +2024-09-15 20:43:18,495 DEBUG HandlerThread:1051206 [handler.py:handle_request():158] handle_request: shutdown +2024-09-15 20:43:18,495 INFO HandlerThread:1051206 [handler.py:finish():882] shutting down handler +2024-09-15 20:43:19,268 INFO WriterThread:1051206 [datastore.py:close():296] close: /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/run-1hswztum.wandb +2024-09-15 20:43:19,495 INFO SenderThread:1051206 [sender.py:finish():1615] shutting down sender +2024-09-15 20:43:19,495 INFO SenderThread:1051206 [file_pusher.py:finish():169] shutting down file pusher +2024-09-15 20:43:19,495 INFO SenderThread:1051206 [file_pusher.py:join():175] waiting for file pusher diff --git a/AllinonSAM/wandb/run-20240915_204253-1hswztum/logs/debug.log b/AllinonSAM/wandb/run-20240915_204253-1hswztum/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..cc4b12fbe8f025c5a5ee07f53d5bffdb331d4dc9 --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_204253-1hswztum/logs/debug.log @@ -0,0 +1,29 @@ +2024-09-15 20:42:53,230 INFO MainThread:1050760 [wandb_setup.py:_flush():76] Current SDK version is 0.17.5 +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_setup.py:_flush():76] Configure stats pid to 1050760 +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_setup.py:_flush():76] Loading settings from /home/sarim.hashmi/.config/wandb/settings +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_setup.py:_flush():76] Loading settings from /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/settings +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_setup.py:_flush():76] Loading settings from environment variables: {'api_key': '***REDACTED***'} +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': 'driver_scratchpad.py', 'program_abspath': '/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py', 'program': '/l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/driver_scratchpad.py'} +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_init.py:_log_setup():529] Logging user logs to /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/logs/debug.log +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_init.py:_log_setup():530] Logging internal logs to /l/users/sarim.hashmi/for_the_little_interns/SVD_vs_ortho/arcade/SVD/wandb/run-20240915_204253-1hswztum/logs/debug-internal.log +2024-09-15 20:42:53,231 INFO 
MainThread:1050760 [wandb_init.py:init():569] calling init triggers +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_init.py:init():576] wandb.init called with sweep_config: {} +config: {'learning_rate': 0.0001, 'batch_size': 8, 'num_epochs': 200, 'reg_multiplier': 0} +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_init.py:init():619] starting backend +2024-09-15 20:42:53,231 INFO MainThread:1050760 [wandb_init.py:init():623] setting up manager +2024-09-15 20:42:53,233 INFO MainThread:1050760 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-09-15 20:42:53,234 INFO MainThread:1050760 [wandb_init.py:init():631] backend started and connected +2024-09-15 20:42:53,236 INFO MainThread:1050760 [wandb_init.py:init():720] updated telemetry +2024-09-15 20:42:53,242 INFO MainThread:1050760 [wandb_init.py:init():753] communicating run to backend with 90.0 second timeout +2024-09-15 20:42:53,837 INFO MainThread:1050760 [wandb_run.py:_on_init():2435] communicating current version +2024-09-15 20:42:53,907 INFO MainThread:1050760 [wandb_run.py:_on_init():2444] got version response upgrade_message: "wandb version 0.18.0 is available! To upgrade, please run:\n $ pip install wandb --upgrade" + +2024-09-15 20:42:53,908 INFO MainThread:1050760 [wandb_init.py:init():804] starting run threads in backend +2024-09-15 20:42:54,348 INFO MainThread:1050760 [wandb_run.py:_console_start():2413] atexit reg +2024-09-15 20:42:54,348 INFO MainThread:1050760 [wandb_run.py:_redirect():2255] redirect: wrap_raw +2024-09-15 20:42:54,349 INFO MainThread:1050760 [wandb_run.py:_redirect():2320] Wrapping output streams. +2024-09-15 20:42:54,349 INFO MainThread:1050760 [wandb_run.py:_redirect():2345] Redirects installed. 
+2024-09-15 20:42:54,350 INFO MainThread:1050760 [wandb_init.py:init():847] run started, returning control to user process +2024-09-15 20:43:19,497 WARNING MsgRouterThr:1050760 [router.py:message_loop():77] message_loop has been closed diff --git a/AllinonSAM/wandb/run-20240915_204253-1hswztum/run-1hswztum.wandb b/AllinonSAM/wandb/run-20240915_204253-1hswztum/run-1hswztum.wandb new file mode 100644 index 0000000000000000000000000000000000000000..5b500c791f3dccc6485fb541c731cac127e3879b Binary files /dev/null and b/AllinonSAM/wandb/run-20240915_204253-1hswztum/run-1hswztum.wandb differ diff --git a/AllinonSAM/wandb/run-20240915_215641-1usjns7w/files/conda-environment.yaml b/AllinonSAM/wandb/run-20240915_215641-1usjns7w/files/conda-environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/AllinonSAM/wandb/run-20240915_215641-1usjns7w/files/config.yaml b/AllinonSAM/wandb/run-20240915_215641-1usjns7w/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d889ad9ce52764912e1feffb44d38101d9082589 --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_215641-1usjns7w/files/config.yaml @@ -0,0 +1,45 @@ +wandb_version: 1 + +learning_rate: + desc: null + value: 0.0001 +batch_size: + desc: null + value: 8 +num_epochs: + desc: null + value: 200 +reg_multiplier: + desc: null + value: 0 +_wandb: + desc: null + value: + python_version: 3.12.1 + cli_version: 0.17.5 + framework: torch + is_jupyter_run: false + is_kaggle_kernel: true + start_time: 1726423001 + t: + 1: + - 1 + - 41 + - 55 + - 105 + 2: + - 1 + - 41 + - 55 + - 105 + 3: + - 2 + - 16 + - 23 + - 62 + 4: 3.12.1 + 5: 0.17.5 + 8: + - 2 + - 5 + 13: linux-x86_64 diff --git a/AllinonSAM/wandb/run-20240915_215641-1usjns7w/files/output.log b/AllinonSAM/wandb/run-20240915_215641-1usjns7w/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..b2e5ef86da208e4fd684c89d61f9635c57c036e2 --- /dev/null +++ b/AllinonSAM/wandb/run-20240915_215641-1usjns7w/files/output.log @@ -0,0 +1,5133 @@ +Training parameters: +---------- +number of trainable parameters: 1034496 +batch size: 8 +num epochs: 200 +Epoch 0/199 +---------- + + + + + + + + + + + + + + + + +train Epoch 0: 99%|▉| 124/125 [00:46<00:00, 4.23it/s, loss=0.379, dice=tensor( + + + + + + +val Epoch 0: 90%|▉| 113/125 [00:11<00:01, 10.31it/s, loss=0.758, dice=tensor(2. + +val Loss: 0.7222 Dice: 0.2643 +Epoch 1/199 +---------- + + + + + + + + + + + + + + +train Epoch 1: 98%|▉| 123/125 [00:29<00:00, 4.23it/s, loss=0.359, dice=tensor( + + + + + + +val Epoch 1: 87%|▊| 109/125 [00:11<00:01, 10.40it/s, loss=0.364, dice=tensor(5. +val Loss: 0.3830 Dice: 0.6247 +Epoch 2/199 + +---------- + + + + + + + + + + + + + + +train Epoch 2: 98%|▉| 123/125 [00:29<00:00, 4.22it/s, loss=0.462, dice=tensor( + + + + + + +val Epoch 2: 91%|▉| 114/125 [00:11<00:01, 10.35it/s, loss=0.407, dice=tensor(5. +val Loss: 0.3586 Dice: 0.6460 +Epoch 3/199 + + + + + + + + + + + + + + + +train Epoch 3: 92%|▉| 115/125 [00:28<00:02, 4.18it/s, loss=0.359, dice=tensor( + + + + + + + +val Epoch 3: 93%|▉| 116/125 [00:11<00:00, 10.34it/s, loss=0.406, dice=tensor(5. +val Loss: 0.3500 Dice: 0.6536 +Epoch 4/199 + + + + + + + + + + + + + + + +train Epoch 4: 94%|▉| 117/125 [00:28<00:01, 4.19it/s, loss=0.332, dice=tensor( + + + + + + + +val Epoch 4: 96%|▉| 120/125 [00:12<00:00, 10.42it/s, loss=0.297, dice=tensor(5. 
+val Loss: 0.3436 Dice: 0.6631 +Epoch 5/199 + + + + + + + + + + + + + + + +train Epoch 5: 94%|▉| 118/125 [00:28<00:01, 4.19it/s, loss=0.374, dice=tensor( + + + + + + +val Epoch 5: 82%|▊| 103/125 [00:10<00:02, 10.34it/s, loss=0.332, dice=tensor(5. + +val Epoch 5: 100%|█| 125/125 [00:12<00:00, 10.57it/s, loss=0.373, dice=tensor(5. +Epoch 6/199 + + + + + + + + + + + + + + + +train Epoch 6: 95%|▉| 119/125 [00:29<00:01, 4.20it/s, loss=0.386, dice=tensor( + + + + + + +val Epoch 6: 86%|▊| 107/125 [00:10<00:01, 10.36it/s, loss=0.293, dice=tensor(5. + +val Loss: 0.3215 Dice: 0.6816 +Epoch 7/199 +---------- + + + + + + + + + + + + + + +train Epoch 7: 97%|▉| 121/125 [00:29<00:00, 4.21it/s, loss=0.32, dice=tensor(5 + + + + + + +val Epoch 7: 87%|▊| 109/125 [00:11<00:01, 10.35it/s, loss=0.296, dice=tensor(5. + +val Loss: 0.3181 Dice: 0.6864 +Epoch 8/199 +---------- + + + + + + + + + + + + + + +train Epoch 8: 98%|▉| 122/125 [00:29<00:00, 4.22it/s, loss=0.39, dice=tensor(5 + + + + + + +val Epoch 8: 88%|▉| 110/125 [00:11<00:01, 10.26it/s, loss=0.386, dice=tensor(5. + +val Loss: 0.3002 Dice: 0.7038 +Epoch 9/199 +---------- + + + + + + + + + + + + + + +train Epoch 9: 98%|▉| 122/125 [00:29<00:00, 4.18it/s, loss=0.334, dice=tensor( + + + + + + +val Epoch 9: 90%|▉| 113/125 [00:11<00:01, 10.30it/s, loss=0.374, dice=tensor(5. + +train Epoch 10: 0%| | 0/125 [00:00 + main_train(data_config, model_config, args.pretrained_path, args.save_path, args.training_strategy, device=args.device) + File "/home/abdelrahman.elsayed/sarim_code/train_baselines.py", line 234, in main_train + model = train_dl( + File "/home/abdelrahman.elsayed/sarim_code/train.py", line 217, in train_dl + outputs, reg_loss = model(inputs, text) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 711, in forward + return self.soft(self._forward_impl(x)),0 + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 636, in _forward_impl + x1 = self.layer1(x) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/container.py", line 217, in forward + input = module(input) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 331, in forward + out = self.hight_block(out) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 167, in forward + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) +torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.50 GiB (GPU 0; 23.65 GiB total capacity; 23.03 GiB already allocated; 59.06 MiB free; 23.12 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/files/requirements.txt b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f0720690c2daa9a35ddb12cd902cf9cf8de43d99 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/files/requirements.txt @@ -0,0 +1,188 @@ +absl-py==1.3.0 +addict==2.4.0 +appdirs==1.4.4 +argparse==1.4.0 +asttokens==2.0.5 +backcall==0.2.0 +batchgenerators==0.25 +beautifulsoup4==4.11.1 +cachetools==5.2.0 +certifi==2022.12.7 +chardet==3.0.4 +charset-normalizer==3.1.0 +click==8.1.3 +cmake==3.26.3 +comm==0.1.2 +contextlib2==21.6.0 +contourpy==1.0.7 +crfseg==1.0.0 +cycler==0.11.0 +debugpy==1.5.1 +decorator==5.1.1 +docker-pycreds==0.4.0 +efficientnet-pytorch==0.7.1 +einops==0.8.0 +entrypoints==0.3 +exceptiongroup==1.1.1 +executing==0.8.3 +filelock==3.8.2 +flake8==3.7.9 +flit-core==3.8.0 +fonttools==4.39.3 +ftfy==6.1.1 +future==0.18.2 +gdown==4.6.0 +gensim==4.3.1 +gitdb==4.0.10 +gitpython==3.1.31 +google-auth-oauthlib==0.4.6 +google-auth==2.15.0 +googletrans==3.0.0 +grpcio==1.51.1 +h11==0.9.0 +h2==3.2.0 +h5py==3.8.0 +hpack==3.0.0 +hstspreload==2023.1.1 +httpcore==0.9.1 +httpx==0.13.3 +huggingface-hub==0.11.1 +hyperframe==5.2.0 +idna==2.10 +imageio==2.28.0 +importlib-metadata==5.2.0 +importlib-resources==5.12.0 +iniconfig==2.0.0 +ipykernel==6.19.2 +ipynb-py-convert==0.4.6 +ipython==8.12.0 +isort==4.3.21 +jedi==0.18.1 +jinja2==3.1.2 +joblib==1.2.0 +jupyter-client==8.1.0 +jupyter-core==5.3.0 +kiwisolver==1.4.4 +lazy-loader==0.2 +linecache2==1.0.0 +lit==16.0.3 +littleutils==0.2.2 +llvmlite==0.41.1 +markdown==3.4.1 +markupsafe==2.1.1 +matplotlib-inline==0.1.6 +matplotlib==3.7.1 +mccabe==0.6.1 +ml-collections==0.1.1 +mpmath==1.3.0 +munch==3.0.0 +nest-asyncio==1.5.6 +networkx==3.1 +nibabel==5.1.0 +nltk==3.8.1 +numba==0.58.1 +numpy==1.24.2 +nvidia-cublas-cu11==11.10.3.66 +nvidia-cuda-cupti-cu11==11.7.101 +nvidia-cuda-nvrtc-cu11==11.7.99 +nvidia-cuda-runtime-cu11==11.7.99 +nvidia-cudnn-cu11==8.5.0.96 +nvidia-cufft-cu11==10.9.0.58 +nvidia-curand-cu11==10.2.10.91 +nvidia-cusolver-cu11==11.4.0.1 +nvidia-cusparse-cu11==11.7.4.91 +nvidia-nccl-cu11==2.14.3 +nvidia-nvtx-cu11==11.7.91 +oauthlib==3.2.2 +ogb==1.3.5 +opencv-python==4.6.0.66 +outdated==0.2.2 +packaging==22.0 +pandas==1.5.2 +parso==0.8.3 +pathtools==0.1.2 +pexpect==4.8.0 +pickleshare==0.7.5 +pillow==9.5.0 +pip==23.0.1 +platformdirs==2.5.2 +pluggy==1.0.0 +pretrained-backbones-unet==0.0.1 +pretrainedmodels==0.7.4 +prompt-toolkit==3.0.36 +protobuf==3.20.3 +psutil==5.9.4 +ptyprocess==0.7.0 +pure-eval==0.2.2 +pyasn1-modules==0.2.8 +pyasn1==0.4.8 +pycocotools==2.0.6 +pycodestyle==2.5.0 +pyflakes==2.1.1 +pygments==2.11.2 +pynndescent==0.5.13 +pyparsing==3.0.9 +pysocks==1.7.1 +pytest==7.3.1 +python-dateutil==2.8.2 +pytz==2022.7 +pywavelets==1.4.1 +pyyaml==6.0 +pyzmq==23.2.0 +regex==2022.10.31 +requests-oauthlib==1.3.1 +requests==2.28.2 +rfc3986==1.5.0 +rsa==4.9 +safetensors==0.4.5 +schedulefree==1.2.7 +scikit-image==0.20.0 +scikit-learn==1.2.0 +scipy==1.9.1 +seaborn==0.13.2 +sentry-sdk==1.18.0 +setproctitle==1.3.2 +setuptools==65.6.3 +simpleitk==2.2.1 +six==1.16.0 +smart-open==6.3.0 +smmap==5.0.0 +sniffio==1.3.0 +soupsieve==2.3.2.post1 +stack-data==0.2.0 +supervision==0.3.2 +surface-distance-based-measures==0.1 +sympy==1.12 +tabulate==0.9.0 +tb-nightly==2.12.0a20221225 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 
+textaugment==1.3.4 +textblob==0.17.1 +threadpoolctl==3.1.0 +tifffile==2023.4.12 +timm==0.6.12 +tokenizers==0.13.3 +tomli==2.0.1 +torch==2.0.1 +torchaudio==2.0.2 +torchvision==0.15.2 +tornado==6.2 +tqdm==4.64.1 +traceback2==1.4.0 +traitlets==5.7.1 +transformers==4.27.4 +triton==2.0.0 +typing-extensions==4.4.0 +umap-learn==0.5.6 +unittest2==1.1.0 +urllib3==1.26.15 +wandb==0.14.0 +wcwidth==0.2.5 +werkzeug==2.2.2 +wget==3.2 +wheel==0.38.4 +wilds==1.2.2 +yacs==0.1.8 +yapf==0.29.0 +zipp==3.11.0 \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/files/wandb-metadata.json b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..b7464dbca21e7e9bbd377a06ea135a76dcae5e2a --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/files/wandb-metadata.json @@ -0,0 +1,205 @@ +{ + "os": "Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.17", + "python": "3.8.16", + "heartbeatAt": "2024-10-18T17:18:37.020882", + "startedAt": "2024-10-18T17:18:36.200305", + "docker": null, + "cuda": null, + "args": [], + "state": "running", + "program": "/home/abdelrahman.elsayed/sarim_code/train_baselines.py", + "codePath": "train_baselines.py", + "git": { + "remote": "https://github.com/JayParanjape/SVDSAM.git", + "commit": "5936d0eff64d84fbefed6ecfe4bcc841459c2fc3" + }, + "email": "amra51548@gmail.com", + "root": "/home/abdelrahman.elsayed/sarim_code", + "host": "ws-l6-014", + "username": "abdelrahman.elsayed", + "executable": "/home/abdelrahman.elsayed/.conda/envs/s-sam/bin/python", + "cpu_count": 16, + "cpu_count_logical": 32, + "cpu_freq": { + "current": 3891.1558125, + "min": 2200.0, + "max": 3900.0 + }, + "cpu_freq_per_core": [ + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3616.986, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + 
"min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + } + ], + "disk": { + "total": 1.0, + "used": 0.042255401611328125 + }, + "gpu": "NVIDIA GeForce RTX 4090", + "gpu_count": 1, + "gpu_devices": [ + { + "name": "NVIDIA GeForce RTX 4090", + "memory_total": 25757220864 + } + ], + "memory": { + "total": 62.65230178833008 + } +} diff --git a/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/files/wandb-summary.json b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/logs/debug-internal.log b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..c5822e9ecee9e5407059b6ec99c1cfcc67366e4d --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-10-18 21:18:36,210 INFO StreamThr :3324483 [internal.py:wandb_internal():87] W&B internal server running at pid: 3324483, started at: 2024-10-18 21:18:36.209404 +2024-10-18 21:18:36,211 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: status +2024-10-18 21:18:36,212 INFO WriterThread:3324483 [datastore.py:open_for_write():85] open: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/run-zaw8o90f.wandb +2024-10-18 21:18:36,213 DEBUG SenderThread:3324483 [sender.py:send():336] send: header +2024-10-18 21:18:36,258 DEBUG SenderThread:3324483 [sender.py:send():336] send: run +2024-10-18 21:18:36,881 INFO SenderThread:3324483 [dir_watcher.py:__init__():219] watching files in: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files +2024-10-18 21:18:36,881 INFO SenderThread:3324483 [sender.py:_start_run_threads():1078] run started: zaw8o90f with start time 1729271916.208827 +2024-10-18 21:18:36,881 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: summary_record +2024-10-18 21:18:36,882 INFO SenderThread:3324483 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:18:36,883 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: check_version +2024-10-18 21:18:36,883 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: check_version +2024-10-18 21:18:36,947 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: run_start +2024-10-18 21:18:36,979 DEBUG HandlerThread:3324483 [system_info.py:__init__():31] System info init +2024-10-18 21:18:36,979 DEBUG HandlerThread:3324483 [system_info.py:__init__():46] System info init done +2024-10-18 21:18:36,979 INFO HandlerThread:3324483 [system_monitor.py:start():183] Starting system monitor +2024-10-18 21:18:36,979 INFO SystemMonitor:3324483 [system_monitor.py:_start():147] Starting system asset monitoring threads +2024-10-18 21:18:36,980 INFO HandlerThread:3324483 [system_monitor.py:probe():204] Collecting system info +2024-10-18 21:18:36,980 INFO SystemMonitor:3324483 [interfaces.py:start():187] Started cpu monitoring +2024-10-18 21:18:36,980 INFO SystemMonitor:3324483 
[interfaces.py:start():187] Started disk monitoring +2024-10-18 21:18:36,981 INFO SystemMonitor:3324483 [interfaces.py:start():187] Started gpu monitoring +2024-10-18 21:18:36,981 INFO SystemMonitor:3324483 [interfaces.py:start():187] Started memory monitoring +2024-10-18 21:18:36,981 INFO SystemMonitor:3324483 [interfaces.py:start():187] Started network monitoring +2024-10-18 21:18:37,020 DEBUG HandlerThread:3324483 [system_info.py:probe():195] Probing system +2024-10-18 21:18:37,027 DEBUG HandlerThread:3324483 [system_info.py:_probe_git():180] Probing git +2024-10-18 21:18:37,042 DEBUG HandlerThread:3324483 [system_info.py:_probe_git():188] Probing git done +2024-10-18 21:18:37,042 DEBUG HandlerThread:3324483 [system_info.py:probe():240] Probing system done +2024-10-18 21:18:37,042 DEBUG HandlerThread:3324483 [system_monitor.py:probe():213] {'os': 'Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.17', 'python': '3.8.16', 'heartbeatAt': '2024-10-18T17:18:37.020882', 'startedAt': '2024-10-18T17:18:36.200305', 'docker': None, 'cuda': None, 'args': (), 'state': 'running', 'program': '/home/abdelrahman.elsayed/sarim_code/train_baselines.py', 'codePath': 'train_baselines.py', 'git': {'remote': 'https://github.com/JayParanjape/SVDSAM.git', 'commit': '5936d0eff64d84fbefed6ecfe4bcc841459c2fc3'}, 'email': 'amra51548@gmail.com', 'root': '/home/abdelrahman.elsayed/sarim_code', 'host': 'ws-l6-014', 'username': 'abdelrahman.elsayed', 'executable': '/home/abdelrahman.elsayed/.conda/envs/s-sam/bin/python', 'cpu_count': 16, 'cpu_count_logical': 32, 'cpu_freq': {'current': 3891.1558125, 'min': 2200.0, 'max': 3900.0}, 'cpu_freq_per_core': [{'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3616.986, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}], 'disk': {'total': 1.0, 'used': 0.042255401611328125}, 'gpu': 'NVIDIA GeForce RTX 4090', 'gpu_count': 1, 'gpu_devices': [{'name': 'NVIDIA GeForce RTX 4090', 'memory_total': 25757220864}], 'memory': {'total': 62.65230178833008}} +2024-10-18 21:18:37,042 INFO 
HandlerThread:3324483 [system_monitor.py:probe():214] Finished collecting system info +2024-10-18 21:18:37,042 INFO HandlerThread:3324483 [system_monitor.py:probe():217] Publishing system info +2024-10-18 21:18:37,043 DEBUG HandlerThread:3324483 [system_info.py:_save_pip():51] Saving list of pip packages installed into the current environment +2024-10-18 21:18:37,044 DEBUG HandlerThread:3324483 [system_info.py:_save_pip():67] Saving pip packages done +2024-10-18 21:18:37,044 DEBUG HandlerThread:3324483 [system_info.py:_save_conda():74] Saving list of conda packages installed into the current environment +2024-10-18 21:18:37,882 INFO Thread-13 :3324483 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/wandb-summary.json +2024-10-18 21:18:37,883 INFO Thread-13 :3324483 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/conda-environment.yaml +2024-10-18 21:18:37,883 INFO Thread-13 :3324483 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/requirements.txt +2024-10-18 21:18:39,884 INFO Thread-13 :3324483 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/conda-environment.yaml +2024-10-18 21:18:39,989 DEBUG HandlerThread:3324483 [system_info.py:_save_conda():86] Saving conda packages done +2024-10-18 21:18:39,990 INFO HandlerThread:3324483 [system_monitor.py:probe():219] Finished publishing system info +2024-10-18 21:18:39,999 DEBUG SenderThread:3324483 [sender.py:send():336] send: files +2024-10-18 21:18:39,999 INFO SenderThread:3324483 [sender.py:_save_file():1332] saving file wandb-metadata.json with policy now +2024-10-18 21:18:40,004 DEBUG SenderThread:3324483 [sender.py:send():336] send: telemetry +2024-10-18 21:18:40,055 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: stop_status +2024-10-18 21:18:40,057 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: stop_status +2024-10-18 21:18:40,878 INFO wandb-upload_0:3324483 [upload_job.py:push():138] Uploaded file /tmp/slurm-abdelrahman.elsayed-44776/tmpr1e9vgnjwandb/dq7zkk3y-wandb-metadata.json +2024-10-18 21:18:40,885 INFO Thread-13 :3324483 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/wandb-metadata.json +2024-10-18 21:18:40,886 INFO Thread-13 :3324483 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/output.log +2024-10-18 21:18:41,373 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:18:41,985 DEBUG SenderThread:3324483 [sender.py:send():336] send: exit +2024-10-18 21:18:41,985 INFO SenderThread:3324483 [sender.py:send_exit():559] handling exit code: 1 +2024-10-18 21:18:41,985 INFO SenderThread:3324483 [sender.py:send_exit():561] handling runtime: 5 +2024-10-18 21:18:41,987 INFO SenderThread:3324483 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:18:41,987 INFO SenderThread:3324483 [sender.py:send_exit():567] send defer +2024-10-18 21:18:41,987 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:41,987 INFO 
HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 0 +2024-10-18 21:18:41,987 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:41,987 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 0 +2024-10-18 21:18:41,987 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 1 +2024-10-18 21:18:41,987 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:41,987 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 1 +2024-10-18 21:18:41,987 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:41,987 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 1 +2024-10-18 21:18:41,987 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 2 +2024-10-18 21:18:41,987 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:41,987 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 2 +2024-10-18 21:18:41,987 INFO HandlerThread:3324483 [system_monitor.py:finish():193] Stopping system monitor +2024-10-18 21:18:41,988 DEBUG SystemMonitor:3324483 [system_monitor.py:_start():161] Starting system metrics aggregation loop +2024-10-18 21:18:41,988 INFO HandlerThread:3324483 [interfaces.py:finish():199] Joined cpu monitor +2024-10-18 21:18:41,988 DEBUG SystemMonitor:3324483 [system_monitor.py:_start():168] Finished system metrics aggregation loop +2024-10-18 21:18:41,988 DEBUG SystemMonitor:3324483 [system_monitor.py:_start():172] Publishing last batch of metrics +2024-10-18 21:18:41,989 INFO HandlerThread:3324483 [interfaces.py:finish():199] Joined disk monitor +2024-10-18 21:18:42,023 INFO HandlerThread:3324483 [interfaces.py:finish():199] Joined gpu monitor +2024-10-18 21:18:42,023 INFO HandlerThread:3324483 [interfaces.py:finish():199] Joined memory monitor +2024-10-18 21:18:42,023 INFO HandlerThread:3324483 [interfaces.py:finish():199] Joined network monitor +2024-10-18 21:18:42,024 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:42,024 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 2 +2024-10-18 21:18:42,024 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 3 +2024-10-18 21:18:42,024 DEBUG SenderThread:3324483 [sender.py:send():336] send: stats +2024-10-18 21:18:42,024 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:42,024 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 3 +2024-10-18 21:18:42,024 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:42,024 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 3 +2024-10-18 21:18:42,024 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 4 +2024-10-18 21:18:42,024 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:42,024 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 4 +2024-10-18 21:18:42,024 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:42,024 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 4 +2024-10-18 21:18:42,024 INFO SenderThread:3324483 
[sender.py:transition_state():587] send defer: 5 +2024-10-18 21:18:42,025 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:42,025 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 5 +2024-10-18 21:18:42,025 DEBUG SenderThread:3324483 [sender.py:send():336] send: summary +2024-10-18 21:18:42,025 INFO SenderThread:3324483 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:18:42,025 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:42,025 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 5 +2024-10-18 21:18:42,025 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 6 +2024-10-18 21:18:42,025 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:42,025 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 6 +2024-10-18 21:18:42,025 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:42,026 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 6 +2024-10-18 21:18:42,028 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:18:42,312 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 7 +2024-10-18 21:18:42,312 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:42,312 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 7 +2024-10-18 21:18:42,312 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:42,312 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 7 +2024-10-18 21:18:42,887 INFO Thread-13 :3324483 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/config.yaml +2024-10-18 21:18:42,887 INFO Thread-13 :3324483 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/wandb-summary.json +2024-10-18 21:18:42,888 INFO Thread-13 :3324483 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/output.log +2024-10-18 21:18:42,986 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: poll_exit +2024-10-18 21:18:44,380 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 8 +2024-10-18 21:18:44,380 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: poll_exit +2024-10-18 21:18:44,380 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:44,380 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 8 +2024-10-18 21:18:44,381 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:44,381 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 8 +2024-10-18 21:18:44,875 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 9 +2024-10-18 21:18:44,875 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:44,875 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 9 
+2024-10-18 21:18:44,875 DEBUG SenderThread:3324483 [sender.py:send():336] send: artifact +2024-10-18 21:18:44,889 INFO Thread-13 :3324483 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/output.log +2024-10-18 21:18:46,452 INFO wandb-upload_0:3324483 [upload_job.py:push():93] Skipped uploading /home/abdelrahman.elsayed/.local/share/wandb/artifacts/staging/tmpbnaalgbu +2024-10-18 21:18:46,871 INFO wandb-upload_1:3324483 [upload_job.py:push():96] Uploaded file /home/abdelrahman.elsayed/.local/share/wandb/artifacts/staging/tmp_m4mjgxn +2024-10-18 21:18:47,989 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: keepalive +2024-10-18 21:18:48,710 INFO SenderThread:3324483 [sender.py:send_artifact():1428] sent artifact job-https___github.com_JayParanjape_SVDSAM.git_train_baselines.py - {'id': 'QXJ0aWZhY3Q6MTI4NTc5Njg5Ng==', 'digest': '5f8b773fe3bdf0ea6bd47c21f45cc4fb', 'state': 'PENDING', 'aliases': [], 'artifactSequence': {'id': 'QXJ0aWZhY3RDb2xsZWN0aW9uOjQ3ODg1ODkyMQ==', 'latestArtifact': {'id': 'QXJ0aWZhY3Q6MTI4NTQ4OTAyOA==', 'versionIndex': 0}}, 'version': 'latest'} +2024-10-18 21:18:48,710 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:48,710 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 9 +2024-10-18 21:18:48,710 INFO SenderThread:3324483 [dir_watcher.py:finish():365] shutting down directory watcher +2024-10-18 21:18:48,710 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:18:48,891 INFO SenderThread:3324483 [dir_watcher.py:finish():395] scan: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files +2024-10-18 21:18:48,891 INFO SenderThread:3324483 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/wandb-summary.json wandb-summary.json +2024-10-18 21:18:48,891 INFO SenderThread:3324483 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/output.log output.log +2024-10-18 21:18:48,892 INFO SenderThread:3324483 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/requirements.txt requirements.txt +2024-10-18 21:18:48,893 INFO SenderThread:3324483 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/config.yaml config.yaml +2024-10-18 21:18:48,895 INFO SenderThread:3324483 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/wandb-metadata.json wandb-metadata.json +2024-10-18 21:18:48,895 INFO SenderThread:3324483 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/conda-environment.yaml conda-environment.yaml +2024-10-18 21:18:48,898 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 10 +2024-10-18 21:18:48,898 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:48,899 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 10 +2024-10-18 21:18:48,902 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:48,902 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender 
defer: 10 +2024-10-18 21:18:48,902 INFO SenderThread:3324483 [file_pusher.py:finish():164] shutting down file pusher +2024-10-18 21:18:49,510 INFO wandb-upload_0:3324483 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/wandb-summary.json +2024-10-18 21:18:49,677 INFO wandb-upload_1:3324483 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/output.log +2024-10-18 21:18:49,720 INFO wandb-upload_4:3324483 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/conda-environment.yaml +2024-10-18 21:18:49,735 INFO wandb-upload_3:3324483 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/config.yaml +2024-10-18 21:18:49,770 INFO wandb-upload_2:3324483 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/files/requirements.txt +2024-10-18 21:18:49,970 INFO Thread-12 :3324483 [sender.py:transition_state():587] send defer: 11 +2024-10-18 21:18:49,971 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:49,971 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 11 +2024-10-18 21:18:49,971 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:49,971 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 11 +2024-10-18 21:18:49,971 INFO SenderThread:3324483 [file_pusher.py:join():169] waiting for file pusher +2024-10-18 21:18:49,971 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 12 +2024-10-18 21:18:49,971 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:49,971 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 12 +2024-10-18 21:18:49,972 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:49,972 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 12 +2024-10-18 21:18:50,235 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 13 +2024-10-18 21:18:50,235 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:50,235 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 13 +2024-10-18 21:18:50,235 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:50,235 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 13 +2024-10-18 21:18:50,236 INFO SenderThread:3324483 [sender.py:transition_state():587] send defer: 14 +2024-10-18 21:18:50,236 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:18:50,236 INFO HandlerThread:3324483 [handler.py:handle_request_defer():170] handle defer: 14 +2024-10-18 21:18:50,236 DEBUG SenderThread:3324483 [sender.py:send():336] send: final +2024-10-18 21:18:50,236 DEBUG SenderThread:3324483 [sender.py:send():336] send: footer +2024-10-18 21:18:50,236 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: defer +2024-10-18 21:18:50,236 INFO SenderThread:3324483 [sender.py:send_request_defer():583] handle sender defer: 14 +2024-10-18 21:18:50,236 DEBUG HandlerThread:3324483 
[handler.py:handle_request():144] handle_request: poll_exit +2024-10-18 21:18:50,237 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: poll_exit +2024-10-18 21:18:50,237 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: server_info +2024-10-18 21:18:50,237 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: get_summary +2024-10-18 21:18:50,237 DEBUG SenderThread:3324483 [sender.py:send_request():363] send_request: server_info +2024-10-18 21:18:50,239 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: sampled_history +2024-10-18 21:18:50,498 INFO MainThread:3324483 [wandb_run.py:_footer_history_summary_info():3422] rendering history +2024-10-18 21:18:50,498 INFO MainThread:3324483 [wandb_run.py:_footer_history_summary_info():3454] rendering summary +2024-10-18 21:18:50,498 INFO MainThread:3324483 [wandb_run.py:_footer_sync_info():3380] logging synced files +2024-10-18 21:18:50,498 DEBUG HandlerThread:3324483 [handler.py:handle_request():144] handle_request: shutdown +2024-10-18 21:18:50,498 INFO HandlerThread:3324483 [handler.py:finish():842] shutting down handler +2024-10-18 21:18:51,237 INFO WriterThread:3324483 [datastore.py:close():298] close: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/run-zaw8o90f.wandb +2024-10-18 21:18:51,498 INFO SenderThread:3324483 [sender.py:finish():1504] shutting down sender +2024-10-18 21:18:51,498 INFO SenderThread:3324483 [file_pusher.py:finish():164] shutting down file pusher +2024-10-18 21:18:51,498 INFO SenderThread:3324483 [file_pusher.py:join():169] waiting for file pusher diff --git a/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/logs/debug.log b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..5f6624b68bc2f642cb59d31bc7bec5d38efe6e35 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/logs/debug.log @@ -0,0 +1,27 @@ +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_setup.py:_flush():76] Configure stats pid to 3324429 +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_setup.py:_flush():76] Loading settings from /home/abdelrahman.elsayed/.config/wandb/settings +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_setup.py:_flush():76] Loading settings from /home/abdelrahman.elsayed/sarim_code/wandb/settings +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': 'train_baselines.py', 'program': '/home/abdelrahman.elsayed/sarim_code/train_baselines.py'} +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_init.py:_log_setup():506] Logging user logs to /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/logs/debug.log +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_init.py:_log_setup():507] Logging internal logs to /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_211836-zaw8o90f/logs/debug-internal.log +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_init.py:init():546] calling init triggers +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_init.py:init():552] wandb.init called with sweep_config: {} +config: {'learning_rate': 
0.0001, 'batch_size': 5, 'num_epochs': 500, 'reg_multiplier': 0.01} +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_init.py:init():602] starting backend +2024-10-18 21:18:36,205 INFO MainThread:3324429 [wandb_init.py:init():606] setting up manager +2024-10-18 21:18:36,207 INFO MainThread:3324429 [backend.py:_multiprocessing_setup():106] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-10-18 21:18:36,208 INFO MainThread:3324429 [wandb_init.py:init():613] backend started and connected +2024-10-18 21:18:36,210 INFO MainThread:3324429 [wandb_init.py:init():701] updated telemetry +2024-10-18 21:18:36,257 INFO MainThread:3324429 [wandb_init.py:init():741] communicating run to backend with 60.0 second timeout +2024-10-18 21:18:36,883 INFO MainThread:3324429 [wandb_run.py:_on_init():2133] communicating current version +2024-10-18 21:18:36,943 INFO MainThread:3324429 [wandb_run.py:_on_init():2142] got version response upgrade_message: "wandb version 0.18.5 is available! To upgrade, please run:\n $ pip install wandb --upgrade" + +2024-10-18 21:18:36,943 INFO MainThread:3324429 [wandb_init.py:init():789] starting run threads in backend +2024-10-18 21:18:40,003 INFO MainThread:3324429 [wandb_run.py:_console_start():2114] atexit reg +2024-10-18 21:18:40,003 INFO MainThread:3324429 [wandb_run.py:_redirect():1969] redirect: SettingsConsole.WRAP_RAW +2024-10-18 21:18:40,003 INFO MainThread:3324429 [wandb_run.py:_redirect():2034] Wrapping output streams. +2024-10-18 21:18:40,003 INFO MainThread:3324429 [wandb_run.py:_redirect():2059] Redirects installed. +2024-10-18 21:18:40,004 INFO MainThread:3324429 [wandb_init.py:init():831] run started, returning control to user process +2024-10-18 21:18:51,500 WARNING MsgRouterThr:3324429 [router.py:message_loop():77] message_loop has been closed diff --git a/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/run-zaw8o90f.wandb b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/run-zaw8o90f.wandb new file mode 100644 index 0000000000000000000000000000000000000000..a04ed12fc9454ad42547c8813f4d538ae7ab4c0d Binary files /dev/null and b/AllinonSAM/wandb/run-20241018_211836-zaw8o90f/run-zaw8o90f.wandb differ diff --git a/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/conda-environment.yaml b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/conda-environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7a029db1357b890c0decfa95cf08b71a66c531d9 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/conda-environment.yaml @@ -0,0 +1,267 @@ +name: s-sam +channels: + - pytorch + - nvidia + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - _openmp_mutex=5.1=1_gnu + - asttokens=2.0.5=pyhd3eb1b0_0 + - backcall=0.2.0=pyhd3eb1b0_0 + - ca-certificates=2023.01.10=h06a4308_0 + - certifi=2022.12.7=py38h06a4308_0 + - comm=0.1.2=py38h06a4308_0 + - cuda=11.7.1=0 + - cuda-cccl=11.7.91=0 + - cuda-command-line-tools=11.7.1=0 + - cuda-compiler=11.7.1=0 + - cuda-cudart=11.7.99=0 + - cuda-cudart-dev=11.7.99=0 + - cuda-cuobjdump=11.7.91=0 + - cuda-cupti=11.7.101=0 + - cuda-cuxxfilt=11.7.91=0 + - cuda-demo-suite=12.0.76=0 + - cuda-documentation=12.0.76=0 + - cuda-driver-dev=11.7.99=0 + - cuda-gdb=12.0.90=0 + - cuda-libraries=11.7.1=0 + - cuda-libraries-dev=11.7.1=0 + - cuda-memcheck=11.8.86=0 + - cuda-nsight=12.0.78=0 + - cuda-nsight-compute=12.0.0=0 + - cuda-nvcc=11.7.99=0 + - cuda-nvdisasm=12.0.76=0 + - cuda-nvml-dev=11.7.91=0 + - cuda-nvprof=12.0.90=0 + - cuda-nvprune=11.7.91=0 + - cuda-nvrtc=11.7.99=0 + - 
cuda-nvrtc-dev=11.7.99=0 + - cuda-nvtx=11.7.91=0 + - cuda-nvvp=12.0.90=0 + - cuda-runtime=11.7.1=0 + - cuda-sanitizer-api=12.0.90=0 + - cuda-toolkit=11.7.1=0 + - cuda-tools=11.7.1=0 + - cuda-visual-tools=11.7.1=0 + - cudatoolkit=11.0.221=h6bb024c_0 + - debugpy=1.5.1=py38h295c915_0 + - decorator=5.1.1=pyhd3eb1b0_0 + - executing=0.8.3=pyhd3eb1b0_0 + - flit-core=3.8.0=py38h06a4308_0 + - gds-tools=1.5.0.59=0 + - importlib_metadata=6.0.0=hd3eb1b0_0 + - ipykernel=6.19.2=py38hb070fc8_0 + - ipython=8.12.0=py38h06a4308_0 + - jedi=0.18.1=py38h06a4308_1 + - jupyter_client=8.1.0=py38h06a4308_0 + - jupyter_core=5.3.0=py38h06a4308_0 + - ld_impl_linux-64=2.38=h1181459_1 + - libcublas=11.10.3.66=0 + - libcublas-dev=11.10.3.66=0 + - libcufft=10.7.2.124=h4fbf590_0 + - libcufft-dev=10.7.2.124=h98a8f43_0 + - libcufile=1.5.0.59=0 + - libcufile-dev=1.5.0.59=0 + - libcurand=10.3.1.50=0 + - libcurand-dev=10.3.1.50=0 + - libcusolver=11.4.0.1=0 + - libcusolver-dev=11.4.0.1=0 + - libcusparse=11.7.4.91=0 + - libcusparse-dev=11.7.4.91=0 + - libffi=3.4.2=h6a678d5_6 + - libgcc-ng=11.2.0=h1234567_1 + - libgomp=11.2.0=h1234567_1 + - libnpp=11.7.4.75=0 + - libnpp-dev=11.7.4.75=0 + - libnvjpeg=11.8.0.2=0 + - libnvjpeg-dev=11.8.0.2=0 + - libsodium=1.0.18=h7b6447c_0 + - libstdcxx-ng=11.2.0=h1234567_1 + - matplotlib-inline=0.1.6=py38h06a4308_0 + - ncurses=6.4=h6a678d5_0 + - nest-asyncio=1.5.6=py38h06a4308_0 + - nsight-compute=2022.4.0.15=0 + - openssl=1.1.1t=h7f8727e_0 + - parso=0.8.3=pyhd3eb1b0_0 + - pexpect=4.8.0=pyhd3eb1b0_3 + - pickleshare=0.7.5=pyhd3eb1b0_1003 + - pip=23.0.1=py38h06a4308_0 + - platformdirs=2.5.2=py38h06a4308_0 + - prompt-toolkit=3.0.36=py38h06a4308_0 + - ptyprocess=0.7.0=pyhd3eb1b0_2 + - pure_eval=0.2.2=pyhd3eb1b0_0 + - pygments=2.11.2=pyhd3eb1b0_0 + - python=3.8.16=h7a1cb2a_3 + - python-dateutil=2.8.2=pyhd3eb1b0_0 + - pytorch-cuda=11.7=h67b0de4_1 + - pyzmq=23.2.0=py38h6a678d5_0 + - readline=8.2=h5eee18b_0 + - setuptools=65.6.3=py38h06a4308_0 + - six=1.16.0=pyhd3eb1b0_1 + - sqlite=3.41.1=h5eee18b_0 + - stack_data=0.2.0=pyhd3eb1b0_0 + - tk=8.6.12=h1ccaba5_0 + - tornado=6.2=py38h5eee18b_0 + - traitlets=5.7.1=py38h06a4308_0 + - typing_extensions=4.4.0=py38h06a4308_0 + - wcwidth=0.2.5=pyhd3eb1b0_0 + - xz=5.2.10=h5eee18b_1 + - zeromq=4.3.4=h2531618_0 + - zipp=3.11.0=py38h06a4308_0 + - zlib=1.2.13=h5eee18b_0 + - pip: + - absl-py==1.3.0 + - addict==2.4.0 + - appdirs==1.4.4 + - argparse==1.4.0 + - batchgenerators==0.25 + - beautifulsoup4==4.11.1 + - cachetools==5.2.0 + - chardet==3.0.4 + - charset-normalizer==3.1.0 + - click==8.1.3 + - cmake==3.26.3 + - contextlib2==21.6.0 + - contourpy==1.0.7 + - crfseg==1.0.0 + - cycler==0.11.0 + - docker-pycreds==0.4.0 + - efficientnet-pytorch==0.7.1 + - einops==0.8.0 + - entrypoints==0.3 + - exceptiongroup==1.1.1 + - filelock==3.8.2 + - flake8==3.7.9 + - fonttools==4.39.3 + - ftfy==6.1.1 + - future==0.18.2 + - gdown==4.6.0 + - gensim==4.3.1 + - gitdb==4.0.10 + - gitpython==3.1.31 + - google-auth==2.15.0 + - google-auth-oauthlib==0.4.6 + - googletrans==3.0.0 + - grpcio==1.51.1 + - h11==0.9.0 + - h2==3.2.0 + - h5py==3.8.0 + - hpack==3.0.0 + - hstspreload==2023.1.1 + - httpcore==0.9.1 + - httpx==0.13.3 + - huggingface-hub==0.11.1 + - hyperframe==5.2.0 + - idna==2.10 + - imageio==2.28.0 + - importlib-metadata==5.2.0 + - importlib-resources==5.12.0 + - iniconfig==2.0.0 + - ipynb-py-convert==0.4.6 + - isort==4.3.21 + - jinja2==3.1.2 + - joblib==1.2.0 + - kiwisolver==1.4.4 + - lazy-loader==0.2 + - linecache2==1.0.0 + - lit==16.0.3 + - littleutils==0.2.2 + - llvmlite==0.41.1 + - 
markdown==3.4.1 + - markupsafe==2.1.1 + - matplotlib==3.7.1 + - mccabe==0.6.1 + - ml-collections==0.1.1 + - mpmath==1.3.0 + - munch==3.0.0 + - networkx==3.1 + - nibabel==5.1.0 + - nltk==3.8.1 + - numba==0.58.1 + - numpy==1.24.2 + - nvidia-cublas-cu11==11.10.3.66 + - nvidia-cuda-cupti-cu11==11.7.101 + - nvidia-cuda-nvrtc-cu11==11.7.99 + - nvidia-cuda-runtime-cu11==11.7.99 + - nvidia-cudnn-cu11==8.5.0.96 + - nvidia-cufft-cu11==10.9.0.58 + - nvidia-curand-cu11==10.2.10.91 + - nvidia-cusolver-cu11==11.4.0.1 + - nvidia-cusparse-cu11==11.7.4.91 + - nvidia-nccl-cu11==2.14.3 + - nvidia-nvtx-cu11==11.7.91 + - oauthlib==3.2.2 + - ogb==1.3.5 + - opencv-python==4.6.0.66 + - outdated==0.2.2 + - packaging==22.0 + - pandas==1.5.2 + - pathtools==0.1.2 + - pillow==9.5.0 + - pluggy==1.0.0 + - pretrained-backbones-unet==0.0.1 + - pretrainedmodels==0.7.4 + - protobuf==3.20.3 + - psutil==5.9.4 + - pyasn1==0.4.8 + - pyasn1-modules==0.2.8 + - pycocotools==2.0.6 + - pycodestyle==2.5.0 + - pyflakes==2.1.1 + - pynndescent==0.5.13 + - pyparsing==3.0.9 + - pysocks==1.7.1 + - pytest==7.3.1 + - pytz==2022.7 + - pywavelets==1.4.1 + - pyyaml==6.0 + - regex==2022.10.31 + - requests==2.28.2 + - requests-oauthlib==1.3.1 + - rfc3986==1.5.0 + - rsa==4.9 + - safetensors==0.4.5 + - schedulefree==1.2.7 + - scikit-image==0.20.0 + - scikit-learn==1.2.0 + - scipy==1.9.1 + - seaborn==0.13.2 + - sentry-sdk==1.18.0 + - setproctitle==1.3.2 + - simpleitk==2.2.1 + - smart-open==6.3.0 + - smmap==5.0.0 + - sniffio==1.3.0 + - soupsieve==2.3.2.post1 + - supervision==0.3.2 + - surface-distance-based-measures==0.1 + - sympy==1.12 + - tabulate==0.9.0 + - tb-nightly==2.12.0a20221225 + - tensorboard-data-server==0.6.1 + - tensorboard-plugin-wit==1.8.1 + - textaugment==1.3.4 + - textblob==0.17.1 + - threadpoolctl==3.1.0 + - tifffile==2023.4.12 + - timm==0.6.12 + - tokenizers==0.13.3 + - tomli==2.0.1 + - torch==2.0.1 + - torchaudio==2.0.2 + - torchvision==0.15.2 + - tqdm==4.64.1 + - traceback2==1.4.0 + - transformers==4.27.4 + - triton==2.0.0 + - umap-learn==0.5.6 + - unittest2==1.1.0 + - urllib3==1.26.15 + - wandb==0.14.0 + - werkzeug==2.2.2 + - wget==3.2 + - wheel==0.38.4 + - wilds==1.2.2 + - yacs==0.1.8 + - yapf==0.29.0 +prefix: /home/abdelrahman.elsayed/.conda/envs/s-sam diff --git a/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/config.yaml b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec9b71c1d559ede33925e24e0364f9a7f88e4ef9 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/config.yaml @@ -0,0 +1,44 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + cli_version: 0.14.0 + framework: torch + is_jupyter_run: false + is_kaggle_kernel: false + python_version: 3.8.16 + start_time: 1729272018.294645 + t: + 1: + - 1 + - 41 + - 49 + - 55 + - 63 + 2: + - 1 + - 41 + - 49 + - 55 + - 63 + 3: + - 13 + - 16 + - 23 + 4: 3.8.16 + 5: 0.14.0 + 8: + - 5 +batch_size: + desc: null + value: 2 +learning_rate: + desc: null + value: 0.0001 +num_epochs: + desc: null + value: 500 +reg_multiplier: + desc: null + value: 0.01 diff --git a/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/output.log b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..0ff8d9c51de84fb920c11301afc480e8537df6a2 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/output.log @@ -0,0 +1,39 @@ +Training parameters: +---------- +number of trainable parameters: 1572370 +batch 
size: 2 +num epochs: 500 +Epoch 0/499 +---------- +Traceback (most recent call last): + File "/home/abdelrahman.elsayed/sarim_code/train_baselines.py", line 266, in <module> + main_train(data_config, model_config, args.pretrained_path, args.save_path, args.training_strategy, device=args.device) + File "/home/abdelrahman.elsayed/sarim_code/train_baselines.py", line 234, in main_train + model = train_dl( + File "/home/abdelrahman.elsayed/sarim_code/train.py", line 218, in train_dl + outputs, reg_loss = model(inputs, text) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 711, in forward + return self.soft(self._forward_impl(x)),0 + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 638, in _forward_impl + x2 = self.layer2(x1) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/container.py", line 217, in forward + input = module(input) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 332, in forward + out = self.width_block(out) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/axialnet.py", line 167, in forward + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/batchnorm.py", line 171, in forward + return F.batch_norm( + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/functional.py", line 2450, in batch_norm + return torch.batch_norm( +torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 3.00 GiB (GPU 0; 23.65 GiB total capacity; 18.59 GiB already allocated; 1.82 GiB free; 21.35 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation.
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/requirements.txt b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f0720690c2daa9a35ddb12cd902cf9cf8de43d99 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/requirements.txt @@ -0,0 +1,188 @@ +absl-py==1.3.0 +addict==2.4.0 +appdirs==1.4.4 +argparse==1.4.0 +asttokens==2.0.5 +backcall==0.2.0 +batchgenerators==0.25 +beautifulsoup4==4.11.1 +cachetools==5.2.0 +certifi==2022.12.7 +chardet==3.0.4 +charset-normalizer==3.1.0 +click==8.1.3 +cmake==3.26.3 +comm==0.1.2 +contextlib2==21.6.0 +contourpy==1.0.7 +crfseg==1.0.0 +cycler==0.11.0 +debugpy==1.5.1 +decorator==5.1.1 +docker-pycreds==0.4.0 +efficientnet-pytorch==0.7.1 +einops==0.8.0 +entrypoints==0.3 +exceptiongroup==1.1.1 +executing==0.8.3 +filelock==3.8.2 +flake8==3.7.9 +flit-core==3.8.0 +fonttools==4.39.3 +ftfy==6.1.1 +future==0.18.2 +gdown==4.6.0 +gensim==4.3.1 +gitdb==4.0.10 +gitpython==3.1.31 +google-auth-oauthlib==0.4.6 +google-auth==2.15.0 +googletrans==3.0.0 +grpcio==1.51.1 +h11==0.9.0 +h2==3.2.0 +h5py==3.8.0 +hpack==3.0.0 +hstspreload==2023.1.1 +httpcore==0.9.1 +httpx==0.13.3 +huggingface-hub==0.11.1 +hyperframe==5.2.0 +idna==2.10 +imageio==2.28.0 +importlib-metadata==5.2.0 +importlib-resources==5.12.0 +iniconfig==2.0.0 +ipykernel==6.19.2 +ipynb-py-convert==0.4.6 +ipython==8.12.0 +isort==4.3.21 +jedi==0.18.1 +jinja2==3.1.2 +joblib==1.2.0 +jupyter-client==8.1.0 +jupyter-core==5.3.0 +kiwisolver==1.4.4 +lazy-loader==0.2 +linecache2==1.0.0 +lit==16.0.3 +littleutils==0.2.2 +llvmlite==0.41.1 +markdown==3.4.1 +markupsafe==2.1.1 +matplotlib-inline==0.1.6 +matplotlib==3.7.1 +mccabe==0.6.1 +ml-collections==0.1.1 +mpmath==1.3.0 +munch==3.0.0 +nest-asyncio==1.5.6 +networkx==3.1 +nibabel==5.1.0 +nltk==3.8.1 +numba==0.58.1 +numpy==1.24.2 +nvidia-cublas-cu11==11.10.3.66 +nvidia-cuda-cupti-cu11==11.7.101 +nvidia-cuda-nvrtc-cu11==11.7.99 +nvidia-cuda-runtime-cu11==11.7.99 +nvidia-cudnn-cu11==8.5.0.96 +nvidia-cufft-cu11==10.9.0.58 +nvidia-curand-cu11==10.2.10.91 +nvidia-cusolver-cu11==11.4.0.1 +nvidia-cusparse-cu11==11.7.4.91 +nvidia-nccl-cu11==2.14.3 +nvidia-nvtx-cu11==11.7.91 +oauthlib==3.2.2 +ogb==1.3.5 +opencv-python==4.6.0.66 +outdated==0.2.2 +packaging==22.0 +pandas==1.5.2 +parso==0.8.3 +pathtools==0.1.2 +pexpect==4.8.0 +pickleshare==0.7.5 +pillow==9.5.0 +pip==23.0.1 +platformdirs==2.5.2 +pluggy==1.0.0 +pretrained-backbones-unet==0.0.1 +pretrainedmodels==0.7.4 +prompt-toolkit==3.0.36 +protobuf==3.20.3 +psutil==5.9.4 +ptyprocess==0.7.0 +pure-eval==0.2.2 +pyasn1-modules==0.2.8 +pyasn1==0.4.8 +pycocotools==2.0.6 +pycodestyle==2.5.0 +pyflakes==2.1.1 +pygments==2.11.2 +pynndescent==0.5.13 +pyparsing==3.0.9 +pysocks==1.7.1 +pytest==7.3.1 +python-dateutil==2.8.2 +pytz==2022.7 +pywavelets==1.4.1 +pyyaml==6.0 +pyzmq==23.2.0 +regex==2022.10.31 +requests-oauthlib==1.3.1 +requests==2.28.2 +rfc3986==1.5.0 +rsa==4.9 +safetensors==0.4.5 +schedulefree==1.2.7 +scikit-image==0.20.0 +scikit-learn==1.2.0 +scipy==1.9.1 +seaborn==0.13.2 +sentry-sdk==1.18.0 +setproctitle==1.3.2 +setuptools==65.6.3 +simpleitk==2.2.1 +six==1.16.0 +smart-open==6.3.0 +smmap==5.0.0 +sniffio==1.3.0 +soupsieve==2.3.2.post1 +stack-data==0.2.0 +supervision==0.3.2 +surface-distance-based-measures==0.1 +sympy==1.12 +tabulate==0.9.0 +tb-nightly==2.12.0a20221225 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 
+textaugment==1.3.4 +textblob==0.17.1 +threadpoolctl==3.1.0 +tifffile==2023.4.12 +timm==0.6.12 +tokenizers==0.13.3 +tomli==2.0.1 +torch==2.0.1 +torchaudio==2.0.2 +torchvision==0.15.2 +tornado==6.2 +tqdm==4.64.1 +traceback2==1.4.0 +traitlets==5.7.1 +transformers==4.27.4 +triton==2.0.0 +typing-extensions==4.4.0 +umap-learn==0.5.6 +unittest2==1.1.0 +urllib3==1.26.15 +wandb==0.14.0 +wcwidth==0.2.5 +werkzeug==2.2.2 +wget==3.2 +wheel==0.38.4 +wilds==1.2.2 +yacs==0.1.8 +yapf==0.29.0 +zipp==3.11.0 \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/wandb-metadata.json b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..c1b2e86715a6ffda8ef5996a22d322679d2f80ec --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/wandb-metadata.json @@ -0,0 +1,205 @@ +{ + "os": "Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.17", + "python": "3.8.16", + "heartbeatAt": "2024-10-18T17:20:19.839310", + "startedAt": "2024-10-18T17:20:18.285219", + "docker": null, + "cuda": null, + "args": [], + "state": "running", + "program": "/home/abdelrahman.elsayed/sarim_code/train_baselines.py", + "codePath": "train_baselines.py", + "git": { + "remote": "https://github.com/JayParanjape/SVDSAM.git", + "commit": "5936d0eff64d84fbefed6ecfe4bcc841459c2fc3" + }, + "email": "amra51548@gmail.com", + "root": "/home/abdelrahman.elsayed/sarim_code", + "host": "ws-l6-014", + "username": "abdelrahman.elsayed", + "executable": "/home/abdelrahman.elsayed/.conda/envs/s-sam/bin/python", + "cpu_count": 16, + "cpu_count_logical": 32, + "cpu_freq": { + "current": 3923.0690625, + "min": 2200.0, + "max": 3900.0 + }, + "cpu_freq_per_core": [ + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3968.026, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3756.782, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, 
+ "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + } + ], + "disk": { + "total": 1.0, + "used": 0.042255401611328125 + }, + "gpu": "NVIDIA GeForce RTX 4090", + "gpu_count": 1, + "gpu_devices": [ + { + "name": "NVIDIA GeForce RTX 4090", + "memory_total": 25757220864 + } + ], + "memory": { + "total": 62.65230178833008 + } +} diff --git a/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/wandb-summary.json b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..b1d4cf96d64955a2ae8b982ce021e29fde546a1a --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 4}} \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/logs/debug-internal.log b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..7845f31419d3952bda20a0a95b6f47586311f4f1 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/logs/debug-internal.log @@ -0,0 +1,180 @@ +2024-10-18 21:20:18,295 INFO StreamThr :3326268 [internal.py:wandb_internal():87] W&B internal server running at pid: 3326268, started at: 2024-10-18 21:20:18.295183 +2024-10-18 21:20:18,297 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: status +2024-10-18 21:20:18,297 INFO WriterThread:3326268 [datastore.py:open_for_write():85] open: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/run-p8w61ip6.wandb +2024-10-18 21:20:18,299 DEBUG SenderThread:3326268 [sender.py:send():336] send: header +2024-10-18 21:20:18,345 DEBUG SenderThread:3326268 [sender.py:send():336] send: run +2024-10-18 21:20:19,647 INFO SenderThread:3326268 [dir_watcher.py:__init__():219] watching files in: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files +2024-10-18 21:20:19,647 INFO SenderThread:3326268 [sender.py:_start_run_threads():1078] run started: p8w61ip6 with start time 1729272018.294645 +2024-10-18 21:20:19,647 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: summary_record +2024-10-18 21:20:19,648 INFO SenderThread:3326268 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:20:19,649 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: check_version +2024-10-18 21:20:19,649 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: check_version +2024-10-18 21:20:19,729 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: run_start +2024-10-18 21:20:19,769 DEBUG HandlerThread:3326268 [system_info.py:__init__():31] System info init +2024-10-18 21:20:19,769 DEBUG HandlerThread:3326268 [system_info.py:__init__():46] System info init done +2024-10-18 21:20:19,769 INFO HandlerThread:3326268 [system_monitor.py:start():183] Starting system monitor +2024-10-18 21:20:19,769 INFO SystemMonitor:3326268 [system_monitor.py:_start():147] Starting system asset monitoring threads +2024-10-18 21:20:19,769 INFO HandlerThread:3326268 [system_monitor.py:probe():204] Collecting system info +2024-10-18 21:20:19,769 INFO SystemMonitor:3326268 [interfaces.py:start():187] Started cpu monitoring +2024-10-18 21:20:19,770 INFO SystemMonitor:3326268 
[interfaces.py:start():187] Started disk monitoring +2024-10-18 21:20:19,770 INFO SystemMonitor:3326268 [interfaces.py:start():187] Started gpu monitoring +2024-10-18 21:20:19,771 INFO SystemMonitor:3326268 [interfaces.py:start():187] Started memory monitoring +2024-10-18 21:20:19,771 INFO SystemMonitor:3326268 [interfaces.py:start():187] Started network monitoring +2024-10-18 21:20:19,839 DEBUG HandlerThread:3326268 [system_info.py:probe():195] Probing system +2024-10-18 21:20:19,845 DEBUG HandlerThread:3326268 [system_info.py:_probe_git():180] Probing git +2024-10-18 21:20:19,860 DEBUG HandlerThread:3326268 [system_info.py:_probe_git():188] Probing git done +2024-10-18 21:20:19,860 DEBUG HandlerThread:3326268 [system_info.py:probe():240] Probing system done +2024-10-18 21:20:19,860 DEBUG HandlerThread:3326268 [system_monitor.py:probe():213] {'os': 'Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.17', 'python': '3.8.16', 'heartbeatAt': '2024-10-18T17:20:19.839310', 'startedAt': '2024-10-18T17:20:18.285219', 'docker': None, 'cuda': None, 'args': (), 'state': 'running', 'program': '/home/abdelrahman.elsayed/sarim_code/train_baselines.py', 'codePath': 'train_baselines.py', 'git': {'remote': 'https://github.com/JayParanjape/SVDSAM.git', 'commit': '5936d0eff64d84fbefed6ecfe4bcc841459c2fc3'}, 'email': 'amra51548@gmail.com', 'root': '/home/abdelrahman.elsayed/sarim_code', 'host': 'ws-l6-014', 'username': 'abdelrahman.elsayed', 'executable': '/home/abdelrahman.elsayed/.conda/envs/s-sam/bin/python', 'cpu_count': 16, 'cpu_count_logical': 32, 'cpu_freq': {'current': 3923.0690625, 'min': 2200.0, 'max': 3900.0}, 'cpu_freq_per_core': [{'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3968.026, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3756.782, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}], 'disk': {'total': 1.0, 'used': 0.042255401611328125}, 'gpu': 'NVIDIA GeForce RTX 4090', 'gpu_count': 1, 'gpu_devices': [{'name': 'NVIDIA GeForce RTX 4090', 'memory_total': 25757220864}], 'memory': {'total': 62.65230178833008}} +2024-10-18 21:20:19,860 INFO 
HandlerThread:3326268 [system_monitor.py:probe():214] Finished collecting system info +2024-10-18 21:20:19,860 INFO HandlerThread:3326268 [system_monitor.py:probe():217] Publishing system info +2024-10-18 21:20:19,860 DEBUG HandlerThread:3326268 [system_info.py:_save_pip():51] Saving list of pip packages installed into the current environment +2024-10-18 21:20:19,861 DEBUG HandlerThread:3326268 [system_info.py:_save_pip():67] Saving pip packages done +2024-10-18 21:20:19,862 DEBUG HandlerThread:3326268 [system_info.py:_save_conda():74] Saving list of conda packages installed into the current environment +2024-10-18 21:20:20,649 INFO Thread-13 :3326268 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/wandb-summary.json +2024-10-18 21:20:20,649 INFO Thread-13 :3326268 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/requirements.txt +2024-10-18 21:20:20,649 INFO Thread-13 :3326268 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/conda-environment.yaml +2024-10-18 21:20:22,800 DEBUG HandlerThread:3326268 [system_info.py:_save_conda():86] Saving conda packages done +2024-10-18 21:20:22,802 INFO HandlerThread:3326268 [system_monitor.py:probe():219] Finished publishing system info +2024-10-18 21:20:22,810 DEBUG SenderThread:3326268 [sender.py:send():336] send: files +2024-10-18 21:20:22,811 INFO SenderThread:3326268 [sender.py:_save_file():1332] saving file wandb-metadata.json with policy now +2024-10-18 21:20:22,815 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: stop_status +2024-10-18 21:20:22,815 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: stop_status +2024-10-18 21:20:23,127 DEBUG SenderThread:3326268 [sender.py:send():336] send: telemetry +2024-10-18 21:20:23,631 INFO wandb-upload_0:3326268 [upload_job.py:push():138] Uploaded file /tmp/slurm-abdelrahman.elsayed-44777/tmpcvru88_1wandb/j489umuy-wandb-metadata.json +2024-10-18 21:20:23,651 INFO Thread-13 :3326268 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/conda-environment.yaml +2024-10-18 21:20:23,651 INFO Thread-13 :3326268 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/output.log +2024-10-18 21:20:23,651 INFO Thread-13 :3326268 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/wandb-metadata.json +2024-10-18 21:20:24,130 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:20:24,294 DEBUG SenderThread:3326268 [sender.py:send():336] send: exit +2024-10-18 21:20:24,294 INFO SenderThread:3326268 [sender.py:send_exit():559] handling exit code: 1 +2024-10-18 21:20:24,295 INFO SenderThread:3326268 [sender.py:send_exit():561] handling runtime: 4 +2024-10-18 21:20:24,296 INFO SenderThread:3326268 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:20:24,296 INFO SenderThread:3326268 [sender.py:send_exit():567] send defer +2024-10-18 21:20:24,296 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:24,296 INFO 
HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 0 +2024-10-18 21:20:24,297 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:24,297 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 0 +2024-10-18 21:20:24,297 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 1 +2024-10-18 21:20:24,297 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:24,297 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 1 +2024-10-18 21:20:24,297 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:24,297 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 1 +2024-10-18 21:20:24,297 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 2 +2024-10-18 21:20:24,297 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:24,297 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 2 +2024-10-18 21:20:24,297 INFO HandlerThread:3326268 [system_monitor.py:finish():193] Stopping system monitor +2024-10-18 21:20:24,297 DEBUG SystemMonitor:3326268 [system_monitor.py:_start():161] Starting system metrics aggregation loop +2024-10-18 21:20:24,298 DEBUG SystemMonitor:3326268 [system_monitor.py:_start():168] Finished system metrics aggregation loop +2024-10-18 21:20:24,298 DEBUG SystemMonitor:3326268 [system_monitor.py:_start():172] Publishing last batch of metrics +2024-10-18 21:20:24,298 INFO HandlerThread:3326268 [interfaces.py:finish():199] Joined cpu monitor +2024-10-18 21:20:24,298 INFO HandlerThread:3326268 [interfaces.py:finish():199] Joined disk monitor +2024-10-18 21:20:24,332 INFO HandlerThread:3326268 [interfaces.py:finish():199] Joined gpu monitor +2024-10-18 21:20:24,332 INFO HandlerThread:3326268 [interfaces.py:finish():199] Joined memory monitor +2024-10-18 21:20:24,332 INFO HandlerThread:3326268 [interfaces.py:finish():199] Joined network monitor +2024-10-18 21:20:24,333 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:24,333 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 2 +2024-10-18 21:20:24,333 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 3 +2024-10-18 21:20:24,333 DEBUG SenderThread:3326268 [sender.py:send():336] send: stats +2024-10-18 21:20:24,333 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:24,333 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 3 +2024-10-18 21:20:24,333 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:24,333 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 3 +2024-10-18 21:20:24,333 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 4 +2024-10-18 21:20:24,333 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:24,333 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 4 +2024-10-18 21:20:24,334 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:24,334 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 4 +2024-10-18 21:20:24,334 INFO SenderThread:3326268 
[sender.py:transition_state():587] send defer: 5 +2024-10-18 21:20:24,334 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:24,334 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 5 +2024-10-18 21:20:24,334 DEBUG SenderThread:3326268 [sender.py:send():336] send: summary +2024-10-18 21:20:24,334 INFO SenderThread:3326268 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:20:24,335 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:24,335 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 5 +2024-10-18 21:20:24,335 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 6 +2024-10-18 21:20:24,335 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:24,335 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 6 +2024-10-18 21:20:24,335 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:24,335 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 6 +2024-10-18 21:20:24,337 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:20:24,629 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 7 +2024-10-18 21:20:24,629 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:24,630 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 7 +2024-10-18 21:20:24,630 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:24,630 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 7 +2024-10-18 21:20:24,651 INFO Thread-13 :3326268 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/config.yaml +2024-10-18 21:20:24,651 INFO Thread-13 :3326268 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/wandb-summary.json +2024-10-18 21:20:25,295 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: poll_exit +2024-10-18 21:20:25,653 INFO Thread-13 :3326268 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/output.log +2024-10-18 21:20:27,137 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 8 +2024-10-18 21:20:27,137 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: poll_exit +2024-10-18 21:20:27,138 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:27,138 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 8 +2024-10-18 21:20:27,138 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:27,138 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 8 +2024-10-18 21:20:27,150 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 9 +2024-10-18 21:20:27,150 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:27,150 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 9 
+2024-10-18 21:20:27,150 DEBUG SenderThread:3326268 [sender.py:send():336] send: artifact +2024-10-18 21:20:27,657 INFO Thread-13 :3326268 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/output.log +2024-10-18 21:20:28,597 INFO SenderThread:3326268 [sender.py:send_artifact():1428] sent artifact job-https___github.com_JayParanjape_SVDSAM.git_train_baselines.py - {'id': 'QXJ0aWZhY3Q6MTI4NTc5Njg5Ng==', 'digest': '5f8b773fe3bdf0ea6bd47c21f45cc4fb', 'state': 'COMMITTED', 'aliases': [{'artifactCollectionName': 'job-https___github.com_JayParanjape_SVDSAM.git_train_baselines.py', 'alias': 'latest'}, {'artifactCollectionName': 'job-https___github.com_JayParanjape_SVDSAM.git_train_baselines.py', 'alias': 'v1'}], 'artifactSequence': {'id': 'QXJ0aWZhY3RDb2xsZWN0aW9uOjQ3ODg1ODkyMQ==', 'latestArtifact': {'id': 'QXJ0aWZhY3Q6MTI4NTc5Njg5Ng==', 'versionIndex': 1}}, 'version': 'v1'} +2024-10-18 21:20:28,597 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:28,597 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 9 +2024-10-18 21:20:28,597 INFO SenderThread:3326268 [dir_watcher.py:finish():365] shutting down directory watcher +2024-10-18 21:20:28,657 INFO SenderThread:3326268 [dir_watcher.py:finish():395] scan: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files +2024-10-18 21:20:28,658 INFO SenderThread:3326268 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/requirements.txt requirements.txt +2024-10-18 21:20:28,658 INFO SenderThread:3326268 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/conda-environment.yaml conda-environment.yaml +2024-10-18 21:20:28,658 INFO SenderThread:3326268 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/output.log output.log +2024-10-18 21:20:28,658 INFO SenderThread:3326268 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/wandb-summary.json wandb-summary.json +2024-10-18 21:20:28,658 INFO SenderThread:3326268 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/wandb-metadata.json wandb-metadata.json +2024-10-18 21:20:28,658 INFO SenderThread:3326268 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/config.yaml config.yaml +2024-10-18 21:20:28,664 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 10 +2024-10-18 21:20:28,666 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:28,666 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 10 +2024-10-18 21:20:28,667 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:28,667 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 10 +2024-10-18 21:20:28,667 INFO SenderThread:3326268 [file_pusher.py:finish():164] shutting down file pusher +2024-10-18 21:20:29,311 INFO wandb-upload_1:3326268 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/requirements.txt +2024-10-18 21:20:29,495 INFO 
wandb-upload_0:3326268 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/conda-environment.yaml +2024-10-18 21:20:29,509 INFO wandb-upload_4:3326268 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/config.yaml +2024-10-18 21:20:29,521 INFO wandb-upload_3:3326268 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/wandb-summary.json +2024-10-18 21:20:29,521 INFO wandb-upload_2:3326268 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/files/output.log +2024-10-18 21:20:29,667 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:20:29,722 INFO Thread-12 :3326268 [sender.py:transition_state():587] send defer: 11 +2024-10-18 21:20:29,722 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:29,723 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 11 +2024-10-18 21:20:29,723 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:29,723 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 11 +2024-10-18 21:20:29,723 INFO SenderThread:3326268 [file_pusher.py:join():169] waiting for file pusher +2024-10-18 21:20:29,723 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 12 +2024-10-18 21:20:29,723 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:29,723 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 12 +2024-10-18 21:20:29,723 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:29,723 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 12 +2024-10-18 21:20:29,965 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 13 +2024-10-18 21:20:29,965 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:29,966 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 13 +2024-10-18 21:20:29,966 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:29,966 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 13 +2024-10-18 21:20:29,966 INFO SenderThread:3326268 [sender.py:transition_state():587] send defer: 14 +2024-10-18 21:20:29,966 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:20:29,966 INFO HandlerThread:3326268 [handler.py:handle_request_defer():170] handle defer: 14 +2024-10-18 21:20:29,966 DEBUG SenderThread:3326268 [sender.py:send():336] send: final +2024-10-18 21:20:29,966 DEBUG SenderThread:3326268 [sender.py:send():336] send: footer +2024-10-18 21:20:29,966 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: defer +2024-10-18 21:20:29,966 INFO SenderThread:3326268 [sender.py:send_request_defer():583] handle sender defer: 14 +2024-10-18 21:20:29,967 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: poll_exit +2024-10-18 21:20:29,967 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: poll_exit +2024-10-18 21:20:29,967 DEBUG HandlerThread:3326268 
[handler.py:handle_request():144] handle_request: server_info +2024-10-18 21:20:29,967 DEBUG SenderThread:3326268 [sender.py:send_request():363] send_request: server_info +2024-10-18 21:20:29,969 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: get_summary +2024-10-18 21:20:29,969 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: sampled_history +2024-10-18 21:20:30,222 INFO MainThread:3326268 [wandb_run.py:_footer_history_summary_info():3422] rendering history +2024-10-18 21:20:30,222 INFO MainThread:3326268 [wandb_run.py:_footer_history_summary_info():3454] rendering summary +2024-10-18 21:20:30,222 INFO MainThread:3326268 [wandb_run.py:_footer_sync_info():3380] logging synced files +2024-10-18 21:20:30,222 DEBUG HandlerThread:3326268 [handler.py:handle_request():144] handle_request: shutdown +2024-10-18 21:20:30,222 INFO HandlerThread:3326268 [handler.py:finish():842] shutting down handler +2024-10-18 21:20:30,967 INFO WriterThread:3326268 [datastore.py:close():298] close: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/run-p8w61ip6.wandb +2024-10-18 21:20:31,222 INFO SenderThread:3326268 [sender.py:finish():1504] shutting down sender +2024-10-18 21:20:31,222 INFO SenderThread:3326268 [file_pusher.py:finish():164] shutting down file pusher +2024-10-18 21:20:31,222 INFO SenderThread:3326268 [file_pusher.py:join():169] waiting for file pusher diff --git a/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/logs/debug.log b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..a4042c48eaabfd731b3e495e0b4fa1fdd5113a8b --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/logs/debug.log @@ -0,0 +1,27 @@ +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_setup.py:_flush():76] Configure stats pid to 3326145 +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_setup.py:_flush():76] Loading settings from /home/abdelrahman.elsayed/.config/wandb/settings +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_setup.py:_flush():76] Loading settings from /home/abdelrahman.elsayed/sarim_code/wandb/settings +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': 'train_baselines.py', 'program': '/home/abdelrahman.elsayed/sarim_code/train_baselines.py'} +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_init.py:_log_setup():506] Logging user logs to /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/logs/debug.log +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_init.py:_log_setup():507] Logging internal logs to /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212018-p8w61ip6/logs/debug-internal.log +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_init.py:init():546] calling init triggers +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_init.py:init():552] wandb.init called with sweep_config: {} +config: {'learning_rate': 0.0001, 'batch_size': 2, 'num_epochs': 500, 'reg_multiplier': 0.01} +2024-10-18 21:20:18,291 INFO MainThread:3326145 [wandb_init.py:init():602] starting backend +2024-10-18 21:20:18,291 INFO MainThread:3326145 
[wandb_init.py:init():606] setting up manager +2024-10-18 21:20:18,293 INFO MainThread:3326145 [backend.py:_multiprocessing_setup():106] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-10-18 21:20:18,294 INFO MainThread:3326145 [wandb_init.py:init():613] backend started and connected +2024-10-18 21:20:18,296 INFO MainThread:3326145 [wandb_init.py:init():701] updated telemetry +2024-10-18 21:20:18,344 INFO MainThread:3326145 [wandb_init.py:init():741] communicating run to backend with 60.0 second timeout +2024-10-18 21:20:19,649 INFO MainThread:3326145 [wandb_run.py:_on_init():2133] communicating current version +2024-10-18 21:20:19,724 INFO MainThread:3326145 [wandb_run.py:_on_init():2142] got version response upgrade_message: "wandb version 0.18.5 is available! To upgrade, please run:\n $ pip install wandb --upgrade" + +2024-10-18 21:20:19,725 INFO MainThread:3326145 [wandb_init.py:init():789] starting run threads in backend +2024-10-18 21:20:22,814 INFO MainThread:3326145 [wandb_run.py:_console_start():2114] atexit reg +2024-10-18 21:20:22,814 INFO MainThread:3326145 [wandb_run.py:_redirect():1969] redirect: SettingsConsole.WRAP_RAW +2024-10-18 21:20:22,814 INFO MainThread:3326145 [wandb_run.py:_redirect():2034] Wrapping output streams. +2024-10-18 21:20:22,814 INFO MainThread:3326145 [wandb_run.py:_redirect():2059] Redirects installed. +2024-10-18 21:20:22,815 INFO MainThread:3326145 [wandb_init.py:init():831] run started, returning control to user process +2024-10-18 21:20:31,225 WARNING MsgRouterThr:3326145 [router.py:message_loop():77] message_loop has been closed diff --git a/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/run-p8w61ip6.wandb b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/run-p8w61ip6.wandb new file mode 100644 index 0000000000000000000000000000000000000000..6ff9e555d856052130d3aa1431ecc35e330411bd Binary files /dev/null and b/AllinonSAM/wandb/run-20241018_212018-p8w61ip6/run-p8w61ip6.wandb differ diff --git a/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7a029db1357b890c0decfa95cf08b71a66c531d9 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml @@ -0,0 +1,267 @@ +name: s-sam +channels: + - pytorch + - nvidia + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - _openmp_mutex=5.1=1_gnu + - asttokens=2.0.5=pyhd3eb1b0_0 + - backcall=0.2.0=pyhd3eb1b0_0 + - ca-certificates=2023.01.10=h06a4308_0 + - certifi=2022.12.7=py38h06a4308_0 + - comm=0.1.2=py38h06a4308_0 + - cuda=11.7.1=0 + - cuda-cccl=11.7.91=0 + - cuda-command-line-tools=11.7.1=0 + - cuda-compiler=11.7.1=0 + - cuda-cudart=11.7.99=0 + - cuda-cudart-dev=11.7.99=0 + - cuda-cuobjdump=11.7.91=0 + - cuda-cupti=11.7.101=0 + - cuda-cuxxfilt=11.7.91=0 + - cuda-demo-suite=12.0.76=0 + - cuda-documentation=12.0.76=0 + - cuda-driver-dev=11.7.99=0 + - cuda-gdb=12.0.90=0 + - cuda-libraries=11.7.1=0 + - cuda-libraries-dev=11.7.1=0 + - cuda-memcheck=11.8.86=0 + - cuda-nsight=12.0.78=0 + - cuda-nsight-compute=12.0.0=0 + - cuda-nvcc=11.7.99=0 + - cuda-nvdisasm=12.0.76=0 + - cuda-nvml-dev=11.7.91=0 + - cuda-nvprof=12.0.90=0 + - cuda-nvprune=11.7.91=0 + - cuda-nvrtc=11.7.99=0 + - cuda-nvrtc-dev=11.7.99=0 + - cuda-nvtx=11.7.91=0 + - cuda-nvvp=12.0.90=0 + - cuda-runtime=11.7.1=0 + - cuda-sanitizer-api=12.0.90=0 + - cuda-toolkit=11.7.1=0 + - cuda-tools=11.7.1=0 + - cuda-visual-tools=11.7.1=0 + 
- cudatoolkit=11.0.221=h6bb024c_0 + - debugpy=1.5.1=py38h295c915_0 + - decorator=5.1.1=pyhd3eb1b0_0 + - executing=0.8.3=pyhd3eb1b0_0 + - flit-core=3.8.0=py38h06a4308_0 + - gds-tools=1.5.0.59=0 + - importlib_metadata=6.0.0=hd3eb1b0_0 + - ipykernel=6.19.2=py38hb070fc8_0 + - ipython=8.12.0=py38h06a4308_0 + - jedi=0.18.1=py38h06a4308_1 + - jupyter_client=8.1.0=py38h06a4308_0 + - jupyter_core=5.3.0=py38h06a4308_0 + - ld_impl_linux-64=2.38=h1181459_1 + - libcublas=11.10.3.66=0 + - libcublas-dev=11.10.3.66=0 + - libcufft=10.7.2.124=h4fbf590_0 + - libcufft-dev=10.7.2.124=h98a8f43_0 + - libcufile=1.5.0.59=0 + - libcufile-dev=1.5.0.59=0 + - libcurand=10.3.1.50=0 + - libcurand-dev=10.3.1.50=0 + - libcusolver=11.4.0.1=0 + - libcusolver-dev=11.4.0.1=0 + - libcusparse=11.7.4.91=0 + - libcusparse-dev=11.7.4.91=0 + - libffi=3.4.2=h6a678d5_6 + - libgcc-ng=11.2.0=h1234567_1 + - libgomp=11.2.0=h1234567_1 + - libnpp=11.7.4.75=0 + - libnpp-dev=11.7.4.75=0 + - libnvjpeg=11.8.0.2=0 + - libnvjpeg-dev=11.8.0.2=0 + - libsodium=1.0.18=h7b6447c_0 + - libstdcxx-ng=11.2.0=h1234567_1 + - matplotlib-inline=0.1.6=py38h06a4308_0 + - ncurses=6.4=h6a678d5_0 + - nest-asyncio=1.5.6=py38h06a4308_0 + - nsight-compute=2022.4.0.15=0 + - openssl=1.1.1t=h7f8727e_0 + - parso=0.8.3=pyhd3eb1b0_0 + - pexpect=4.8.0=pyhd3eb1b0_3 + - pickleshare=0.7.5=pyhd3eb1b0_1003 + - pip=23.0.1=py38h06a4308_0 + - platformdirs=2.5.2=py38h06a4308_0 + - prompt-toolkit=3.0.36=py38h06a4308_0 + - ptyprocess=0.7.0=pyhd3eb1b0_2 + - pure_eval=0.2.2=pyhd3eb1b0_0 + - pygments=2.11.2=pyhd3eb1b0_0 + - python=3.8.16=h7a1cb2a_3 + - python-dateutil=2.8.2=pyhd3eb1b0_0 + - pytorch-cuda=11.7=h67b0de4_1 + - pyzmq=23.2.0=py38h6a678d5_0 + - readline=8.2=h5eee18b_0 + - setuptools=65.6.3=py38h06a4308_0 + - six=1.16.0=pyhd3eb1b0_1 + - sqlite=3.41.1=h5eee18b_0 + - stack_data=0.2.0=pyhd3eb1b0_0 + - tk=8.6.12=h1ccaba5_0 + - tornado=6.2=py38h5eee18b_0 + - traitlets=5.7.1=py38h06a4308_0 + - typing_extensions=4.4.0=py38h06a4308_0 + - wcwidth=0.2.5=pyhd3eb1b0_0 + - xz=5.2.10=h5eee18b_1 + - zeromq=4.3.4=h2531618_0 + - zipp=3.11.0=py38h06a4308_0 + - zlib=1.2.13=h5eee18b_0 + - pip: + - absl-py==1.3.0 + - addict==2.4.0 + - appdirs==1.4.4 + - argparse==1.4.0 + - batchgenerators==0.25 + - beautifulsoup4==4.11.1 + - cachetools==5.2.0 + - chardet==3.0.4 + - charset-normalizer==3.1.0 + - click==8.1.3 + - cmake==3.26.3 + - contextlib2==21.6.0 + - contourpy==1.0.7 + - crfseg==1.0.0 + - cycler==0.11.0 + - docker-pycreds==0.4.0 + - efficientnet-pytorch==0.7.1 + - einops==0.8.0 + - entrypoints==0.3 + - exceptiongroup==1.1.1 + - filelock==3.8.2 + - flake8==3.7.9 + - fonttools==4.39.3 + - ftfy==6.1.1 + - future==0.18.2 + - gdown==4.6.0 + - gensim==4.3.1 + - gitdb==4.0.10 + - gitpython==3.1.31 + - google-auth==2.15.0 + - google-auth-oauthlib==0.4.6 + - googletrans==3.0.0 + - grpcio==1.51.1 + - h11==0.9.0 + - h2==3.2.0 + - h5py==3.8.0 + - hpack==3.0.0 + - hstspreload==2023.1.1 + - httpcore==0.9.1 + - httpx==0.13.3 + - huggingface-hub==0.11.1 + - hyperframe==5.2.0 + - idna==2.10 + - imageio==2.28.0 + - importlib-metadata==5.2.0 + - importlib-resources==5.12.0 + - iniconfig==2.0.0 + - ipynb-py-convert==0.4.6 + - isort==4.3.21 + - jinja2==3.1.2 + - joblib==1.2.0 + - kiwisolver==1.4.4 + - lazy-loader==0.2 + - linecache2==1.0.0 + - lit==16.0.3 + - littleutils==0.2.2 + - llvmlite==0.41.1 + - markdown==3.4.1 + - markupsafe==2.1.1 + - matplotlib==3.7.1 + - mccabe==0.6.1 + - ml-collections==0.1.1 + - mpmath==1.3.0 + - munch==3.0.0 + - networkx==3.1 + - nibabel==5.1.0 + - nltk==3.8.1 + - numba==0.58.1 + - 
numpy==1.24.2 + - nvidia-cublas-cu11==11.10.3.66 + - nvidia-cuda-cupti-cu11==11.7.101 + - nvidia-cuda-nvrtc-cu11==11.7.99 + - nvidia-cuda-runtime-cu11==11.7.99 + - nvidia-cudnn-cu11==8.5.0.96 + - nvidia-cufft-cu11==10.9.0.58 + - nvidia-curand-cu11==10.2.10.91 + - nvidia-cusolver-cu11==11.4.0.1 + - nvidia-cusparse-cu11==11.7.4.91 + - nvidia-nccl-cu11==2.14.3 + - nvidia-nvtx-cu11==11.7.91 + - oauthlib==3.2.2 + - ogb==1.3.5 + - opencv-python==4.6.0.66 + - outdated==0.2.2 + - packaging==22.0 + - pandas==1.5.2 + - pathtools==0.1.2 + - pillow==9.5.0 + - pluggy==1.0.0 + - pretrained-backbones-unet==0.0.1 + - pretrainedmodels==0.7.4 + - protobuf==3.20.3 + - psutil==5.9.4 + - pyasn1==0.4.8 + - pyasn1-modules==0.2.8 + - pycocotools==2.0.6 + - pycodestyle==2.5.0 + - pyflakes==2.1.1 + - pynndescent==0.5.13 + - pyparsing==3.0.9 + - pysocks==1.7.1 + - pytest==7.3.1 + - pytz==2022.7 + - pywavelets==1.4.1 + - pyyaml==6.0 + - regex==2022.10.31 + - requests==2.28.2 + - requests-oauthlib==1.3.1 + - rfc3986==1.5.0 + - rsa==4.9 + - safetensors==0.4.5 + - schedulefree==1.2.7 + - scikit-image==0.20.0 + - scikit-learn==1.2.0 + - scipy==1.9.1 + - seaborn==0.13.2 + - sentry-sdk==1.18.0 + - setproctitle==1.3.2 + - simpleitk==2.2.1 + - smart-open==6.3.0 + - smmap==5.0.0 + - sniffio==1.3.0 + - soupsieve==2.3.2.post1 + - supervision==0.3.2 + - surface-distance-based-measures==0.1 + - sympy==1.12 + - tabulate==0.9.0 + - tb-nightly==2.12.0a20221225 + - tensorboard-data-server==0.6.1 + - tensorboard-plugin-wit==1.8.1 + - textaugment==1.3.4 + - textblob==0.17.1 + - threadpoolctl==3.1.0 + - tifffile==2023.4.12 + - timm==0.6.12 + - tokenizers==0.13.3 + - tomli==2.0.1 + - torch==2.0.1 + - torchaudio==2.0.2 + - torchvision==0.15.2 + - tqdm==4.64.1 + - traceback2==1.4.0 + - transformers==4.27.4 + - triton==2.0.0 + - umap-learn==0.5.6 + - unittest2==1.1.0 + - urllib3==1.26.15 + - wandb==0.14.0 + - werkzeug==2.2.2 + - wget==3.2 + - wheel==0.38.4 + - wilds==1.2.2 + - yacs==0.1.8 + - yapf==0.29.0 +prefix: /home/abdelrahman.elsayed/.conda/envs/s-sam diff --git a/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/config.yaml b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..975a934b6e577eafc2cc7e10b35f43908014c05a --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/config.yaml @@ -0,0 +1,44 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + cli_version: 0.14.0 + framework: torch + is_jupyter_run: false + is_kaggle_kernel: false + python_version: 3.8.16 + start_time: 1729272106.164478 + t: + 1: + - 1 + - 41 + - 49 + - 55 + - 63 + 2: + - 1 + - 41 + - 49 + - 55 + - 63 + 3: + - 13 + - 16 + - 23 + 4: 3.8.16 + 5: 0.14.0 + 8: + - 5 +batch_size: + desc: null + value: 2 +learning_rate: + desc: null + value: 0.0001 +num_epochs: + desc: null + value: 500 +reg_multiplier: + desc: null + value: 0.01 diff --git a/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/output.log b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..f43cabdab9acead14880153fd26713ed6ad578af --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/output.log @@ -0,0 +1,27 @@ +Traceback (most recent call last): + File "/home/abdelrahman.elsayed/sarim_code/train_baselines.py", line 266, in + main_train(data_config, model_config, args.pretrained_path, args.save_path, args.training_strategy, device=args.device) + File 
"/home/abdelrahman.elsayed/sarim_code/train_baselines.py", line 234, in main_train + model = train_dl( + File "/home/abdelrahman.elsayed/sarim_code/train.py", line 218, in train_dl + outputs, reg_loss = model(inputs, text) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/vit_seg_modeling.py", line 390, in forward + x = self.decoder(x, features) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/vit_seg_modeling.py", line 366, in forward + x = decoder_block(x, skip=skip) + File "/home/abdelrahman.elsayed/.conda/envs/s-sam/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl + return forward_call(*args, **kwargs) + File "/home/abdelrahman.elsayed/sarim_code/vit_seg_modeling.py", line 312, in forward + x = torch.cat([x, skip], dim=1) +RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 32 but got size 64 for tensor number 1 in the list. +Training parameters: +---------- +number of trainable parameters: 107681297 +batch size: 2 +num epochs: 500 +Epoch 0/499 +---------- \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/requirements.txt b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f0720690c2daa9a35ddb12cd902cf9cf8de43d99 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/requirements.txt @@ -0,0 +1,188 @@ +absl-py==1.3.0 +addict==2.4.0 +appdirs==1.4.4 +argparse==1.4.0 +asttokens==2.0.5 +backcall==0.2.0 +batchgenerators==0.25 +beautifulsoup4==4.11.1 +cachetools==5.2.0 +certifi==2022.12.7 +chardet==3.0.4 +charset-normalizer==3.1.0 +click==8.1.3 +cmake==3.26.3 +comm==0.1.2 +contextlib2==21.6.0 +contourpy==1.0.7 +crfseg==1.0.0 +cycler==0.11.0 +debugpy==1.5.1 +decorator==5.1.1 +docker-pycreds==0.4.0 +efficientnet-pytorch==0.7.1 +einops==0.8.0 +entrypoints==0.3 +exceptiongroup==1.1.1 +executing==0.8.3 +filelock==3.8.2 +flake8==3.7.9 +flit-core==3.8.0 +fonttools==4.39.3 +ftfy==6.1.1 +future==0.18.2 +gdown==4.6.0 +gensim==4.3.1 +gitdb==4.0.10 +gitpython==3.1.31 +google-auth-oauthlib==0.4.6 +google-auth==2.15.0 +googletrans==3.0.0 +grpcio==1.51.1 +h11==0.9.0 +h2==3.2.0 +h5py==3.8.0 +hpack==3.0.0 +hstspreload==2023.1.1 +httpcore==0.9.1 +httpx==0.13.3 +huggingface-hub==0.11.1 +hyperframe==5.2.0 +idna==2.10 +imageio==2.28.0 +importlib-metadata==5.2.0 +importlib-resources==5.12.0 +iniconfig==2.0.0 +ipykernel==6.19.2 +ipynb-py-convert==0.4.6 +ipython==8.12.0 +isort==4.3.21 +jedi==0.18.1 +jinja2==3.1.2 +joblib==1.2.0 +jupyter-client==8.1.0 +jupyter-core==5.3.0 +kiwisolver==1.4.4 +lazy-loader==0.2 +linecache2==1.0.0 +lit==16.0.3 +littleutils==0.2.2 +llvmlite==0.41.1 +markdown==3.4.1 +markupsafe==2.1.1 +matplotlib-inline==0.1.6 +matplotlib==3.7.1 +mccabe==0.6.1 +ml-collections==0.1.1 +mpmath==1.3.0 +munch==3.0.0 +nest-asyncio==1.5.6 +networkx==3.1 +nibabel==5.1.0 +nltk==3.8.1 +numba==0.58.1 +numpy==1.24.2 +nvidia-cublas-cu11==11.10.3.66 +nvidia-cuda-cupti-cu11==11.7.101 +nvidia-cuda-nvrtc-cu11==11.7.99 +nvidia-cuda-runtime-cu11==11.7.99 +nvidia-cudnn-cu11==8.5.0.96 +nvidia-cufft-cu11==10.9.0.58 +nvidia-curand-cu11==10.2.10.91 +nvidia-cusolver-cu11==11.4.0.1 
+nvidia-cusparse-cu11==11.7.4.91 +nvidia-nccl-cu11==2.14.3 +nvidia-nvtx-cu11==11.7.91 +oauthlib==3.2.2 +ogb==1.3.5 +opencv-python==4.6.0.66 +outdated==0.2.2 +packaging==22.0 +pandas==1.5.2 +parso==0.8.3 +pathtools==0.1.2 +pexpect==4.8.0 +pickleshare==0.7.5 +pillow==9.5.0 +pip==23.0.1 +platformdirs==2.5.2 +pluggy==1.0.0 +pretrained-backbones-unet==0.0.1 +pretrainedmodels==0.7.4 +prompt-toolkit==3.0.36 +protobuf==3.20.3 +psutil==5.9.4 +ptyprocess==0.7.0 +pure-eval==0.2.2 +pyasn1-modules==0.2.8 +pyasn1==0.4.8 +pycocotools==2.0.6 +pycodestyle==2.5.0 +pyflakes==2.1.1 +pygments==2.11.2 +pynndescent==0.5.13 +pyparsing==3.0.9 +pysocks==1.7.1 +pytest==7.3.1 +python-dateutil==2.8.2 +pytz==2022.7 +pywavelets==1.4.1 +pyyaml==6.0 +pyzmq==23.2.0 +regex==2022.10.31 +requests-oauthlib==1.3.1 +requests==2.28.2 +rfc3986==1.5.0 +rsa==4.9 +safetensors==0.4.5 +schedulefree==1.2.7 +scikit-image==0.20.0 +scikit-learn==1.2.0 +scipy==1.9.1 +seaborn==0.13.2 +sentry-sdk==1.18.0 +setproctitle==1.3.2 +setuptools==65.6.3 +simpleitk==2.2.1 +six==1.16.0 +smart-open==6.3.0 +smmap==5.0.0 +sniffio==1.3.0 +soupsieve==2.3.2.post1 +stack-data==0.2.0 +supervision==0.3.2 +surface-distance-based-measures==0.1 +sympy==1.12 +tabulate==0.9.0 +tb-nightly==2.12.0a20221225 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +textaugment==1.3.4 +textblob==0.17.1 +threadpoolctl==3.1.0 +tifffile==2023.4.12 +timm==0.6.12 +tokenizers==0.13.3 +tomli==2.0.1 +torch==2.0.1 +torchaudio==2.0.2 +torchvision==0.15.2 +tornado==6.2 +tqdm==4.64.1 +traceback2==1.4.0 +traitlets==5.7.1 +transformers==4.27.4 +triton==2.0.0 +typing-extensions==4.4.0 +umap-learn==0.5.6 +unittest2==1.1.0 +urllib3==1.26.15 +wandb==0.14.0 +wcwidth==0.2.5 +werkzeug==2.2.2 +wget==3.2 +wheel==0.38.4 +wilds==1.2.2 +yacs==0.1.8 +yapf==0.29.0 +zipp==3.11.0 \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/wandb-metadata.json b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..2b9718071a3be1ce657d44dfdb72f192a8de8077 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/wandb-metadata.json @@ -0,0 +1,205 @@ +{ + "os": "Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.17", + "python": "3.8.16", + "heartbeatAt": "2024-10-18T17:21:47.040398", + "startedAt": "2024-10-18T17:21:46.154298", + "docker": null, + "cuda": null, + "args": [], + "state": "running", + "program": "/home/abdelrahman.elsayed/sarim_code/train_baselines.py", + "codePath": "train_baselines.py", + "git": { + "remote": "https://github.com/JayParanjape/SVDSAM.git", + "commit": "5936d0eff64d84fbefed6ecfe4bcc841459c2fc3" + }, + "email": "amra51548@gmail.com", + "root": "/home/abdelrahman.elsayed/sarim_code", + "host": "ws-l6-014", + "username": "abdelrahman.elsayed", + "executable": "/home/abdelrahman.elsayed/.conda/envs/s-sam/bin/python", + "cpu_count": 16, + "cpu_count_logical": 32, + "cpu_freq": { + "current": 3881.4324687499998, + "min": 2200.0, + "max": 3900.0 + }, + "cpu_freq_per_core": [ + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 
2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3613.224, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + }, + { + "current": 3900.0, + "min": 2200.0, + "max": 3900.0 + } + ], + "disk": { + "total": 1.0, + "used": 0.042255401611328125 + }, + "gpu": "NVIDIA GeForce RTX 4090", + "gpu_count": 1, + "gpu_devices": [ + { + "name": "NVIDIA GeForce RTX 4090", + "memory_total": 25757220864 + } + ], + "memory": { + "total": 62.65230178833008 + } +} diff --git a/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..b1d4cf96d64955a2ae8b982ce021e29fde546a1a --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 4}} \ No newline at end of file diff --git a/AllinonSAM/wandb/run-20241018_212146-w102ona2/logs/debug-internal.log b/AllinonSAM/wandb/run-20241018_212146-w102ona2/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..b066a9d3fd1975d59f8c20de2321ebc015bc9e96 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212146-w102ona2/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-10-18 21:21:46,165 INFO StreamThr :3327812 [internal.py:wandb_internal():87] W&B internal server running at pid: 3327812, started at: 2024-10-18 21:21:46.164863 +2024-10-18 21:21:46,166 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: status +2024-10-18 21:21:46,167 INFO WriterThread:3327812 [datastore.py:open_for_write():85] open: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/run-w102ona2.wandb +2024-10-18 21:21:46,168 DEBUG SenderThread:3327812 [sender.py:send():336] send: header +2024-10-18 21:21:46,222 DEBUG SenderThread:3327812 [sender.py:send():336] send: run +2024-10-18 21:21:46,898 INFO SenderThread:3327812 [dir_watcher.py:__init__():219] watching files in: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files +2024-10-18 21:21:46,898 INFO SenderThread:3327812 [sender.py:_start_run_threads():1078] run started: w102ona2 with start time 
1729272106.164478 +2024-10-18 21:21:46,898 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: summary_record +2024-10-18 21:21:46,898 INFO SenderThread:3327812 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:21:46,900 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: check_version +2024-10-18 21:21:46,900 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: check_version +2024-10-18 21:21:46,969 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: run_start +2024-10-18 21:21:47,000 DEBUG HandlerThread:3327812 [system_info.py:__init__():31] System info init +2024-10-18 21:21:47,000 DEBUG HandlerThread:3327812 [system_info.py:__init__():46] System info init done +2024-10-18 21:21:47,000 INFO HandlerThread:3327812 [system_monitor.py:start():183] Starting system monitor +2024-10-18 21:21:47,000 INFO SystemMonitor:3327812 [system_monitor.py:_start():147] Starting system asset monitoring threads +2024-10-18 21:21:47,000 INFO HandlerThread:3327812 [system_monitor.py:probe():204] Collecting system info +2024-10-18 21:21:47,000 INFO SystemMonitor:3327812 [interfaces.py:start():187] Started cpu monitoring +2024-10-18 21:21:47,001 INFO SystemMonitor:3327812 [interfaces.py:start():187] Started disk monitoring +2024-10-18 21:21:47,002 INFO SystemMonitor:3327812 [interfaces.py:start():187] Started gpu monitoring +2024-10-18 21:21:47,002 INFO SystemMonitor:3327812 [interfaces.py:start():187] Started memory monitoring +2024-10-18 21:21:47,002 INFO SystemMonitor:3327812 [interfaces.py:start():187] Started network monitoring +2024-10-18 21:21:47,040 DEBUG HandlerThread:3327812 [system_info.py:probe():195] Probing system +2024-10-18 21:21:47,045 DEBUG HandlerThread:3327812 [system_info.py:_probe_git():180] Probing git +2024-10-18 21:21:47,059 DEBUG HandlerThread:3327812 [system_info.py:_probe_git():188] Probing git done +2024-10-18 21:21:47,060 DEBUG HandlerThread:3327812 [system_info.py:probe():240] Probing system done +2024-10-18 21:21:47,060 DEBUG HandlerThread:3327812 [system_monitor.py:probe():213] {'os': 'Linux-5.15.133-ql-generic-13.0-9-x86_64-with-glibc2.17', 'python': '3.8.16', 'heartbeatAt': '2024-10-18T17:21:47.040398', 'startedAt': '2024-10-18T17:21:46.154298', 'docker': None, 'cuda': None, 'args': (), 'state': 'running', 'program': '/home/abdelrahman.elsayed/sarim_code/train_baselines.py', 'codePath': 'train_baselines.py', 'git': {'remote': 'https://github.com/JayParanjape/SVDSAM.git', 'commit': '5936d0eff64d84fbefed6ecfe4bcc841459c2fc3'}, 'email': 'amra51548@gmail.com', 'root': '/home/abdelrahman.elsayed/sarim_code', 'host': 'ws-l6-014', 'username': 'abdelrahman.elsayed', 'executable': '/home/abdelrahman.elsayed/.conda/envs/s-sam/bin/python', 'cpu_count': 16, 'cpu_count_logical': 32, 'cpu_freq': {'current': 3881.4324687499998, 'min': 2200.0, 'max': 3900.0}, 'cpu_freq_per_core': [{'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 
2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3613.224, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}, {'current': 3900.0, 'min': 2200.0, 'max': 3900.0}], 'disk': {'total': 1.0, 'used': 0.042255401611328125}, 'gpu': 'NVIDIA GeForce RTX 4090', 'gpu_count': 1, 'gpu_devices': [{'name': 'NVIDIA GeForce RTX 4090', 'memory_total': 25757220864}], 'memory': {'total': 62.65230178833008}} +2024-10-18 21:21:47,060 INFO HandlerThread:3327812 [system_monitor.py:probe():214] Finished collecting system info +2024-10-18 21:21:47,060 INFO HandlerThread:3327812 [system_monitor.py:probe():217] Publishing system info +2024-10-18 21:21:47,060 DEBUG HandlerThread:3327812 [system_info.py:_save_pip():51] Saving list of pip packages installed into the current environment +2024-10-18 21:21:47,061 DEBUG HandlerThread:3327812 [system_info.py:_save_pip():67] Saving pip packages done +2024-10-18 21:21:47,061 DEBUG HandlerThread:3327812 [system_info.py:_save_conda():74] Saving list of conda packages installed into the current environment +2024-10-18 21:21:47,899 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json +2024-10-18 21:21:47,899 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml +2024-10-18 21:21:47,899 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/requirements.txt +2024-10-18 21:21:49,899 DEBUG HandlerThread:3327812 [system_info.py:_save_conda():86] Saving conda packages done +2024-10-18 21:21:49,900 INFO HandlerThread:3327812 [system_monitor.py:probe():219] Finished publishing system info +2024-10-18 21:21:49,901 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml +2024-10-18 21:21:49,901 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-metadata.json +2024-10-18 21:21:49,908 DEBUG SenderThread:3327812 [sender.py:send():336] send: files +2024-10-18 21:21:49,908 INFO SenderThread:3327812 [sender.py:_save_file():1332] saving file wandb-metadata.json with policy now +2024-10-18 21:21:49,912 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: stop_status +2024-10-18 21:21:49,913 DEBUG 
SenderThread:3327812 [sender.py:send_request():363] send_request: stop_status +2024-10-18 21:21:50,224 DEBUG SenderThread:3327812 [sender.py:send():336] send: telemetry +2024-10-18 21:21:50,813 INFO wandb-upload_0:3327812 [upload_job.py:push():138] Uploaded file /tmp/slurm-abdelrahman.elsayed-44778/tmps66kwbdjwandb/ai9lj76l-wandb-metadata.json +2024-10-18 21:21:50,901 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_created():278] file/dir created: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/output.log +2024-10-18 21:21:51,162 DEBUG SenderThread:3327812 [sender.py:send():336] send: exit +2024-10-18 21:21:51,162 INFO SenderThread:3327812 [sender.py:send_exit():559] handling exit code: 1 +2024-10-18 21:21:51,162 INFO SenderThread:3327812 [sender.py:send_exit():561] handling runtime: 4 +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:send_exit():567] send defer +2024-10-18 21:21:51,164 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,164 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 0 +2024-10-18 21:21:51,164 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 0 +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 1 +2024-10-18 21:21:51,164 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,164 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 1 +2024-10-18 21:21:51,164 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 1 +2024-10-18 21:21:51,164 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 2 +2024-10-18 21:21:51,164 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,164 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 2 +2024-10-18 21:21:51,165 INFO HandlerThread:3327812 [system_monitor.py:finish():193] Stopping system monitor +2024-10-18 21:21:51,165 DEBUG SystemMonitor:3327812 [system_monitor.py:_start():161] Starting system metrics aggregation loop +2024-10-18 21:21:51,165 DEBUG SystemMonitor:3327812 [system_monitor.py:_start():168] Finished system metrics aggregation loop +2024-10-18 21:21:51,165 DEBUG SystemMonitor:3327812 [system_monitor.py:_start():172] Publishing last batch of metrics +2024-10-18 21:21:51,165 INFO HandlerThread:3327812 [interfaces.py:finish():199] Joined cpu monitor +2024-10-18 21:21:51,166 INFO HandlerThread:3327812 [interfaces.py:finish():199] Joined disk monitor +2024-10-18 21:21:51,200 INFO HandlerThread:3327812 [interfaces.py:finish():199] Joined gpu monitor +2024-10-18 21:21:51,201 INFO HandlerThread:3327812 [interfaces.py:finish():199] Joined memory monitor +2024-10-18 21:21:51,201 INFO HandlerThread:3327812 [interfaces.py:finish():199] Joined network monitor +2024-10-18 21:21:51,201 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,201 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 2 +2024-10-18 21:21:51,201 
INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 3 +2024-10-18 21:21:51,201 DEBUG SenderThread:3327812 [sender.py:send():336] send: stats +2024-10-18 21:21:51,201 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,202 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 3 +2024-10-18 21:21:51,202 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:21:51,202 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,202 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 3 +2024-10-18 21:21:51,202 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 4 +2024-10-18 21:21:51,202 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,202 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 4 +2024-10-18 21:21:51,202 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,202 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 4 +2024-10-18 21:21:51,202 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 5 +2024-10-18 21:21:51,202 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,202 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 5 +2024-10-18 21:21:51,202 DEBUG SenderThread:3327812 [sender.py:send():336] send: summary +2024-10-18 21:21:51,203 INFO SenderThread:3327812 [sender.py:_save_file():1332] saving file wandb-summary.json with policy end +2024-10-18 21:21:51,203 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,203 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 5 +2024-10-18 21:21:51,203 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 6 +2024-10-18 21:21:51,203 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,203 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 6 +2024-10-18 21:21:51,203 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,203 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 6 +2024-10-18 21:21:51,206 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:21:51,483 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 7 +2024-10-18 21:21:51,483 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:51,483 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 7 +2024-10-18 21:21:51,483 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:51,483 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 7 +2024-10-18 21:21:51,902 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json +2024-10-18 21:21:51,903 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_modified():295] file/dir modified: 
/home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/config.yaml +2024-10-18 21:21:52,163 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: poll_exit +2024-10-18 21:21:52,904 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/output.log +2024-10-18 21:21:54,233 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 8 +2024-10-18 21:21:54,233 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: poll_exit +2024-10-18 21:21:54,233 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:54,233 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 8 +2024-10-18 21:21:54,234 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:54,234 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 8 +2024-10-18 21:21:54,246 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 9 +2024-10-18 21:21:54,246 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:54,246 DEBUG SenderThread:3327812 [sender.py:send():336] send: artifact +2024-10-18 21:21:54,247 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 9 +2024-10-18 21:21:54,906 INFO Thread-13 :3327812 [dir_watcher.py:_on_file_modified():295] file/dir modified: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/output.log +2024-10-18 21:21:55,665 INFO SenderThread:3327812 [sender.py:send_artifact():1428] sent artifact job-https___github.com_JayParanjape_SVDSAM.git_train_baselines.py - {'id': 'QXJ0aWZhY3Q6MTI4NTc5Njg5Ng==', 'digest': '5f8b773fe3bdf0ea6bd47c21f45cc4fb', 'state': 'COMMITTED', 'aliases': [{'artifactCollectionName': 'job-https___github.com_JayParanjape_SVDSAM.git_train_baselines.py', 'alias': 'latest'}, {'artifactCollectionName': 'job-https___github.com_JayParanjape_SVDSAM.git_train_baselines.py', 'alias': 'v1'}], 'artifactSequence': {'id': 'QXJ0aWZhY3RDb2xsZWN0aW9uOjQ3ODg1ODkyMQ==', 'latestArtifact': {'id': 'QXJ0aWZhY3Q6MTI4NTc5Njg5Ng==', 'versionIndex': 1}}, 'version': 'v1'} +2024-10-18 21:21:55,665 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:55,665 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 9 +2024-10-18 21:21:55,665 INFO SenderThread:3327812 [dir_watcher.py:finish():365] shutting down directory watcher +2024-10-18 21:21:55,907 INFO SenderThread:3327812 [dir_watcher.py:finish():395] scan: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files +2024-10-18 21:21:55,908 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/output.log output.log +2024-10-18 21:21:55,908 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-metadata.json wandb-metadata.json +2024-10-18 21:21:55,908 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml conda-environment.yaml +2024-10-18 21:21:55,908 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: 
/home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json wandb-summary.json +2024-10-18 21:21:55,910 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/config.yaml config.yaml +2024-10-18 21:21:55,910 INFO SenderThread:3327812 [dir_watcher.py:finish():409] scan save: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/requirements.txt requirements.txt +2024-10-18 21:21:55,910 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 10 +2024-10-18 21:21:55,911 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:55,911 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 10 +2024-10-18 21:21:55,914 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:55,914 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 10 +2024-10-18 21:21:55,914 INFO SenderThread:3327812 [file_pusher.py:finish():164] shutting down file pusher +2024-10-18 21:21:56,529 INFO wandb-upload_0:3327812 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/output.log +2024-10-18 21:21:56,680 INFO wandb-upload_3:3327812 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/config.yaml +2024-10-18 21:21:56,741 INFO wandb-upload_2:3327812 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/wandb-summary.json +2024-10-18 21:21:56,790 INFO wandb-upload_4:3327812 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/requirements.txt +2024-10-18 21:21:56,801 INFO wandb-upload_1:3327812 [upload_job.py:push():138] Uploaded file /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/files/conda-environment.yaml +2024-10-18 21:21:56,915 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: status_report +2024-10-18 21:21:57,002 INFO Thread-12 :3327812 [sender.py:transition_state():587] send defer: 11 +2024-10-18 21:21:57,002 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:57,002 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 11 +2024-10-18 21:21:57,003 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:57,003 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 11 +2024-10-18 21:21:57,003 INFO SenderThread:3327812 [file_pusher.py:join():169] waiting for file pusher +2024-10-18 21:21:57,003 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 12 +2024-10-18 21:21:57,003 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:57,003 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 12 +2024-10-18 21:21:57,003 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:57,003 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 12 +2024-10-18 21:21:57,166 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: keepalive +2024-10-18 21:21:57,365 INFO SenderThread:3327812 
[sender.py:transition_state():587] send defer: 13 +2024-10-18 21:21:57,365 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:57,365 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 13 +2024-10-18 21:21:57,365 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:57,365 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 13 +2024-10-18 21:21:57,365 INFO SenderThread:3327812 [sender.py:transition_state():587] send defer: 14 +2024-10-18 21:21:57,366 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: defer +2024-10-18 21:21:57,366 INFO HandlerThread:3327812 [handler.py:handle_request_defer():170] handle defer: 14 +2024-10-18 21:21:57,366 DEBUG SenderThread:3327812 [sender.py:send():336] send: final +2024-10-18 21:21:57,366 DEBUG SenderThread:3327812 [sender.py:send():336] send: footer +2024-10-18 21:21:57,366 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: defer +2024-10-18 21:21:57,366 INFO SenderThread:3327812 [sender.py:send_request_defer():583] handle sender defer: 14 +2024-10-18 21:21:57,366 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: poll_exit +2024-10-18 21:21:57,366 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: poll_exit +2024-10-18 21:21:57,367 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: server_info +2024-10-18 21:21:57,367 DEBUG SenderThread:3327812 [sender.py:send_request():363] send_request: server_info +2024-10-18 21:21:57,368 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: get_summary +2024-10-18 21:21:57,369 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: sampled_history +2024-10-18 21:21:57,621 INFO MainThread:3327812 [wandb_run.py:_footer_history_summary_info():3422] rendering history +2024-10-18 21:21:57,621 INFO MainThread:3327812 [wandb_run.py:_footer_history_summary_info():3454] rendering summary +2024-10-18 21:21:57,621 INFO MainThread:3327812 [wandb_run.py:_footer_sync_info():3380] logging synced files +2024-10-18 21:21:57,621 DEBUG HandlerThread:3327812 [handler.py:handle_request():144] handle_request: shutdown +2024-10-18 21:21:57,621 INFO HandlerThread:3327812 [handler.py:finish():842] shutting down handler +2024-10-18 21:21:58,367 INFO WriterThread:3327812 [datastore.py:close():298] close: /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/run-w102ona2.wandb +2024-10-18 21:21:58,621 INFO SenderThread:3327812 [sender.py:finish():1504] shutting down sender +2024-10-18 21:21:58,622 INFO SenderThread:3327812 [file_pusher.py:finish():164] shutting down file pusher +2024-10-18 21:21:58,622 INFO SenderThread:3327812 [file_pusher.py:join():169] waiting for file pusher diff --git a/AllinonSAM/wandb/run-20241018_212146-w102ona2/logs/debug.log b/AllinonSAM/wandb/run-20241018_212146-w102ona2/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..4919901d195e1239334fb54bc5f35836d23b5254 --- /dev/null +++ b/AllinonSAM/wandb/run-20241018_212146-w102ona2/logs/debug.log @@ -0,0 +1,27 @@ +2024-10-18 21:21:46,160 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Configure stats pid to 3327679 +2024-10-18 21:21:46,160 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Loading settings from /home/abdelrahman.elsayed/.config/wandb/settings +2024-10-18 21:21:46,160 INFO 
MainThread:3327679 [wandb_setup.py:_flush():76] Loading settings from /home/abdelrahman.elsayed/sarim_code/wandb/settings +2024-10-18 21:21:46,160 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-10-18 21:21:46,160 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': 'train_baselines.py', 'program': '/home/abdelrahman.elsayed/sarim_code/train_baselines.py'} +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:_log_setup():506] Logging user logs to /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/logs/debug.log +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:_log_setup():507] Logging internal logs to /home/abdelrahman.elsayed/sarim_code/wandb/run-20241018_212146-w102ona2/logs/debug-internal.log +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:init():546] calling init triggers +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:init():552] wandb.init called with sweep_config: {} +config: {'learning_rate': 0.0001, 'batch_size': 2, 'num_epochs': 500, 'reg_multiplier': 0.01} +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:init():602] starting backend +2024-10-18 21:21:46,161 INFO MainThread:3327679 [wandb_init.py:init():606] setting up manager +2024-10-18 21:21:46,162 INFO MainThread:3327679 [backend.py:_multiprocessing_setup():106] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-10-18 21:21:46,164 INFO MainThread:3327679 [wandb_init.py:init():613] backend started and connected +2024-10-18 21:21:46,166 INFO MainThread:3327679 [wandb_init.py:init():701] updated telemetry +2024-10-18 21:21:46,222 INFO MainThread:3327679 [wandb_init.py:init():741] communicating run to backend with 60.0 second timeout +2024-10-18 21:21:46,900 INFO MainThread:3327679 [wandb_run.py:_on_init():2133] communicating current version +2024-10-18 21:21:46,965 INFO MainThread:3327679 [wandb_run.py:_on_init():2142] got version response upgrade_message: "wandb version 0.18.5 is available! To upgrade, please run:\n $ pip install wandb --upgrade" + +2024-10-18 21:21:46,965 INFO MainThread:3327679 [wandb_init.py:init():789] starting run threads in backend +2024-10-18 21:21:49,912 INFO MainThread:3327679 [wandb_run.py:_console_start():2114] atexit reg +2024-10-18 21:21:49,912 INFO MainThread:3327679 [wandb_run.py:_redirect():1969] redirect: SettingsConsole.WRAP_RAW +2024-10-18 21:21:49,913 INFO MainThread:3327679 [wandb_run.py:_redirect():2034] Wrapping output streams. +2024-10-18 21:21:49,913 INFO MainThread:3327679 [wandb_run.py:_redirect():2059] Redirects installed. +2024-10-18 21:21:49,913 INFO MainThread:3327679 [wandb_init.py:init():831] run started, returning control to user process +2024-10-18 21:21:58,625 WARNING MsgRouterThr:3327679 [router.py:message_loop():77] message_loop has been closed diff --git a/AllinonSAM/wandb/run-20241018_212146-w102ona2/run-w102ona2.wandb b/AllinonSAM/wandb/run-20241018_212146-w102ona2/run-w102ona2.wandb new file mode 100644 index 0000000000000000000000000000000000000000..0296fb2f7389e3847cd85297ade2a2924ff388c8 Binary files /dev/null and b/AllinonSAM/wandb/run-20241018_212146-w102ona2/run-w102ona2.wandb differ
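Note on the failure recorded in files/output.log above: the run w102ona2 exits with code 1 because torch.cat([x, skip], dim=1) in vit_seg_modeling.py receives tensors whose spatial sizes differ (32 vs. 64). The snippet below is a minimal sketch, not the repository's code; the tensor shapes and the interpolate-based fix are assumptions chosen only to reproduce and illustrate that class of error.

# Minimal sketch (hypothetical shapes) of the torch.cat mismatch seen in output.log:
# concatenation along dim=1 (channels) requires batch and spatial dims to agree.
import torch
import torch.nn.functional as F

x = torch.randn(2, 256, 32, 32)     # decoder feature map at 32x32 (assumed)
skip = torch.randn(2, 64, 64, 64)   # encoder skip connection at 64x64 (assumed)

try:
    torch.cat([x, skip], dim=1)      # raises: "Expected size 32 but got size 64 ..."
except RuntimeError as err:
    print(err)

# One common remedy in U-Net style decoders is to upsample the decoder feature
# to the skip connection's spatial size before concatenating.
x_up = F.interpolate(x, size=skip.shape[2:], mode="bilinear", align_corners=False)
merged = torch.cat([x_up, skip], dim=1)
print(merged.shape)                  # torch.Size([2, 320, 64, 64])

Whether the actual mismatch in vit_seg_modeling.py comes from the decoder's upsampling factor or from the skip feature resolution cannot be determined from these logs alone; the sketch only shows why the reported RuntimeError occurs.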