Spaces: Running on Zero
wondervictor committed
Update mask_adapter/sam_maskadapter.py
mask_adapter/sam_maskadapter.py  CHANGED  (+13 -13)
@@ -81,8 +81,8 @@ class SAMVisualizationDemo(object):
             crop_n_points_downscale_factor=2,
             min_mask_region_area=100)
 
-        self.clip_model = clip_model
-        self.mask_adapter = mask_adapter
+        self.clip_model = clip_model
+        self.mask_adapter = mask_adapter
 
 
 
@@ -147,17 +147,17 @@ class SAMVisualizationDemo(object):
 
 
         with torch.no_grad():
-            self.clip_model.
-            text_features = self.clip_model.encode_text(text.
+            self.clip_model.cuda()
+            text_features = self.clip_model.encode_text(text.cuda())
             text_features /= text_features.norm(dim=-1, keepdim=True)
 
-        features = self.extract_features_convnext(image.
+        features = self.extract_features_convnext(image.cuda().float())
 
         clip_feature = features['clip_vis_dense']
 
         clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)
 
-        semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().
+        semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().cuda())
 
         maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:],
                                          mode='bilinear', align_corners=False)
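Note on the pattern in this hunk: the added lines move the CLIP module and its inputs to the GPU right before a no-grad forward pass. A minimal open_clip sketch of that same pattern, assuming a CUDA device is available (the model tag and prompts below are illustrative, not taken from this repo):

```python
import torch
import open_clip

# Illustrative CLIP backbone; the demo builds its own model elsewhere.
model, _, _ = open_clip.create_model_and_transforms(
    "convnext_large_d_320", pretrained="laion2b_s29b_b131k_ft_soup")
tokenizer = open_clip.get_tokenizer("convnext_large_d_320")

prompts = ["a photo of a cat", "a photo of a dog"]   # hypothetical class prompts
text = tokenizer(prompts)

with torch.no_grad():
    model.cuda()                                     # module to the GPU, as in line 150
    text_features = model.encode_text(text.cuda())   # tokens on the same device
    text_features /= text_features.norm(dim=-1, keepdim=True)  # unit norm for cosine similarity
```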
@@ -188,7 +188,7 @@ class SAMVisualizationDemo(object):
             select_mask.extend(locs[0].tolist())
         for idx in select_mask:
             select_cls[idx] = class_preds[idx]
-        semseg = torch.einsum("qc,qhw->chw", select_cls.float(), pred_masks.tensor.float().
+        semseg = torch.einsum("qc,qhw->chw", select_cls.float(), pred_masks.tensor.float().cuda())
 
         r = semseg
         blank_area = (r[0] == 0)
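The einsum on line 191 contracts Q per-mask class-score vectors with Q mask maps into a C x H x W semantic map, so every pixel accumulates the scores of the masks that cover it. A self-contained toy illustration of that contraction (all shapes and values invented for the example):

```python
import torch

Q, C, H, W = 3, 5, 4, 4                 # 3 masks, 5 classes, 4x4 image (toy sizes)
select_cls = torch.zeros(Q, C)          # class scores per mask
select_cls[0, 2] = 1.0                  # mask 0 -> class 2
select_cls[1, 4] = 0.8                  # mask 1 -> class 4
pred_masks = torch.zeros(Q, H, W)       # binary mask per query
pred_masks[0, :2, :2] = 1.0
pred_masks[1, 2:, 2:] = 1.0

# Same contraction as the diff: semseg[c, h, w] = sum_q cls[q, c] * mask[q, h, w]
semseg = torch.einsum("qc,qhw->chw", select_cls.float(), pred_masks.float())
print(semseg.shape)     # torch.Size([5, 4, 4])
print(semseg[2, 0, 0])  # tensor(1.) -> pixel (0, 0) inherits mask 0's class-2 score
```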
@@ -222,9 +222,9 @@ class SAMPointVisualizationDemo(object):
 
         self.predictor = SAM2ImagePredictor(sam2)
 
-        self.clip_model = clip_model
+        self.clip_model = clip_model
 
-        self.mask_adapter = mask_adapter
+        self.mask_adapter = mask_adapter
 
 
         from .data.datasets import openseg_classes
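SAM2ImagePredictor is the point-prompt interface that SAMPointVisualizationDemo wraps. A rough usage sketch of that predictor on its own, with placeholder config/checkpoint paths and a dummy image (the Space wires this up differently):

```python
import numpy as np
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor

# Placeholder paths; substitute the SAM 2 config and checkpoint you actually have.
sam2 = build_sam2("configs/sam2.1/sam2.1_hiera_l.yaml", "checkpoints/sam2.1_hiera_large.pt")
predictor = SAM2ImagePredictor(sam2)

image = np.zeros((480, 640, 3), dtype=np.uint8)    # stand-in for a real RGB image
predictor.set_image(image)                         # precompute the image embedding
masks, scores, logits = predictor.predict(
    point_coords=np.array([[320, 240]]),           # one foreground click (x, y)
    point_labels=np.array([1]),                    # 1 = positive point
    multimask_output=True,
)
```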
@@ -239,7 +239,7 @@ class SAMPointVisualizationDemo(object):
         lvis_classes = [x[x.find(':')+1:] for x in lvis_classes]
 
         self.class_names = thing_classes + stuff_classes + lvis_classes
-        self.text_embedding = torch.from_numpy(np.load("./text_embedding/lvis_coco_text_embedding.npy")).to("
+        self.text_embedding = torch.from_numpy(np.load("./text_embedding/lvis_coco_text_embedding.npy")).to("cuda")
 
         self.class_names = self._load_class_names()
 
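Line 242 loads a precomputed .npy of class-name embeddings so the point demo can skip encode_text per request. A sketch of how such a cache could be produced and reloaded with open_clip, assuming the same kind of text encoder (model tag, prompts, and the flat file name are illustrative; only the precompute-then-load idea comes from the diff and the commented-out np.save call in the later hunk):

```python
import numpy as np
import torch
import open_clip

# One-time precompute (illustrative model tag and prompts).
model, _, _ = open_clip.create_model_and_transforms(
    "convnext_large_d_320", pretrained="laion2b_s29b_b131k_ft_soup")
tokenizer = open_clip.get_tokenizer("convnext_large_d_320")
class_prompts = ["a photo of a person", "a photo of a car"]   # stand-in for the LVIS/COCO class list

with torch.no_grad():
    text_features = model.encode_text(tokenizer(class_prompts))
    text_features /= text_features.norm(dim=-1, keepdim=True)
np.save("lvis_coco_text_embedding.npy", text_features.numpy())

# At start-up, as in the diff: load the cache once and keep it on the GPU.
text_embedding = torch.from_numpy(np.load("lvis_coco_text_embedding.npy")).to("cuda")
```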
@@ -308,17 +308,17 @@ class SAMPointVisualizationDemo(object):
         # text = open_clip.tokenize(txts)
 
         with torch.no_grad():
-            self.clip_model.
+            self.clip_model.cuda()
             # text_features = self.clip_model.encode_text(text.cuda())
             # text_features /= text_features.norm(dim=-1, keepdim=True)
             #np.save("/home/yongkangli/Mask-Adapter/text_embedding/lvis_coco_text_embedding.npy", text_features.cpu().numpy())
             text_features = self.text_embedding
-        features = self.extract_features_convnext(image.
+        features = self.extract_features_convnext(image.cuda().float())
         clip_feature = features['clip_vis_dense']
 
         clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)
 
-        semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().
+        semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().cuda())
 
         maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:], mode='bilinear', align_corners=False)
 
         B, C = clip_feature.size(0), clip_feature.size(1)
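This Space runs on ZeroGPU ("Running on Zero"), where a CUDA device is only attached while a function decorated with @spaces.GPU is executing; calling .cuda() inside the inference path, as these hunks do, rather than at construction time is consistent with that constraint. A minimal sketch of the decorator (the function body is illustrative):

```python
import spaces
import torch

@spaces.GPU
def run_inference(image: torch.Tensor) -> torch.Tensor:
    # Inside a @spaces.GPU call a GPU is attached, so .cuda() is valid here
    # even though it would fail at import time on a ZeroGPU Space.
    return image.cuda().float().mean(dim=0).cpu()
```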