wondervictor committed (verified)
Commit 3f3e9ba · Parent: 2ba4ec8

Update mask_adapter/sam_maskadapter.py

Files changed (1)
  1. mask_adapter/sam_maskadapter.py +7 -9
mask_adapter/sam_maskadapter.py CHANGED
@@ -147,17 +147,16 @@ class SAMVisualizationDemo(object):


        with torch.no_grad():
-           self.clip_model.cuda()
-           text_features = self.clip_model.encode_text(text.cuda())
+           text_features = self.clip_model.encode_text(text)
            text_features /= text_features.norm(dim=-1, keepdim=True)

-           features = self.extract_features_convnext(image.cuda().float())
+           features = self.extract_features_convnext(image.float())

            clip_feature = features['clip_vis_dense']

            clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)

-           semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().cuda())
+           semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float())

            maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:],
                                             mode='bilinear', align_corners=False)
@@ -188,7 +187,7 @@ class SAMVisualizationDemo(object):
            select_mask.extend(locs[0].tolist())
        for idx in select_mask:
            select_cls[idx] = class_preds[idx]
-       semseg = torch.einsum("qc,qhw->chw", select_cls.float(), pred_masks.tensor.float().cuda())
+       semseg = torch.einsum("qc,qhw->chw", select_cls.float(), pred_masks.tensor.float())

        r = semseg
        blank_area = (r[0] == 0)
@@ -239,7 +238,7 @@ class SAMPointVisualizationDemo(object):
        lvis_classes = [x[x.find(':')+1:] for x in lvis_classes]

        self.class_names = thing_classes + stuff_classes + lvis_classes
-       self.text_embedding = torch.from_numpy(np.load("./text_embedding/lvis_coco_text_embedding.npy")).to("cuda")
+       self.text_embedding = torch.from_numpy(np.load("./text_embedding/lvis_coco_text_embedding.npy"))

        self.class_names = self._load_class_names()

@@ -308,17 +307,16 @@ class SAMPointVisualizationDemo(object):
        # text = open_clip.tokenize(txts)

        with torch.no_grad():
-           self.clip_model.cuda()
            # text_features = self.clip_model.encode_text(text.cuda())
            # text_features /= text_features.norm(dim=-1, keepdim=True)
            #np.save("/home/yongkangli/Mask-Adapter/text_embedding/lvis_coco_text_embedding.npy", text_features.cpu().numpy())
            text_features = self.text_embedding
-           features = self.extract_features_convnext(image.cuda().float())
+           features = self.extract_features_convnext(image.float())
            clip_feature = features['clip_vis_dense']

            clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)

-           semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().cuda())
+           semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float())
            maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:], mode='bilinear', align_corners=False)

            B, C = clip_feature.size(0), clip_feature.size(1)
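
The net effect of the commit is that every hardcoded CUDA placement (self.clip_model.cuda(), text.cuda(), image.cuda().float(), .float().cuda(), .to("cuda")) is removed, so SAMVisualizationDemo and SAMPointVisualizationDemo no longer assume a GPU host. Below is a minimal sketch of the device-agnostic pattern this points toward; pick_device and PointDemoSketch are hypothetical illustrations, not code from the repository, and the ConvNeXt feature-extraction step is elided.

import numpy as np
import torch


def pick_device(preferred=None):
    """Use CUDA when it is actually available, otherwise fall back to CPU."""
    if preferred is not None:
        return torch.device(preferred)
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")


class PointDemoSketch:
    """Hypothetical reduction of SAMPointVisualizationDemo's device handling."""

    def __init__(self, clip_model, mask_adapter, device=None):
        self.device = pick_device(device)
        # Move the models once, at construction, instead of calling
        # self.clip_model.cuda() inside the inference path.
        self.clip_model = clip_model.to(self.device)
        self.mask_adapter = mask_adapter.to(self.device)
        # The cached text embedding follows the chosen device rather than
        # being pinned with .to("cuda") as the pre-commit code was.
        self.text_embedding = torch.from_numpy(
            np.load("./text_embedding/lvis_coco_text_embedding.npy")
        ).to(self.device)

    @torch.no_grad()
    def run(self, image, pred_masks):
        # Inputs follow the models' device; no scattered .cuda() calls.
        image = image.to(self.device).float()
        masks = pred_masks.to(self.device).float().unsqueeze(0)
        # ... feature extraction as in the real demo would go here, then:
        # semantic_activation_maps = self.mask_adapter(clip_vis_dense, masks)
        return image, masks

One caveat: simply deleting .cuda() only works if the models are constructed on (or moved to) the same device as the inputs. On a CUDA host with the checkpoint loaded to GPU, passing CPU tensors would raise a device-mismatch error, which is why routing everything through a single device attribute, as sketched above, is the safer form of this change.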