JeffLiang committed
Commit 7596b4b · 1 Parent(s): b9e0bde
open_vocab_seg/modeling/clip_adapter/utils.py CHANGED
@@ -63,7 +63,7 @@ def crop_with_mask(
         [image.new_full((1, b - t, r - l), fill_value=val) for val in fill]
     )
     # return image[:, t:b, l:r], mask[None, t:b, l:r]
-    return image[:, t:b, l:r] * mask[None, t:b, l:r] + (1 - mask[None, t:b, l:r]) * new_image, mask[None, t:b, l:r]
+    return image[:, t:b, l:r] * mask[None, t:b, l:r] + (~ mask[None, t:b, l:r]) * new_image, mask[None, t:b, l:r]
 
 
 def build_clip_model(model: str, mask_prompt_depth: int = 0, frozen: bool = True):
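
The commit swaps the arithmetic inversion `1 - mask` for the logical NOT `~mask`. The two are only equivalent when the mask holds 0/1 values, and recent PyTorch releases reject `-` on `torch.bool` tensors entirely, while `~` is the supported way to invert them. Below is a minimal sketch of the behavior, assuming a `torch.bool` mask and dummy tensors standing in for the real crop (shapes and values are illustrative, not taken from the repo):

import torch

# Assumption: mask is a torch.bool tensor, as the switch to `~` implies.
mask = torch.tensor([[True, False], [False, True]])
image_crop = torch.rand(3, 2, 2)        # stand-in for image[:, t:b, l:r]
new_image = torch.full((3, 2, 2), 0.5)  # stand-in for the fill-value image

# `1 - mask` on a bool tensor raises a RuntimeError in recent PyTorch;
# `~mask` is the supported logical negation and stays torch.bool.
inverted = ~mask

# Same composite as the return in crop_with_mask: keep image pixels where
# the mask is True, use the fill image elsewhere.
out = image_crop * mask[None] + inverted[None] * new_image
print(out.shape)                        # torch.Size([3, 2, 2])

The boolean mask is promoted to 0/1 during the multiplications, so the composited result matches what `1 - mask` used to produce for integer masks, without tripping PyTorch's restriction on bool subtraction.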