Spaces: Running on Zero
Commit: Init

This view is limited to 50 files because it contains too many changes. See raw diff.
- README.md +7 -0
- app.py +543 -124
- checkpoints/model_ir_se50.pth +3 -0
- eval/grounded_sam/florence2/config.json +85 -0
- eval/grounded_sam/florence2/configuration_florence2.py +340 -0
- eval/grounded_sam/florence2/generation_config.json +4 -0
- eval/grounded_sam/florence2/modeling_florence2.py +0 -0
- eval/grounded_sam/florence2/preprocessor_config.json +39 -0
- eval/grounded_sam/florence2/processing_florence2.py +1147 -0
- eval/grounded_sam/florence2/tokenizer.json +0 -0
- eval/grounded_sam/florence2/tokenizer_config.json +4 -0
- eval/grounded_sam/florence2/vocab.json +0 -0
- eval/grounded_sam/grounded_sam2_florence2_autolabel_pipeline.py +361 -0
- eval/grounded_sam/sam2/__init__.py +11 -0
- eval/grounded_sam/sam2/automatic_mask_generator.py +454 -0
- eval/grounded_sam/sam2/build_sam.py +172 -0
- eval/grounded_sam/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +116 -0
- eval/grounded_sam/sam2/configs/sam2.1/sam2.1_hiera_l.yaml +120 -0
- eval/grounded_sam/sam2/configs/sam2.1/sam2.1_hiera_s.yaml +119 -0
- eval/grounded_sam/sam2/configs/sam2.1/sam2.1_hiera_t.yaml +121 -0
- eval/grounded_sam/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +339 -0
- eval/grounded_sam/sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
- eval/grounded_sam/sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
- eval/grounded_sam/sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
- eval/grounded_sam/sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
- eval/grounded_sam/sam2/csrc/connected_components.cu +289 -0
- eval/grounded_sam/sam2/modeling/__init__.py +5 -0
- eval/grounded_sam/sam2/modeling/backbones/__init__.py +5 -0
- eval/grounded_sam/sam2/modeling/backbones/hieradet.py +317 -0
- eval/grounded_sam/sam2/modeling/backbones/image_encoder.py +134 -0
- eval/grounded_sam/sam2/modeling/backbones/utils.py +95 -0
- eval/grounded_sam/sam2/modeling/memory_attention.py +169 -0
- eval/grounded_sam/sam2/modeling/memory_encoder.py +181 -0
- eval/grounded_sam/sam2/modeling/position_encoding.py +221 -0
- eval/grounded_sam/sam2/modeling/sam/__init__.py +5 -0
- eval/grounded_sam/sam2/modeling/sam/mask_decoder.py +295 -0
- eval/grounded_sam/sam2/modeling/sam/prompt_encoder.py +182 -0
- eval/grounded_sam/sam2/modeling/sam/transformer.py +360 -0
- eval/grounded_sam/sam2/modeling/sam2_base.py +908 -0
- eval/grounded_sam/sam2/modeling/sam2_utils.py +323 -0
- eval/grounded_sam/sam2/sam2_hiera_b+.yaml +113 -0
- eval/grounded_sam/sam2/sam2_hiera_l.yaml +117 -0
- eval/grounded_sam/sam2/sam2_hiera_s.yaml +116 -0
- eval/grounded_sam/sam2/sam2_hiera_t.yaml +118 -0
- eval/grounded_sam/sam2/sam2_image_predictor.py +465 -0
- eval/grounded_sam/sam2/sam2_video_predictor.py +1172 -0
- eval/grounded_sam/sam2/utils/__init__.py +5 -0
- eval/grounded_sam/sam2/utils/amg.py +348 -0
- eval/grounded_sam/sam2/utils/misc.py +349 -0
- eval/grounded_sam/sam2/utils/transforms.py +119 -0
README.md
CHANGED
@@ -9,6 +9,13 @@ app_file: app.py
 pinned: false
 license: apache-2.0
 short_description: Online demo for XVerse
+preload_from_hub:
+- black-forest-labs/FLUX.1-dev
+- microsoft/Florence-2-large
+- openai/clip-vit-large-patch14
+- facebook/dino-vits16
+- xingjianleng/mplug_visual-question-answering_coco_large_en
+- ByteDance/XVerse
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
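Note on the new front-matter block: `preload_from_hub` asks the Space builder to fetch the listed Hub repositories into the image's Hugging Face cache at build time, so app.py finds them locally at startup. As a rough sketch of the effect (an illustration, not the builder's actual mechanism), it amounts to snapshotting each repo into the default cache:

from huggingface_hub import snapshot_download

# Hypothetical illustration of what preload_from_hub achieves at build time:
# each listed repo ends up in the standard HF cache before the app runs.
for repo_id in [
    "black-forest-labs/FLUX.1-dev",
    "microsoft/Florence-2-large",
    "openai/clip-vit-large-patch14",
    "facebook/dino-vits16",
    "xingjianleng/mplug_visual-question-answering_coco_large_en",
    "ByteDance/XVerse",
]:
    snapshot_download(repo_id)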
app.py
CHANGED
@@ -1,154 +1,573 @@
(Removed: the stock Gradio text-to-image starter that previously made up app.py; only fragments of it are captured in this view, e.g. model_repo_id = "stabilityai/sdxl-turbo", torch_dtype = torch.float32, the MAX_SEED / MAX_IMAGE_SIZE sliders and run_button. The new app.py follows; comments originally written in Chinese are translated to English.)

# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
from PIL import Image
import subprocess

import torch
import gradio as gr
import spaces  # assumed: needed for the @spaces.GPU decorator below (import not visible in this view)
import string
import random, time, os, math

from src.flux.generate import generate_from_test_sample, seed_everything
from src.flux.pipeline_tools import CustomFluxPipeline, load_modulation_adapter, load_dit_lora
from src.utils.data_utils import get_train_config, image_grid, pil2tensor, json_dump, pad_to_square, cv2pil, merge_bboxes
from eval.tools.face_id import FaceID
from eval.tools.florence_sam import ObjectDetector
import shutil
import yaml
import numpy as np
from huggingface_hub import hf_hub_download

dtype = torch.bfloat16
device = "cuda"

hf_hub_download(
    repo_id="facebook/sam2.1-hiera-large",
    local_dir="./checkpoints/",
    filename="sam2.1_hiera_large.pt",
)

os.environ["SAM2_MODEL_PATH"] = "./checkpoints/sam2.1_hiera_large.pt"

config_path = "train/config/XVerse_config_demo.yaml"

config = config_train = get_train_config(config_path)
config["model"]["dit_quant"] = "int8-quanto"
config["model"]["use_dit_lora"] = False
model = CustomFluxPipeline(
    config, device, torch_dtype=dtype,
)
model.pipe.set_progress_bar_config(leave=False)

face_model = FaceID(device)
detector = ObjectDetector(device)
config = get_train_config(config_path)
model.config = config
store_attn_map = False

ckpt_root = "~/.cache/huggingface/hub/XVerse"
modulation_adapter = load_modulation_adapter(model, config, dtype, device, f"{ckpt_root}/modulation_adapter", is_training=False)
model.add_modulation_adapter(modulation_adapter)
if config["model"]["use_dit_lora"]:
    load_dit_lora(model, model.pipe, config, dtype, device, f"{ckpt_root}", is_training=False)

num_inputs = 6

# Clear the image inputs: return one None per input slot.
def clear_images():
    return [None, ] * num_inputs

def det_seg_img(image, label):
    if isinstance(image, str):
        image = Image.open(image).convert("RGB")
    instance_result_dict = detector.get_multiple_instances(image, label, min_size=image.size[0]//20)
    indices = list(range(len(instance_result_dict["instance_images"])))
    ins, bbox = merge_instances(image, indices, instance_result_dict["instance_bboxes"], instance_result_dict["instance_images"])
    return ins

def crop_face_img(image):
    if isinstance(image, str):
        image = Image.open(image).convert("RGB")

    # image = resize_keep_aspect_ratio(image, 1024)
    image = pad_to_square(image).resize((2048, 2048))

    face_bbox = face_model.detect(
        (pil2tensor(image).unsqueeze(0) * 255).to(torch.uint8).to(device), 1.4
    )[0]
    face = image.crop(face_bbox)
    return face

def vlm_img_caption(image):
    if isinstance(image, str):
        image = Image.open(image).convert("RGB")

    try:
        caption = detector.detector.caption(image, "<CAPTION>").strip()
        if caption.endswith("."):
            caption = caption[:-1]
    except Exception as e:
        print(e)
        caption = ""

    caption = caption.lower()
    return caption


def generate_random_string(length=4):
    letters = string.ascii_letters  # upper- and lower-case letters
    result_str = ''.join(random.choice(letters) for i in range(length))
    return result_str

def resize_keep_aspect_ratio(pil_image, target_size=1024):
    H, W = pil_image.height, pil_image.width
    target_area = target_size * target_size
    current_area = H * W
    scaling_factor = (target_area / current_area) ** 0.5  # sqrt(target_area / current_area)
    new_H = int(round(H * scaling_factor))
    new_W = int(round(W * scaling_factor))
    return pil_image.resize((new_W, new_H))

# Containers for the six image inputs built in a loop below.
images = []
captions = []
face_btns = []
det_btns = []
vlm_btns = []
accordions = []
idip_checkboxes = []
accordion_states = []

def open_accordion_on_example_selection(*args):
    print("enter open_accordion_on_example_selection")
    images = list(args[-18:-12])
    outputs = []
    for i, img in enumerate(images):
        if img is not None:
            print(f"open accordions {i}")
            outputs.append(True)
        else:
            print(f"close accordions {i}")
            outputs.append(False)
    print(outputs)
    return outputs

@spaces.GPU
def generate_image(
    prompt,
    cond_size, target_height, target_width,
    seed,
    vae_skip_iter, control_weight_lambda,
    double_attention,  # new parameter
    single_attention,  # new parameter
    latent_dblora_scale_str,
    latent_sblora_scale_str, vae_lora_scale,
    indexs,  # new parameter
    *images_captions_faces,  # Combine all unpacked arguments into one tuple
):
    torch.cuda.empty_cache()
    num_images = 4

    # Determine the number of images, captions, and faces based on the indexs length
    images = list(images_captions_faces[:num_inputs])
    captions = list(images_captions_faces[num_inputs:2 * num_inputs])
    idips_checkboxes = list(images_captions_faces[2 * num_inputs:3 * num_inputs])
    images = [images[i] for i in indexs]
    captions = [captions[i] for i in indexs]
    idips_checkboxes = [idips_checkboxes[i] for i in indexs]

    print(f"Length of images: {len(images)}")
    print(f"Length of captions: {len(captions)}")
    print(f"Indexs: {indexs}")

    print(f"Control weight lambda: {control_weight_lambda}")
    if control_weight_lambda != "no":
        parts = control_weight_lambda.split(',')
        new_parts = []
        for part in parts:
            if ':' in part:
                left, right = part.split(':')
                values = right.split('/')
                # keep the global value
                global_value = values[0]
                id_value = values[1]
                ip_value = values[2]
                new_values = [global_value]
                for is_id in idips_checkboxes:
                    if is_id:
                        new_values.append(id_value)
                    else:
                        new_values.append(ip_value)
                new_part = f"{left}:{('/'.join(new_values))}"
                new_parts.append(new_part)
            else:
                new_parts.append(part)
        control_weight_lambda = ','.join(new_parts)

    print(f"Control weight lambda: {control_weight_lambda}")

    src_inputs = []
    use_words = []
    cur_run_time = time.strftime("%m%d-%H%M%S")
    tmp_dir_root = f"tmp/gradio_demo/{run_name}"  # NOTE: run_name is not defined anywhere in this file
    temp_dir = f"{tmp_dir_root}/{cur_run_time}_{generate_random_string(4)}"
    os.makedirs(temp_dir, exist_ok=True)
    print(f"Temporary directory created: {temp_dir}")
    for i, (image_path, caption) in enumerate(zip(images, captions)):
        if image_path:
            if caption.startswith("a ") or caption.startswith("A "):
                word = caption[2:]
            else:
                word = caption

            if f"ENT{i+1}" in prompt:
                prompt = prompt.replace(f"ENT{i+1}", caption)

            image = resize_keep_aspect_ratio(Image.open(image_path), 768)
            save_path = f"{temp_dir}/tmp_resized_input_{i}.png"
            image.save(save_path)

            input_image_path = save_path

            src_inputs.append(
                {
                    "image_path": input_image_path,
                    "caption": caption
                }
            )
            use_words.append((i, word, word))

    test_sample = dict(
        input_images=[], position_delta=[0, -32],
        prompt=prompt,
        target_height=target_height,
        target_width=target_width,
        seed=seed,
        cond_size=cond_size,
        vae_skip_iter=vae_skip_iter,
        lora_scale=latent_dblora_scale_str,
        control_weight_lambda=control_weight_lambda,
        latent_sblora_scale=latent_sblora_scale_str,
        condition_sblora_scale=vae_lora_scale,
        double_attention=double_attention,
        single_attention=single_attention,
    )
    if len(src_inputs) > 0:
        test_sample["modulation"] = [
            dict(
                type="adapter",
                src_inputs=src_inputs,
                use_words=use_words,
            ),
        ]

    json_dump(test_sample, f"{temp_dir}/test_sample.json", 'utf-8')
    assert single_attention == True
    target_size = int(round((target_width * target_height) ** 0.5) // 16 * 16)
    print(test_sample)

    model.config["train"]["dataset"]["val_condition_size"] = cond_size
    model.config["train"]["dataset"]["val_target_size"] = target_size

    if control_weight_lambda == "no":
        control_weight_lambda = None
    if vae_skip_iter == "no":
        vae_skip_iter = None
    use_condition_sblora_control = True
    use_latent_sblora_control = True
    image = generate_from_test_sample(
        test_sample, model.pipe, model.config,
        num_images=num_images,
        target_height=target_height,
        target_width=target_width,
        seed=seed,
        store_attn_map=store_attn_map,
        vae_skip_iter=vae_skip_iter,  # use the new parameter
        control_weight_lambda=control_weight_lambda,  # pass the new parameter
        double_attention=double_attention,  # new parameter
        single_attention=single_attention,  # new parameter
        ip_scale=latent_dblora_scale_str,
        use_latent_sblora_control=use_latent_sblora_control,
        latent_sblora_scale=latent_sblora_scale_str,
        use_condition_sblora_control=use_condition_sblora_control,
        condition_sblora_scale=vae_lora_scale,
    )
    if isinstance(image, list):
        num_cols = 2
        num_rows = int(math.ceil(num_images / num_cols))
        image = image_grid(image, num_rows, num_cols)

    save_path = f"{temp_dir}/tmp_result.png"
    image.save(save_path)

    return image

def create_image_input(index, open=True, indexs_state=None):
    accordion_state = gr.State(open)
    with gr.Column():
        with gr.Accordion(f"Input Image {index + 1}", open=accordion_state.value) as accordion:
            image = gr.Image(type="filepath", label=f"Image {index + 1}")
            caption = gr.Textbox(label=f"Caption {index + 1}", value="")
            id_ip_checkbox = gr.Checkbox(value=False, label=f"ID or not {index + 1}", visible=True)
            with gr.Row():
                vlm_btn = gr.Button("Auto Caption")
                det_btn = gr.Button("Det & Seg")
                face_btn = gr.Button("Crop Face")
            accordion.expand(
                inputs=[indexs_state],
                fn=lambda x: update_inputs(True, index, x),
                outputs=[indexs_state, accordion_state],
            )
            accordion.collapse(
                inputs=[indexs_state],
                fn=lambda x: update_inputs(False, index, x),
                outputs=[indexs_state, accordion_state],
            )
    return image, caption, face_btn, det_btn, vlm_btn, accordion_state, accordion, id_ip_checkbox


def merge_instances(orig_img, indices, ins_bboxes, ins_images):
    orig_image_width, orig_image_height = orig_img.width, orig_img.height
    final_img = Image.new("RGB", (orig_image_width, orig_image_height), color=(255, 255, 255))
    bboxes = []
    for i in indices:
        bbox = np.array(ins_bboxes[i], dtype=int).tolist()
        bboxes.append(bbox)

        img = cv2pil(ins_images[i])
        mask = (np.array(img)[..., :3] != 255).any(axis=-1)
        mask = Image.fromarray(mask.astype(np.uint8) * 255, mode='L')
        final_img.paste(img, (bbox[0], bbox[1]), mask)

    bbox = merge_bboxes(bboxes)
    img = final_img.crop(bbox)
    return img, bbox


def change_accordion(at: bool, index: int, state: list):
    print(at, state)
    indexs = state
    if at:
        if index not in indexs:
            indexs.append(index)
    else:
        if index in indexs:
            indexs.remove(index)

    # keep indexs sorted
    indexs.sort()
    print(indexs)
    return gr.Accordion(open=at), indexs

def update_inputs(is_open, index, state: list):
    indexs = state
    if is_open:
        if index not in indexs:
            indexs.append(index)
    else:
        if index in indexs:
            indexs.remove(index)

    # keep indexs sorted
    indexs.sort()
    print(indexs)
    return indexs, is_open

with gr.Blocks() as demo:

    indexs_state = gr.State([0, 1])  # state holding the currently active input indexs

    gr.Markdown("### XVerse Demo")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", value="")

            clear_btn = gr.Button("清空输入图像")  # "Clear input images"
            with gr.Row():
                for i in range(num_inputs):
                    image, caption, face_btn, det_btn, vlm_btn, accordion_state, accordion, id_ip_checkbox = create_image_input(i, open=i<2, indexs_state=indexs_state)
                    images.append(image)
                    idip_checkboxes.append(id_ip_checkbox)
                    captions.append(caption)
                    face_btns.append(face_btn)
                    det_btns.append(det_btn)
                    vlm_btns.append(vlm_btn)
                    accordion_states.append(accordion_state)

                    accordions.append(accordion)

            # Fold the remaining settings into the Advanced accordion
            with gr.Accordion("Advanced", open=False):
                # Lay out the generation settings with Row/Column
                with gr.Row():
                    target_height = gr.Slider(512, 1024, step=128, value=768, label="Generated Height", info="")
                    target_width = gr.Slider(512, 1024, step=128, value=768, label="Generated Width", info="")
                    cond_size = gr.Slider(256, 384, step=128, value=256, label="Condition Size", info="")
                with gr.Row():
                    # weight_id_ip_str is driven by two sliders
                    weight_id = gr.Slider(0.1, 5, step=0.1, value=3, label="weight_id")
                    weight_ip = gr.Slider(0.1, 5, step=0.1, value=5, label="weight_ip")
                with gr.Row():
                    # ip_scale_str is a slider; hidden textboxes hold the converted string format
                    ip_scale_str = gr.Slider(0.5, 1.5, step=0.01, value=0.85, label="latent_lora_scale")
                    vae_lora_scale = gr.Slider(0.5, 1.5, step=0.01, value=1.3, label="vae_lora_scale")
                with gr.Row():
                    # vae_skip_iter is driven by two sliders
                    vae_skip_iter_s1 = gr.Slider(0, 1, step=0.01, value=0.05, label="vae_skip_iter_before")
                    vae_skip_iter_s2 = gr.Slider(0, 1, step=0.01, value=0.8, label="vae_skip_iter_after")

                with gr.Row():
                    weight_id_ip_str = gr.Textbox(
                        value="0-1:1/3/5",
                        label="weight_id_ip_str",
                        interactive=False, visible=False
                    )
                    weight_id.change(
                        lambda s1, s2: f"0-1:1/{s1}/{s2}",
                        inputs=[weight_id, weight_ip],
                        outputs=weight_id_ip_str
                    )
                    weight_ip.change(
                        lambda s1, s2: f"0-1:1/{s1}/{s2}",
                        inputs=[weight_id, weight_ip],
                        outputs=weight_id_ip_str
                    )
                    vae_skip_iter = gr.Textbox(
                        value="0-0.05:1,0.8-1:1",
                        label="vae_skip_iter",
                        interactive=False, visible=False
                    )
                    vae_skip_iter_s1.change(
                        lambda s1, s2: f"0-{s1}:1,{s2}-1:1",
                        inputs=[vae_skip_iter_s1, vae_skip_iter_s2],
                        outputs=vae_skip_iter
                    )
                    vae_skip_iter_s2.change(
                        lambda s1, s2: f"0-{s1}:1,{s2}-1:1",
                        inputs=[vae_skip_iter_s1, vae_skip_iter_s2],
                        outputs=vae_skip_iter
                    )

                with gr.Row():
                    db_latent_lora_scale_str = gr.Textbox(
                        value="0-1:0.85",
                        label="db_latent_lora_scale_str",
                        interactive=False, visible=False
                    )
                    sb_latent_lora_scale_str = gr.Textbox(
                        value="0-1:0.85",
                        label="sb_latent_lora_scale_str",
                        interactive=False, visible=False
                    )
                    vae_lora_scale_str = gr.Textbox(
                        value="0-1:1.3",
                        label="vae_lora_scale_str",
                        interactive=False, visible=False
                    )
                    vae_lora_scale.change(
                        lambda s: f"0-1:{s}",
                        inputs=vae_lora_scale,
                        outputs=vae_lora_scale_str
                    )
                    ip_scale_str.change(
                        lambda s: [f"0-1:{s}", f"0-1:{s}"],
                        inputs=ip_scale_str,
                        outputs=[db_latent_lora_scale_str, sb_latent_lora_scale_str]
                    )

                with gr.Row():
                    double_attention = gr.Checkbox(value=False, label="Double Attention", visible=False)
                    single_attention = gr.Checkbox(value=True, label="Single Attention", visible=False)

        with gr.Column():
            output = gr.Image(label="生成的图像")  # "Generated image"
            seed = gr.Number(value=42, label="Seed", info="")
            gen_btn = gr.Button("生成图像")  # "Generate image"

    gr.Markdown("### Examples")
    gen_btn.click(
        generate_image,
        inputs=[
            prompt, cond_size, target_height, target_width, seed,
            vae_skip_iter, weight_id_ip_str,
            double_attention, single_attention,
            db_latent_lora_scale_str, sb_latent_lora_scale_str, vae_lora_scale_str,
            indexs_state,  # pass the indexs state
            *images,
            *captions,
            *idip_checkboxes,
        ],
        outputs=output
    )

    # The clear button resets every image input
    clear_btn.click(clear_images, outputs=images)

    # Bind the Crop Face / Det & Seg / Auto Caption clicks and accordion-state changes for each input
    for i in range(num_inputs):
        face_btns[i].click(crop_face_img, inputs=[images[i]], outputs=[images[i]])
        det_btns[i].click(det_seg_img, inputs=[images[i], captions[i]], outputs=[images[i]])
        vlm_btns[i].click(vlm_img_caption, inputs=[images[i]], outputs=[captions[i]])
        accordion_states[i].change(fn=lambda x, state, index=i: change_accordion(x, index, state), inputs=[accordion_states[i], indexs_state], outputs=[accordions[i], indexs_state])

    examples = gr.Examples(
        examples=[
            [
                "ENT1 wearing a tiny hat",
                42, 256, 768, 768,
                3, 5,
                0.85, 1.3,
                0.05, 0.8,
                "sample/hamster.jpg", None, None, None, None, None,
                "a hamster", None, None, None, None, None,
                False, False, False, False, False, False
            ],
            [
                "ENT1 in a red dress is smiling",
                42, 256, 768, 768,
                3, 5,
                0.85, 1.3,
                0.05, 0.8,
                "sample/woman.jpg", None, None, None, None, None,
                "a woman", None, None, None, None, None,
                True, False, False, False, False, False
            ],
            [
                "ENT1 and ENT2 standing together in a park.",
                42, 256, 768, 768,
                2, 5,
                0.85, 1.3,
                0.05, 0.8,
                "sample/woman.jpg", "sample/girl.jpg", None, None, None, None,
                "a woman", "a girl", None, None, None, None,
                True, True, False, False, False, False
            ],
            [
                "ENT1, ENT2, and ENT3 standing together in a park.",
                42, 256, 768, 768,
                2.5, 5,
                0.8, 1.2,
                0.05, 0.8,
                "sample/woman.jpg", "sample/girl.jpg", "sample/old_man.jpg", None, None, None,
                "a woman", "a girl", "an old man", None, None, None,
                True, True, True, False, False, False
            ],
        ],
        inputs=[
            prompt, seed,
            cond_size,
            target_height,
            target_width,
            weight_id,
            weight_ip,
            ip_scale_str,
            vae_lora_scale,
            vae_skip_iter_s1,
            vae_skip_iter_s2,
            *images,
            *captions,
            *idip_checkboxes
        ],
        outputs=accordion_states,
        fn=open_accordion_on_example_selection,
        run_on_click=True
    )

port = int(os.environ.get("ARNOLD_WORKER_0_PORT", "-1").split(",")[3])
demo.queue().launch(share=True, inbrowser=True, server_name="0.0.0.0", server_port=port)
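For reference, the per-subject expansion that generate_image applies to the weight string can be read in isolation as the small helper below. It restates the loop above; expand_weight_lambda is a name introduced here for illustration, not part of the repo. A segment such as "0-1:1/3/5" carries a range (presumably over denoising steps) followed by global/ID/IP weights; each selected input image then gets the ID weight when its "ID or not" checkbox is ticked, otherwise the IP weight.

def expand_weight_lambda(weight_str, idip_flags):
    # "range:global/id/ip" -> "range:global/w1/w2/..." with one weight per input image
    new_parts = []
    for part in weight_str.split(','):
        if ':' not in part:
            new_parts.append(part)
            continue
        left, right = part.split(':')
        global_v, id_v, ip_v = right.split('/')
        values = [global_v] + [id_v if is_id else ip_v for is_id in idip_flags]
        new_parts.append(f"{left}:{'/'.join(values)}")
    return ','.join(new_parts)

# expand_weight_lambda("0-1:1/3/5", [True, False]) == "0-1:1/3/5"
# expand_weight_lambda("0-1:1/3/5", [False, False]) == "0-1:1/5/5"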
checkpoints/model_ir_se50.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a035c768259b98ab1ce0e646312f48b9e1e218197a0f80ac6765e88f8b6ddf28
size 175367323
eval/grounded_sam/florence2/config.json
ADDED
@@ -0,0 +1,85 @@
{
  "_name_or_path": "florence2",
  "architectures": [
    "Florence2ForConditionalGeneration"
  ],
  "auto_map": {
    "AutoConfig": "configuration_florence2.Florence2Config",
    "AutoModelForCausalLM": "modeling_florence2.Florence2ForConditionalGeneration"
  },
  "bos_token_id": 0,
  "eos_token_id": 2,
  "ignore_index": -100,
  "model_type": "florence2",
  "pad_token_id": 1,
  "projection_dim": 1024,
  "text_config": {
    "vocab_size": 51289,
    "activation_dropout": 0.1,
    "activation_function": "gelu",
    "add_bias_logits": false,
    "add_final_layer_norm": false,
    "attention_dropout": 0.1,
    "bos_token_id": 0,
    "classif_dropout": 0.1,
    "classifier_dropout": 0.0,
    "d_model": 1024,
    "decoder_attention_heads": 16,
    "decoder_ffn_dim": 4096,
    "decoder_layerdrop": 0.0,
    "decoder_layers": 12,
    "decoder_start_token_id": 2,
    "dropout": 0.1,
    "early_stopping": true,
    "encoder_attention_heads": 16,
    "encoder_ffn_dim": 4096,
    "encoder_layerdrop": 0.0,
    "encoder_layers": 12,
    "eos_token_id": 2,
    "forced_eos_token_id": 2,
    "forced_bos_token_id": 0,
    "gradient_checkpointing": false,
    "init_std": 0.02,
    "is_encoder_decoder": true,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1,
      "LABEL_2": 2
    },
    "max_position_embeddings": 1024,
    "no_repeat_ngram_size": 3,
    "normalize_before": false,
    "num_hidden_layers": 12,
    "pad_token_id": 1,
    "scale_embedding": false,
    "num_beams": 3
  },
  "vision_config": {
    "model_type": "davit",
    "drop_path_rate": 0.1,
    "patch_size": [7, 3, 3, 3],
    "patch_stride": [4, 2, 2, 2],
    "patch_padding": [3, 1, 1, 1],
    "patch_prenorm": [false, true, true, true],
    "enable_checkpoint": false,
    "dim_embed": [256, 512, 1024, 2048],
    "num_heads": [8, 16, 32, 64],
    "num_groups": [8, 16, 32, 64],
    "depths": [1, 1, 9, 1],
    "window_size": 12,
    "projection_dim": 1024,
    "visual_temporal_embedding": {
      "type": "COSINE",
      "max_temporal_embeddings": 100
    },
    "image_pos_embed": {
      "type": "learned_abs_2d",
      "max_pos_embeddings": 50
    },
    "image_feature_source": ["spatial_avg_pool", "temporal_avg_pool"]
  },
  "vocab_size": 51289,
  "torch_dtype": "float16",
  "transformers_version": "4.41.0.dev0",
  "is_encoder_decoder": true
}
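This config.json resolves the architecture through auto_map, i.e. the folder vendors Florence-2's remote code (configuration_florence2.py, modeling_florence2.py, processing_florence2.py) rather than shipping weights; no checkpoint file is added here. A minimal, hedged sketch of how such a model is typically instantiated with transformers, using the microsoft/Florence-2-large checkpoint that the README now preloads (the exact loading path used by ObjectDetector and the grounded-SAM pipeline is not visible in this diff):

import torch
from transformers import AutoModelForCausalLM, AutoProcessor

# trust_remote_code is required because the model and processor classes are
# resolved through auto_map entries instead of classes bundled with transformers.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Florence-2-large", torch_dtype=torch.float16, trust_remote_code=True
).eval()
processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)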
eval/grounded_sam/florence2/configuration_florence2.py
ADDED
@@ -0,0 +1,340 @@
# coding=utf-8
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
""" Florence-2 configuration"""

from typing import Optional

from transformers import AutoConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

class Florence2VisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Florence2VisionModel`]. It is used to instantiate a Florence2VisionModel
    according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Florence2VisionModel architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            The dropout rate of the drop path layer.
        patch_size (`List[int]`, *optional*, defaults to [7, 3, 3, 3]):
            The patch size of the image.
        patch_stride (`List[int]`, *optional*, defaults to [4, 2, 2, 2]):
            The patch stride of the image.
        patch_padding (`List[int]`, *optional*, defaults to [3, 1, 1, 1]):
            The patch padding of the image.
        patch_prenorm (`List[bool]`, *optional*, defaults to [false, true, true, true]):
            Whether to apply layer normalization before the patch embedding layer.
        enable_checkpoint (`bool`, *optional*, defaults to False):
            Whether to enable checkpointing.
        dim_embed (`List[int]`, *optional*, defaults to [256, 512, 1024, 2048]):
            The dimension of the embedding layer.
        num_heads (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
            The number of attention heads.
        num_groups (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
            The number of groups.
        depths (`List[int]`, *optional*, defaults to [1, 1, 9, 1]):
            The depth of the model.
        window_size (`int`, *optional*, defaults to 12):
            The window size of the model.
        projection_dim (`int`, *optional*, defaults to 1024):
            The dimension of the projection layer.
        visual_temporal_embedding (`dict`, *optional*):
            The configuration of the visual temporal embedding.
        image_pos_embed (`dict`, *optional*):
            The configuration of the image position embedding.
        image_feature_source (`List[str]`, *optional*, defaults to ["spatial_avg_pool", "temporal_avg_pool"]):
            The source of the image feature.
    Example:

    ```python
    >>> from transformers import Florence2VisionConfig, Florence2VisionModel

    >>> # Initializing a Florence2 Vision style configuration
    >>> configuration = Florence2VisionConfig()

    >>> # Initializing a model (with random weights)
    >>> model = Florence2VisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "florence2_vision"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        drop_path_rate=0.1,
        patch_size=[7, 3, 3, 3],
        patch_stride=[4, 2, 2, 2],
        patch_padding=[3, 1, 1, 1],
        patch_prenorm=[False, True, True, True],
        enable_checkpoint=False,
        dim_embed=[256, 512, 1024, 2048],
        num_heads=[8, 16, 32, 64],
        num_groups=[8, 16, 32, 64],
        depths=[1, 1, 9, 1],
        window_size=12,
        projection_dim=1024,
        visual_temporal_embedding=None,
        image_pos_embed=None,
        image_feature_source=["spatial_avg_pool", "temporal_avg_pool"],
        **kwargs,
    ):
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.patch_prenorm = patch_prenorm
        self.enable_checkpoint = enable_checkpoint
        self.dim_embed = dim_embed
        self.num_heads = num_heads
        self.num_groups = num_groups
        self.depths = depths
        self.window_size = window_size
        self.projection_dim = projection_dim
        self.visual_temporal_embedding = visual_temporal_embedding
        self.image_pos_embed = image_pos_embed
        self.image_feature_source = image_feature_source

        super().__init__(**kwargs)


class Florence2LanguageConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Florence2LanguagePreTrainedModel`]. It is used to instantiate a BART
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the BART
    [facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 51289):
            Vocabulary size of the Florence2Language model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Florence2LanguageModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by diving by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        num_labels (`int`, *optional*, defaults to 3):
            The number of labels to use in [`Florence2LanguageForSequenceClassification`].
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import Florence2LanguageConfig, Florence2LanguageModel

    >>> # Initializing a Florence2 Language style configuration
    >>> configuration = Florence2LanguageConfig()

    >>> # Initializing a model (with random weights)
    >>> model = Florence2LangaugeModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "florence2_language"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51289,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )

class Florence2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Florence2ForConditionalGeneration`]. It is used to instantiate an
    Florence-2 model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`Florence2VisionConfig`, *optional*):
            Custom vision config or dict
        text_config (`Union[AutoConfig, dict]`, *optional*):
            The config object of the text backbone.
        ignore_index (`int`, *optional*, defaults to -100):
            The ignore index for the loss function.
        vocab_size (`int`, *optional*, defaults to 51289):
            Vocabulary size of the Florence2model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`~Florence2ForConditionalGeneration`]
        projection_dim (`int`, *optional*, defaults to 1024):
            Dimension of the multimodal projection space.

    Example:

    ```python
    >>> from transformers import Florence2ForConditionalGeneration, Florence2Config, CLIPVisionConfig, BartConfig

    >>> # Initializing a clip-like vision config
    >>> vision_config = CLIPVisionConfig()

    >>> # Initializing a Bart config
    >>> text_config = BartConfig()

    >>> # Initializing a Florence-2 configuration
    >>> configuration = Florence2Config(vision_config, text_config)

    >>> # Initializing a model from the florence-2 configuration
    >>> model = Florence2ForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "florence2"
    is_composition = False

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        ignore_index=-100,
        vocab_size=51289,
        projection_dim=1024,
        **kwargs,
    ):
        self.ignore_index = ignore_index
        self.vocab_size = vocab_size
        self.projection_dim = projection_dim
        if vision_config is not None:
            vision_config = PretrainedConfig(**vision_config)
        self.vision_config = vision_config
        self.vocab_size = self.vocab_size

        self.text_config = text_config
        if text_config is not None:
            self.text_config = Florence2LanguageConfig(**text_config)

        super().__init__(**kwargs)
eval/grounded_sam/florence2/generation_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_beams": 3,
  "early_stopping": false
}
eval/grounded_sam/florence2/modeling_florence2.py
ADDED
The diff for this file is too large to render.
See raw diff
eval/grounded_sam/florence2/preprocessor_config.json
ADDED
@@ -0,0 +1,39 @@
{
  "auto_map": {
    "AutoProcessor": "processing_florence2.Florence2Processor"
  },
  "_valid_processor_keys": [
    "images",
    "do_resize",
    "size",
    "resample",
    "do_rescale",
    "rescale_factor",
    "do_normalize",
    "image_mean",
    "image_std",
    "return_tensors",
    "data_format",
    "input_data_format",
    "do_convert_rgb"
  ],
  "do_convert_rgb": null,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "do_center_crop": false,
  "image_processor_type": "CLIPImageProcessor",
  "image_seq_length": 577,
  "image_mean": [0.485, 0.456, 0.406],
  "image_std": [0.229, 0.224, 0.225],
  "processor_class": "Florence2Processor",
  "resample": 3,
  "size": {
    "height": 768,
    "width": 768
  },
  "crop_size": {
    "height": 768,
    "width": 768
  }
}
eval/grounded_sam/florence2/processing_florence2.py
ADDED
@@ -0,0 +1,1147 @@
# coding=utf-8
# Copyright 2024 Microsoft and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Florence-2.
"""

import re
import logging
from typing import List, Optional, Union
import numpy as np
import math

import torch

from transformers.feature_extraction_utils import BatchFeature
from transformers.image_utils import ImageInput, is_valid_image
from transformers.processing_utils import ProcessorMixin
from transformers.tokenization_utils_base import (
    PaddingStrategy,
    PreTokenizedInput,
    TextInput,
    TruncationStrategy,
)
from transformers import BartTokenizer, BartTokenizerFast
from transformers.utils import TensorType


logger = logging.getLogger(__name__)

# Copied from transformers.models.idefics2.processing_idefics2.is_url
def is_url(val) -> bool:
    return isinstance(val, str) and val.startswith("http")

# Copied from transformers.models.idefics2.processing_idefics2.is_image_or_image_url
def is_image_or_image_url(elem):
    return is_url(elem) or is_valid_image(elem)


def _is_str_or_image(elem):
    return isinstance(elem, (str)) or is_image_or_image_url(elem)


class Florence2Processor(ProcessorMixin):
    r"""
    Constructs a Florence2 processor which wraps a Florence2 image processor and a Florence2 tokenizer into a single processor.

    [`Florence2Processor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BartTokenizerFast`]. See the
    [`~Florence2Processor.__call__`] and [`~Florence2Processor.decode`] for more information.

    Args:
        image_processor ([`CLIPImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`BartTokenizerFast`], *optional*):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("BartTokenizer", "BartTokenizerFast")

    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
    ):
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        if not hasattr(image_processor, "image_seq_length"):
            raise ValueError("Image processor is missing an `image_seq_length` attribute.")

        self.image_seq_length = image_processor.image_seq_length

        tokens_to_add = {
            'additional_special_tokens': \
                tokenizer.additional_special_tokens + \
                ['<od>', '</od>', '<ocr>', '</ocr>'] + \
                [f'<loc_{x}>' for x in range(1000)] + \
                ['<cap>', '</cap>', '<ncap>', '</ncap>','<dcap>', '</dcap>', '<grounding>', '</grounding>', '<seg>', '</seg>', '<sep>', '<region_cap>', '</region_cap>', '<region_to_desciption>', '</region_to_desciption>', '<proposal>', '</proposal>', '<poly>', '</poly>', '<and>']
92 |
+
['<cap>', '</cap>', '<ncap>', '</ncap>','<dcap>', '</dcap>', '<grounding>', '</grounding>', '<seg>', '</seg>', '<sep>', '<region_cap>', '</region_cap>', '<region_to_desciption>', '</region_to_desciption>', '<proposal>', '</proposal>', '<poly>', '</poly>', '<and>']
|
93 |
+
}
|
94 |
+
tokenizer.add_special_tokens(tokens_to_add)
|
95 |
+
|
96 |
+
self.tasks_answer_post_processing_type = {
|
97 |
+
'<OCR>': 'pure_text',
|
98 |
+
'<OCR_WITH_REGION>': 'ocr',
|
99 |
+
'<CAPTION>': 'pure_text',
|
100 |
+
'<DETAILED_CAPTION>': 'pure_text',
|
101 |
+
'<MORE_DETAILED_CAPTION>': 'pure_text',
|
102 |
+
'<OD>': 'description_with_bboxes',
|
103 |
+
'<DENSE_REGION_CAPTION>': 'description_with_bboxes',
|
104 |
+
'<CAPTION_TO_PHRASE_GROUNDING>': "phrase_grounding",
|
105 |
+
'<REFERRING_EXPRESSION_SEGMENTATION>': 'polygons',
|
106 |
+
'<REGION_TO_SEGMENTATION>': 'polygons',
|
107 |
+
'<OPEN_VOCABULARY_DETECTION>': 'description_with_bboxes_or_polygons',
|
108 |
+
'<REGION_TO_CATEGORY>': 'pure_text',
|
109 |
+
'<REGION_TO_DESCRIPTION>': 'pure_text',
|
110 |
+
'<REGION_TO_OCR>': 'pure_text',
|
111 |
+
'<REGION_PROPOSAL>': 'bboxes'
|
112 |
+
}
|
113 |
+
|
114 |
+
self.task_prompts_without_inputs = {
|
115 |
+
'<OCR>': 'What is the text in the image?',
|
116 |
+
'<OCR_WITH_REGION>': 'What is the text in the image, with regions?',
|
117 |
+
'<CAPTION>': 'What does the image describe?',
|
118 |
+
'<DETAILED_CAPTION>': 'Describe in detail what is shown in the image.',
|
119 |
+
'<MORE_DETAILED_CAPTION>': 'Describe with a paragraph what is shown in the image.',
|
120 |
+
'<OD>': 'Locate the objects with category name in the image.',
|
121 |
+
'<DENSE_REGION_CAPTION>': 'Locate the objects in the image, with their descriptions.',
|
122 |
+
'<REGION_PROPOSAL>': 'Locate the region proposals in the image.'
|
123 |
+
}
|
124 |
+
|
125 |
+
self.task_prompts_with_input = {
|
126 |
+
'<CAPTION_TO_PHRASE_GROUNDING>': "Locate the phrases in the caption: {input}",
|
127 |
+
'<REFERRING_EXPRESSION_SEGMENTATION>': 'Locate {input} in the image with mask',
|
128 |
+
'<REGION_TO_SEGMENTATION>': 'What is the polygon mask of region {input}',
|
129 |
+
'<OPEN_VOCABULARY_DETECTION>': 'Locate {input} in the image.',
|
130 |
+
'<REGION_TO_CATEGORY>': 'What is the region {input}?',
|
131 |
+
'<REGION_TO_DESCRIPTION>': 'What does the region {input} describe?',
|
132 |
+
'<REGION_TO_OCR>': 'What text is in the region {input}?',
|
133 |
+
}
|
134 |
+
|
135 |
+
self.post_processor = Florence2PostProcesser(tokenizer=tokenizer)
|
136 |
+
|
137 |
+
|
138 |
+
super().__init__(image_processor, tokenizer)
|
139 |
+
|
140 |
+
def _construct_prompts(self, text):
|
141 |
+
# replace the task tokens with the task prompts if task token is in the text
|
142 |
+
prompts = []
|
143 |
+
for _text in text:
|
144 |
+
# 1. fixed task prompts without additional inputs
|
145 |
+
for task_token, task_prompt in self.task_prompts_without_inputs.items():
|
146 |
+
if task_token in _text:
|
147 |
+
assert _text == task_token, f"Task token {task_token} should be the only token in the text."
|
148 |
+
_text = task_prompt
|
149 |
+
break
|
150 |
+
# 2. task prompts with additional inputs
|
151 |
+
for task_token, task_prompt in self.task_prompts_with_input.items():
|
152 |
+
if task_token in _text:
|
153 |
+
_text = task_prompt.format(input=_text.replace(task_token, ''))
|
154 |
+
break
|
155 |
+
prompts.append(_text)
|
156 |
+
return prompts
|
157 |
+
|
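# --- illustrative example (not part of the original file) -----------------------------
# A minimal sketch of what _construct_prompts produces, assuming `processor` is an
# already-initialised Florence2Processor:
#
#   processor._construct_prompts(['<CAPTION>'])
#   # -> ['What does the image describe?']
#   processor._construct_prompts(['<CAPTION_TO_PHRASE_GROUNDING>a red car next to a tree'])
#   # -> ['Locate the phrases in the caption: a red car next to a tree']
# ---------------------------------------------------------------------------------------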
158 |
+
def __call__(
|
159 |
+
self,
|
160 |
+
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
|
161 |
+
images: ImageInput = None,
|
162 |
+
tokenize_newline_separately: bool = True,
|
163 |
+
padding: Union[bool, str, PaddingStrategy] = False,
|
164 |
+
truncation: Union[bool, str, TruncationStrategy] = None,
|
165 |
+
max_length=None,
|
166 |
+
return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
|
167 |
+
do_resize: bool = None,
|
168 |
+
do_normalize: bool = None,
|
169 |
+
image_mean: Optional[Union[float, List[float]]] = None,
|
170 |
+
image_std: Optional[Union[float, List[float]]] = None,
|
171 |
+
data_format: Optional["ChannelDimension"] = "channels_first", # noqa: F821
|
172 |
+
input_data_format: Optional[
|
173 |
+
Union[str, "ChannelDimension"] # noqa: F821
|
174 |
+
] = None,
|
175 |
+
resample: "PILImageResampling" = None, # noqa: F821
|
176 |
+
do_convert_rgb: bool = None,
|
177 |
+
do_thumbnail: bool = None,
|
178 |
+
do_align_long_axis: bool = None,
|
179 |
+
do_rescale: bool = None,
|
180 |
+
) -> BatchFeature:
|
181 |
+
"""
|
182 |
+
Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
|
183 |
+
and `kwargs` arguments to BartTokenizerFast's [`~BartTokenizerFast.__call__`] if `text` is not `None` to encode
|
184 |
+
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
|
185 |
+
CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
|
186 |
+
of the above two methods for more information.
|
187 |
+
|
188 |
+
Args:
|
189 |
+
text (`str`, `List[str]`, `List[List[str]]`):
|
190 |
+
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
|
191 |
+
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
|
192 |
+
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
|
193 |
+
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
|
194 |
+
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
|
195 |
+
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
|
196 |
+
number of channels, H and W are image height and width.
|
197 |
+
tokenize_newline_separately (`bool`, defaults to `True`):
|
198 |
+
Adds a separately tokenized '\n' at the end of the prompt.
|
199 |
+
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
|
200 |
+
Select a strategy to pad the returned sequences (according to the model's padding side and padding
|
201 |
+
index) among:
|
202 |
+
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
|
203 |
+
sequence if provided).
|
204 |
+
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
|
205 |
+
acceptable input length for the model if that argument is not provided.
|
206 |
+
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
|
207 |
+
lengths).
|
208 |
+
max_length (`int`, *optional*):
|
209 |
+
Maximum length of the returned list and optionally padding length (see above).
|
210 |
+
truncation (`bool`, *optional*):
|
211 |
+
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
|
212 |
+
return_tensors (`str` or [`~utils.TensorType`], *optional*):
|
213 |
+
If set, will return tensors of a particular framework. Acceptable values are:
|
214 |
+
|
215 |
+
- `'tf'`: Return TensorFlow `tf.constant` objects.
|
216 |
+
- `'pt'`: Return PyTorch `torch.Tensor` objects.
|
217 |
+
- `'np'`: Return NumPy `np.ndarray` objects.
|
218 |
+
- `'jax'`: Return JAX `jnp.ndarray` objects.
|
219 |
+
|
220 |
+
Returns:
|
221 |
+
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
|
222 |
+
|
223 |
+
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. If `suffix`
|
224 |
+
is provided, the `input_ids` will also contain the suffix input ids.
|
225 |
+
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
|
226 |
+
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
|
227 |
+
`None`).
|
228 |
+
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
|
229 |
+
- **labels** -- Labels compatible with training if `suffix` is not None
|
230 |
+
"""
|
231 |
+
|
232 |
+
return_token_type_ids = False
|
233 |
+
|
234 |
+
if images is None:
|
235 |
+
raise ValueError("`images` are expected as arguments to a `Florence2Processor` instance.")
|
236 |
+
if text is None:
|
237 |
+
logger.warning_once(
|
238 |
+
"You are using Florence-2 without a text prompt."
|
239 |
+
)
|
240 |
+
text = ""
|
241 |
+
|
242 |
+
if isinstance(text, List) and isinstance(images, List):
|
243 |
+
if len(images) < len(text):
|
244 |
+
raise ValueError(
|
245 |
+
f"Received {len(images)} images for {len(text)} prompts. Each prompt should be associated with an image."
|
246 |
+
)
|
247 |
+
if _is_str_or_image(text):
|
248 |
+
text = [text]
|
249 |
+
elif isinstance(text, list) and _is_str_or_image(text[0]):
|
250 |
+
pass
|
251 |
+
|
252 |
+
pixel_values = self.image_processor(
|
253 |
+
images,
|
254 |
+
do_resize=do_resize,
|
255 |
+
do_normalize=do_normalize,
|
256 |
+
return_tensors=return_tensors,
|
257 |
+
image_mean=image_mean,
|
258 |
+
image_std=image_std,
|
259 |
+
input_data_format=input_data_format,
|
260 |
+
data_format=data_format,
|
261 |
+
resample=resample,
|
262 |
+
do_convert_rgb=do_convert_rgb,
|
263 |
+
)["pixel_values"]
|
264 |
+
|
265 |
+
if max_length is not None:
|
266 |
+
max_length -= self.image_seq_length # max_length has to account for the image tokens
|
267 |
+
|
268 |
+
text = self._construct_prompts(text)
|
269 |
+
|
270 |
+
inputs = self.tokenizer(
|
271 |
+
text,
|
272 |
+
return_tensors=return_tensors,
|
273 |
+
padding=padding,
|
274 |
+
max_length=max_length,
|
275 |
+
truncation=truncation,
|
276 |
+
return_token_type_ids=return_token_type_ids,
|
277 |
+
)
|
278 |
+
|
279 |
+
return_data = {**inputs, "pixel_values": pixel_values}
|
280 |
+
|
281 |
+
if return_token_type_ids:
|
282 |
+
labels = inputs["input_ids"].masked_fill(inputs["token_type_ids"] == 0, -100)
|
283 |
+
return_data.update({"labels": labels})
|
284 |
+
return BatchFeature(data=return_data)
|
285 |
+
|
286 |
+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Florence2
|
287 |
+
def batch_decode(self, *args, **kwargs):
|
288 |
+
"""
|
289 |
+
This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
|
290 |
+
refer to the docstring of this method for more information.
|
291 |
+
"""
|
292 |
+
return self.tokenizer.batch_decode(*args, **kwargs)
|
293 |
+
|
294 |
+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Florence2
|
295 |
+
def decode(self, *args, **kwargs):
|
296 |
+
"""
|
297 |
+
This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
|
298 |
+
the docstring of this method for more information.
|
299 |
+
"""
|
300 |
+
return self.tokenizer.decode(*args, **kwargs)
|
301 |
+
|
302 |
+
@property
|
303 |
+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names with CLIP->Florence2
|
304 |
+
def model_input_names(self):
|
305 |
+
tokenizer_input_names = self.tokenizer.model_input_names
|
306 |
+
image_processor_input_names = self.image_processor.model_input_names
|
307 |
+
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
|
308 |
+
|
309 |
+
def post_process_generation(self, text=None, sequence=None, transition_beam_score=None, task=None, image_size=None):
|
310 |
+
"""
|
311 |
+
Post-process the output of the model to each of the task outputs.
|
312 |
+
|
313 |
+
Args:
|
314 |
+
text (`str`): The text to post-process.
|
315 |
+
task (`str`): The task to post-process the text for.
|
316 |
+
image_size (`Tuple[int, int]`): The size of the image. width x height.
|
317 |
+
"""
|
318 |
+
|
319 |
+
task_answer_post_processing_type = self.tasks_answer_post_processing_type.get(task, 'pure_text')
|
320 |
+
task_answer = self.post_processor(
|
321 |
+
text=text,
|
322 |
+
sequence=sequence,
|
323 |
+
transition_beam_score=transition_beam_score,
|
324 |
+
image_size=image_size,
|
325 |
+
parse_tasks=task_answer_post_processing_type,
|
326 |
+
)[task_answer_post_processing_type]
|
327 |
+
|
328 |
+
if task_answer_post_processing_type == 'pure_text':
|
329 |
+
final_answer = task_answer
|
330 |
+
# remove the special tokens
|
331 |
+
final_answer = final_answer.replace('<s>', '').replace('</s>', '')
|
332 |
+
elif task_answer_post_processing_type in ['od', 'description_with_bboxes', 'bboxes']:
|
333 |
+
od_instances = task_answer
|
334 |
+
bboxes_od = [_od_instance['bbox'] for _od_instance in od_instances]
|
335 |
+
labels_od = [str(_od_instance['cat_name']) for _od_instance in od_instances]
|
336 |
+
final_answer = {'bboxes': bboxes_od, 'labels': labels_od}
|
337 |
+
if len(od_instances) and 'score' in od_instances[0]:
|
338 |
+
scores_od = [_od_instance['score'] for _od_instance in od_instances]
|
339 |
+
final_answer['scores'] = scores_od
|
340 |
+
elif task_answer_post_processing_type in ['ocr']:
|
341 |
+
bboxes = [_od_instance['quad_box'] for _od_instance in task_answer]
|
342 |
+
labels = [str(_od_instance['text']) for _od_instance in task_answer]
|
343 |
+
final_answer = {'quad_boxes': bboxes, 'labels': labels}
|
344 |
+
elif task_answer_post_processing_type in ['phrase_grounding']:
|
345 |
+
bboxes = []
|
346 |
+
labels = []
|
347 |
+
for _grounded_phrase in task_answer:
|
348 |
+
for _bbox in _grounded_phrase['bbox']:
|
349 |
+
bboxes.append(_bbox)
|
350 |
+
labels.append(_grounded_phrase['cat_name'])
|
351 |
+
final_answer = {'bboxes': bboxes, 'labels': labels}
|
352 |
+
elif task_answer_post_processing_type in ['description_with_polygons', 'polygons']:
|
353 |
+
labels = []
|
354 |
+
polygons = []
|
355 |
+
for result in task_answer:
|
356 |
+
label = result['cat_name']
|
357 |
+
_polygons = result['polygons']
|
358 |
+
labels.append(label)
|
359 |
+
polygons.append(_polygons)
|
360 |
+
final_answer = {'polygons': polygons, 'labels': labels}
|
361 |
+
elif task_answer_post_processing_type in ['description_with_bboxes_or_polygons']:
|
362 |
+
bboxes = []
|
363 |
+
bboxes_labels = []
|
364 |
+
polygons = []
|
365 |
+
polygons_labels = []
|
366 |
+
for result in task_answer:
|
367 |
+
label = result['cat_name']
|
368 |
+
if 'polygons' in result:
|
369 |
+
_polygons = result['polygons']
|
370 |
+
polygons.append(_polygons)
|
371 |
+
polygons_labels.append(label)
|
372 |
+
else:
|
373 |
+
_bbox = result['bbox']
|
374 |
+
bboxes.append(_bbox)
|
375 |
+
bboxes_labels.append(label)
|
376 |
+
final_answer = {'bboxes': bboxes, 'bboxes_labels': bboxes_labels, 'polygons': polygons, 'polygons_labels': polygons_labels}
|
377 |
+
else:
|
378 |
+
raise ValueError('Unknown task answer post processing type: {}'.format(task_answer_post_processing_type))
|
379 |
+
|
380 |
+
final_answer = {
|
381 |
+
task: final_answer}
|
382 |
+
return final_answer
|
383 |
+
|
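# --- illustrative usage sketch (not part of the original file) ------------------------
# How this processor is typically driven end to end. `model` is assumed to be a
# Florence-2 seq2seq model loaded elsewhere; the image path is hypothetical.
#
#   from PIL import Image
#   import torch
#
#   image = Image.open('example.jpg')  # hypothetical input image
#   inputs = processor(text='<OD>', images=image, return_tensors='pt')
#   with torch.no_grad():
#       generated_ids = model.generate(
#           input_ids=inputs['input_ids'],
#           pixel_values=inputs['pixel_values'],
#           max_new_tokens=1024,
#       )
#   generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
#   result = processor.post_process_generation(
#       generated_text, task='<OD>', image_size=(image.width, image.height)
#   )
#   # result == {'<OD>': {'bboxes': [...], 'labels': [...]}}
# ---------------------------------------------------------------------------------------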
384 |
+
class BoxQuantizer(object):
|
385 |
+
def __init__(self, mode, bins):
|
386 |
+
self.mode = mode
|
387 |
+
self.bins = bins
|
388 |
+
|
389 |
+
def quantize(self, boxes: torch.Tensor, size):
|
390 |
+
bins_w, bins_h = self.bins # Quantization bins.
|
391 |
+
size_w, size_h = size # Original image size.
|
392 |
+
size_per_bin_w = size_w / bins_w
|
393 |
+
size_per_bin_h = size_h / bins_h
|
394 |
+
xmin, ymin, xmax, ymax = boxes.split(1, dim=-1) # Shape: 4 * [N, 1].
|
395 |
+
|
396 |
+
if self.mode == 'floor':
|
397 |
+
quantized_xmin = (
|
398 |
+
xmin / size_per_bin_w).floor().clamp(0, bins_w - 1)
|
399 |
+
quantized_ymin = (
|
400 |
+
ymin / size_per_bin_h).floor().clamp(0, bins_h - 1)
|
401 |
+
quantized_xmax = (
|
402 |
+
xmax / size_per_bin_w).floor().clamp(0, bins_w - 1)
|
403 |
+
quantized_ymax = (
|
404 |
+
ymax / size_per_bin_h).floor().clamp(0, bins_h - 1)
|
405 |
+
|
406 |
+
elif self.mode == 'round':
|
407 |
+
raise NotImplementedError()
|
408 |
+
|
409 |
+
else:
|
410 |
+
raise ValueError('Incorrect quantization type.')
|
411 |
+
|
412 |
+
quantized_boxes = torch.cat(
|
413 |
+
(quantized_xmin, quantized_ymin, quantized_xmax, quantized_ymax), dim=-1
|
414 |
+
).int()
|
415 |
+
|
416 |
+
return quantized_boxes
|
417 |
+
|
418 |
+
def dequantize(self, boxes: torch.Tensor, size):
|
419 |
+
bins_w, bins_h = self.bins # Quantization bins.
|
420 |
+
size_w, size_h = size # Original image size.
|
421 |
+
size_per_bin_w = size_w / bins_w
|
422 |
+
size_per_bin_h = size_h / bins_h
|
423 |
+
xmin, ymin, xmax, ymax = boxes.split(1, dim=-1) # Shape: 4 * [N, 1].
|
424 |
+
|
425 |
+
if self.mode == 'floor':
|
426 |
+
# Add 0.5 to use the center position of the bin as the coordinate.
|
427 |
+
dequantized_xmin = (xmin + 0.5) * size_per_bin_w
|
428 |
+
dequantized_ymin = (ymin + 0.5) * size_per_bin_h
|
429 |
+
dequantized_xmax = (xmax + 0.5) * size_per_bin_w
|
430 |
+
dequantized_ymax = (ymax + 0.5) * size_per_bin_h
|
431 |
+
|
432 |
+
elif self.mode == 'round':
|
433 |
+
raise NotImplementedError()
|
434 |
+
|
435 |
+
else:
|
436 |
+
raise ValueError('Incorrect quantization type.')
|
437 |
+
|
438 |
+
dequantized_boxes = torch.cat(
|
439 |
+
(dequantized_xmin, dequantized_ymin,
|
440 |
+
dequantized_xmax, dequantized_ymax), dim=-1
|
441 |
+
)
|
442 |
+
|
443 |
+
return dequantized_boxes
|
444 |
+
|
445 |
+
|
446 |
+
class CoordinatesQuantizer(object):
|
447 |
+
"""
|
448 |
+
Quantize coordinates (Nx2)
|
449 |
+
"""
|
450 |
+
|
451 |
+
def __init__(self, mode, bins):
|
452 |
+
self.mode = mode
|
453 |
+
self.bins = bins
|
454 |
+
|
455 |
+
def quantize(self, coordinates: torch.Tensor, size):
|
456 |
+
bins_w, bins_h = self.bins # Quantization bins.
|
457 |
+
size_w, size_h = size # Original image size.
|
458 |
+
size_per_bin_w = size_w / bins_w
|
459 |
+
size_per_bin_h = size_h / bins_h
|
460 |
+
assert coordinates.shape[-1] == 2, 'coordinates should be shape (N, 2)'
|
461 |
+
x, y = coordinates.split(1, dim=-1) # Shape: 4 * [N, 1].
|
462 |
+
|
463 |
+
if self.mode == 'floor':
|
464 |
+
quantized_x = (x / size_per_bin_w).floor().clamp(0, bins_w - 1)
|
465 |
+
quantized_y = (y / size_per_bin_h).floor().clamp(0, bins_h - 1)
|
466 |
+
|
467 |
+
elif self.mode == 'round':
|
468 |
+
raise NotImplementedError()
|
469 |
+
|
470 |
+
else:
|
471 |
+
raise ValueError('Incorrect quantization type.')
|
472 |
+
|
473 |
+
quantized_coordinates = torch.cat(
|
474 |
+
(quantized_x, quantized_y), dim=-1
|
475 |
+
).int()
|
476 |
+
|
477 |
+
return quantized_coordinates
|
478 |
+
|
479 |
+
def dequantize(self, coordinates: torch.Tensor, size):
|
480 |
+
bins_w, bins_h = self.bins # Quantization bins.
|
481 |
+
size_w, size_h = size # Original image size.
|
482 |
+
size_per_bin_w = size_w / bins_w
|
483 |
+
size_per_bin_h = size_h / bins_h
|
484 |
+
assert coordinates.shape[-1] == 2, 'coordinates should be shape (N, 2)'
|
485 |
+
x, y = coordinates.split(1, dim=-1) # Shape: 4 * [N, 1].
|
486 |
+
|
487 |
+
if self.mode == 'floor':
|
488 |
+
# Add 0.5 to use the center position of the bin as the coordinate.
|
489 |
+
dequantized_x = (x + 0.5) * size_per_bin_w
|
490 |
+
dequantized_y = (y + 0.5) * size_per_bin_h
|
491 |
+
|
492 |
+
elif self.mode == 'round':
|
493 |
+
raise NotImplementedError()
|
494 |
+
|
495 |
+
else:
|
496 |
+
raise ValueError('Incorrect quantization type.')
|
497 |
+
|
498 |
+
dequantized_coordinates = torch.cat(
|
499 |
+
(dequantized_x, dequantized_y), dim=-1
|
500 |
+
)
|
501 |
+
|
502 |
+
return dequantized_coordinates
|
503 |
+
|
504 |
+
|
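# --- illustrative example (not part of the original file) -----------------------------
# Worked round trip through BoxQuantizer, assuming 1000x1000 bins and a 640x480 image
# (bin width 0.64 px, bin height 0.48 px):
#
#   import torch
#   q = BoxQuantizer('floor', (1000, 1000))
#   q.quantize(torch.tensor([[100.0, 200.0, 300.0, 400.0]]), size=(640, 480))
#   # -> tensor([[156, 416, 468, 833]])
#   q.dequantize(torch.tensor([[156, 416, 468, 833]]), size=(640, 480))
#   # -> roughly [[100.16, 199.92, 299.84, 400.08]] (bin centres, hence the half-bin shift)
# ---------------------------------------------------------------------------------------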
505 |
+
class Florence2PostProcesser(object):
|
506 |
+
r"""
|
507 |
+
Florence-2 post process for converting text prediction to various tasks results.
|
508 |
+
|
509 |
+
Args:
|
510 |
+
config: A dict of configs.
|
511 |
+
tokenizer: A tokenizer for decoding text to spans.
|
512 |
+
sample config:
|
513 |
+
UNIFIED_POST_PROCESS:
|
514 |
+
# common configs
|
515 |
+
NUM_BBOX_HEIGHT_BINS: 1000
|
516 |
+
NUM_BBOX_WIDTH_BINS: 1000
|
517 |
+
COORDINATES_HEIGHT_BINS: 1000
|
518 |
+
COORDINATES_WIDTH_BINS: 1000
|
519 |
+
# task specific configs, override the common configs
|
520 |
+
PARSE_TASKS:
|
521 |
+
- TASK_NAME: 'video_dense_caption'
|
522 |
+
PATTERN: 'r<time_(\d+)><time_(\d+)>([a-zA-Z0-9 ]+)'
|
523 |
+
SCORE_MODE: 'avg_cat_name_scores'
|
524 |
+
NUM_BINS: 100
|
525 |
+
- TASK_NAME: 'od'
|
526 |
+
PATTERN: 'r<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>([a-zA-Z0-9 ]+)'
|
527 |
+
SCORE_MODE: 'avg_cat_name_scores'
|
528 |
+
|
529 |
+
Returns:
|
530 |
+
parsed_dict (dict): A dict of parsed results.
|
531 |
+
"""
|
532 |
+
def __init__(
|
533 |
+
self,
|
534 |
+
tokenizer=None
|
535 |
+
):
|
536 |
+
parse_tasks = []
|
537 |
+
parse_task_configs = {}
|
538 |
+
config = self._create_default_config()
|
539 |
+
for task in config['PARSE_TASKS']:
|
540 |
+
parse_tasks.append(task['TASK_NAME'])
|
541 |
+
parse_task_configs[task['TASK_NAME']] = task
|
542 |
+
|
543 |
+
self.config = config
|
544 |
+
self.parse_tasks = parse_tasks
|
545 |
+
self.parse_tasks_configs = parse_task_configs
|
546 |
+
|
547 |
+
self.tokenizer = tokenizer
|
548 |
+
if self.tokenizer is not None:
|
549 |
+
self.all_special_tokens = set(self.tokenizer.all_special_tokens)
|
550 |
+
|
551 |
+
self.init_quantizers()
|
552 |
+
self.black_list_of_phrase_grounding = self._create_black_list_of_phrase_grounding()
|
553 |
+
|
554 |
+
def _create_black_list_of_phrase_grounding(self):
|
555 |
+
black_list = {}
|
556 |
+
|
557 |
+
if 'phrase_grounding' in self.parse_tasks and self.parse_tasks_configs['phrase_grounding']['FILTER_BY_BLACK_LIST']:
|
558 |
+
black_list = set(
|
559 |
+
['it', 'I', 'me', 'mine',
|
560 |
+
'you', 'your', 'yours',
|
561 |
+
'he', 'him', 'his',
|
562 |
+
'she', 'her', 'hers',
|
563 |
+
'they', 'them', 'their', 'theirs',
|
564 |
+
'one', 'oneself',
|
565 |
+
'we', 'us', 'our', 'ours',
|
566 |
+
'you', 'your', 'yours',
|
567 |
+
'they', 'them', 'their', 'theirs',
|
568 |
+
'mine', 'yours', 'his', 'hers', 'its',
|
569 |
+
'ours', 'yours', 'theirs',
|
570 |
+
'myself', 'yourself', 'himself', 'herself', 'itself',
|
571 |
+
'ourselves', 'yourselves', 'themselves',
|
572 |
+
'this', 'that',
|
573 |
+
'these', 'those',
|
574 |
+
'who', 'whom', 'whose', 'which', 'what',
|
575 |
+
'who', 'whom', 'whose', 'which', 'that',
|
576 |
+
'all', 'another', 'any', 'anybody', 'anyone', 'anything',
|
577 |
+
'each', 'everybody', 'everyone', 'everything',
|
578 |
+
'few', 'many', 'nobody', 'none', 'one', 'several',
|
579 |
+
'some', 'somebody', 'someone', 'something',
|
580 |
+
'each other', 'one another',
|
581 |
+
'myself', 'yourself', 'himself', 'herself', 'itself',
|
582 |
+
'ourselves', 'yourselves', 'themselves',
|
583 |
+
'the image', 'image', 'images', 'the', 'a', 'an', 'a group',
|
584 |
+
'other objects', 'lots', 'a set',
|
585 |
+
]
|
586 |
+
)
|
587 |
+
|
588 |
+
return black_list
|
589 |
+
|
590 |
+
def _create_default_config(self):
|
591 |
+
config = {
|
592 |
+
'NUM_BBOX_HEIGHT_BINS': 1000,
|
593 |
+
'NUM_BBOX_WIDTH_BINS': 1000,
|
594 |
+
'BOX_QUANTIZATION_MODE': 'floor',
|
595 |
+
'COORDINATES_HEIGHT_BINS': 1000,
|
596 |
+
'COORDINATES_WIDTH_BINS': 1000,
|
597 |
+
'COORDINATES_QUANTIZATION_MODE': 'floor',
|
598 |
+
'PARSE_TASKS': [
|
599 |
+
{
|
600 |
+
'TASK_NAME': 'od',
|
601 |
+
'PATTERN': r'([a-zA-Z0-9 ]+)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>',
|
602 |
+
'SCORE_MODE': 'avg_loc_scores'
|
603 |
+
},
|
604 |
+
{
|
605 |
+
'TASK_NAME': 'ocr',
|
606 |
+
'PATTERN': r'(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>',
|
607 |
+
'AREA_THRESHOLD': 0.00
|
608 |
+
},
|
609 |
+
{
|
610 |
+
'TASK_NAME': 'phrase_grounding',
|
611 |
+
'FILTER_BY_BLACK_LIST': True
|
612 |
+
},
|
613 |
+
{
|
614 |
+
'TASK_NAME': 'pure_text',
|
615 |
+
},
|
616 |
+
{
|
617 |
+
'TASK_NAME': 'description_with_bboxes',
|
618 |
+
'SCORE_MODE': 'avg_loc_scores'
|
619 |
+
},
|
620 |
+
{
|
621 |
+
'TASK_NAME': 'description_with_polygons',
|
622 |
+
},
|
623 |
+
{
|
624 |
+
'TASK_NAME': 'polygons',
|
625 |
+
},
|
626 |
+
{
|
627 |
+
'TASK_NAME': 'bboxes',
|
628 |
+
},
|
629 |
+
{
|
630 |
+
'TASK_NAME': 'description_with_bboxes_or_polygons',
|
631 |
+
}
|
632 |
+
]
|
633 |
+
}
|
634 |
+
|
635 |
+
return config
|
636 |
+
|
637 |
+
def init_quantizers(self):
|
638 |
+
# we have box_quantizer (od, grounding) and coordinates_quantizer (ocr, referring_segmentation)
|
639 |
+
num_bbox_height_bins = self.config.get('NUM_BBOX_HEIGHT_BINS', 1000)
|
640 |
+
num_bbox_width_bins = self.config.get('NUM_BBOX_WIDTH_BINS', 1000)
|
641 |
+
box_quantization_mode = self.config.get('BOX_QUANTIZATION_MODE', 'floor')
|
642 |
+
self.box_quantizer = BoxQuantizer(
|
643 |
+
box_quantization_mode,
|
644 |
+
(num_bbox_width_bins, num_bbox_height_bins),
|
645 |
+
)
|
646 |
+
|
647 |
+
num_bbox_height_bins = self.config['COORDINATES_HEIGHT_BINS'] if 'COORDINATES_HEIGHT_BINS' in self.config else self.config.get('NUM_BBOX_HEIGHT_BINS', 1000)
|
648 |
+
num_bbox_width_bins = self.config['COORDINATES_WIDTH_BINS'] if 'COORDINATES_WIDTH_BINS' in self.config else self.config.get('NUM_BBOX_WIDTH_BINS', 1000)
|
649 |
+
box_quantization_mode = self.config.get('COORDINATES_QUANTIZATION_MODE') if 'COORDINATES_QUANTIZATION_MODE' in self.config else self.config.get('BOX_QUANTIZATION_MODE', 'floor')
|
650 |
+
self.coordinates_quantizer = CoordinatesQuantizer(
|
651 |
+
box_quantization_mode,
|
652 |
+
(num_bbox_width_bins, num_bbox_height_bins),
|
653 |
+
)
|
654 |
+
|
655 |
+
def decode_with_spans(self, tokenizer, token_ids):
|
656 |
+
filtered_tokens = tokenizer.convert_ids_to_tokens(
|
657 |
+
token_ids, skip_special_tokens=False)
|
658 |
+
assert len(filtered_tokens) == len(token_ids)
|
659 |
+
sub_texts = []
|
660 |
+
for token in filtered_tokens:
|
661 |
+
if token in self.all_special_tokens:
|
662 |
+
sub_texts.append(token)
|
663 |
+
else:
|
664 |
+
if isinstance(tokenizer, (BartTokenizer, BartTokenizerFast)):
|
665 |
+
sub_text = tokenizer.convert_tokens_to_string([token])
|
666 |
+
else:
|
667 |
+
raise ValueError(f'type {type(tokenizer)} not supported')
|
668 |
+
sub_texts.append(sub_text)
|
669 |
+
|
670 |
+
text = ''
|
671 |
+
spans = []
|
672 |
+
for sub_text in sub_texts:
|
673 |
+
span = (len(text), len(text) + len(sub_text)) # [start index, end index).
|
674 |
+
text += sub_text
|
675 |
+
spans.append(span)
|
676 |
+
return text, spans
|
677 |
+
|
678 |
+
def parse_od_from_text_and_spans(
|
679 |
+
self,
|
680 |
+
text,
|
681 |
+
pattern,
|
682 |
+
image_size,
|
683 |
+
phrase_centric=False
|
684 |
+
):
|
685 |
+
parsed = list(re.finditer(pattern, text))
|
686 |
+
|
687 |
+
instances = []
|
688 |
+
for i in range(len(parsed)):
|
689 |
+
# Prepare instance.
|
690 |
+
instance = {}
|
691 |
+
|
692 |
+
if phrase_centric:
|
693 |
+
bbox_bins = [int(parsed[i].group(j)) for j in range(2, 6)]
|
694 |
+
else:
|
695 |
+
bbox_bins = [int(parsed[i].group(j)) for j in range(1, 5)]
|
696 |
+
instance['bbox'] = self.box_quantizer.dequantize(
|
697 |
+
boxes=torch.tensor(bbox_bins),
|
698 |
+
size=image_size
|
699 |
+
).tolist()
|
700 |
+
|
701 |
+
if phrase_centric:
|
702 |
+
instance['cat_name'] = parsed[i].group(1).lower().strip()
|
703 |
+
else:
|
704 |
+
instance['cat_name'] = parsed[i].group(5).lower().strip()
|
705 |
+
instances.append(instance)
|
706 |
+
|
707 |
+
return instances
|
708 |
+
|
709 |
+
def parse_ocr_from_text_and_spans(self,
|
710 |
+
text,
|
711 |
+
pattern,
|
712 |
+
image_size,
|
713 |
+
area_threshold=-1.0,
|
714 |
+
):
|
715 |
+
bboxes = []
|
716 |
+
labels = []
|
717 |
+
text = text.replace('<s>', '')
|
718 |
+
# ocr with regions
|
719 |
+
parsed = re.findall(pattern, text)
|
720 |
+
instances = []
|
721 |
+
image_width, image_height = image_size
|
722 |
+
|
723 |
+
for ocr_line in parsed:
|
724 |
+
ocr_content = ocr_line[0]
|
725 |
+
quad_box = ocr_line[1:]
|
726 |
+
quad_box = [int(i) for i in quad_box]
|
727 |
+
quad_box = self.coordinates_quantizer.dequantize(
|
728 |
+
torch.tensor(np.array(quad_box).reshape(-1, 2)),
|
729 |
+
size=image_size
|
730 |
+
).reshape(-1).tolist()
|
731 |
+
|
732 |
+
if area_threshold > 0:
|
733 |
+
x_coords = [i for i in quad_box[0::2]]
|
734 |
+
y_coords = [i for i in quad_box[1::2]]
|
735 |
+
|
736 |
+
# apply the Shoelace formula
|
737 |
+
area = 0.5 * abs(sum(x_coords[i] * y_coords[i + 1] - x_coords[i + 1] * y_coords[i] for i in range(4 - 1)))
|
738 |
+
|
739 |
+
if area < (image_width * image_height) * area_threshold:
|
740 |
+
continue
|
741 |
+
|
742 |
+
bboxes.append(quad_box)
|
743 |
+
labels.append(ocr_content)
|
744 |
+
instances.append({
|
745 |
+
'quad_box': quad_box,
|
746 |
+
'text': ocr_content,
|
747 |
+
})
|
748 |
+
return instances
|
749 |
+
|
750 |
+
def parse_phrase_grounding_from_text_and_spans(self, text, pattern, image_size):
|
751 |
+
# ignore <s> </s> and <pad>
|
752 |
+
cur_span = 0
|
753 |
+
if text.startswith('<s>'):
|
754 |
+
cur_span += 3
|
755 |
+
|
756 |
+
text = text.replace('<s>', '')
|
757 |
+
text = text.replace('</s>', '')
|
758 |
+
text = text.replace('<pad>', '')
|
759 |
+
|
760 |
+
pattern = r"([^<]+(?:<loc_\d+>){4,})"
|
761 |
+
phrases = re.findall(pattern, text)
|
762 |
+
|
763 |
+
# pattern should be text pattern and od pattern
|
764 |
+
pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)'
|
765 |
+
box_pattern = r'<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>'
|
766 |
+
|
767 |
+
instances = []
|
768 |
+
for pharse_text in phrases:
|
769 |
+
phrase_text_strip = pharse_text.replace('<ground>', '', 1)
|
770 |
+
phrase_text_strip = phrase_text_strip.replace('<obj>', '', 1)
|
771 |
+
|
772 |
+
if phrase_text_strip == '':
|
773 |
+
cur_span += len(pharse_text)
|
774 |
+
continue
|
775 |
+
|
776 |
+
# Prepare instance.
|
777 |
+
instance = {}
|
778 |
+
|
779 |
+
# parse phrase, get string
|
780 |
+
phrase = re.search(pattern, phrase_text_strip)
|
781 |
+
if phrase is None:
|
782 |
+
cur_span += len(pharse_text)
|
783 |
+
continue
|
784 |
+
|
785 |
+
# parse bboxes by box_pattern
|
786 |
+
bboxes_parsed = list(re.finditer(box_pattern, pharse_text))
|
787 |
+
if len(bboxes_parsed) == 0:
|
788 |
+
cur_span += len(pharse_text)
|
789 |
+
continue
|
790 |
+
|
791 |
+
phrase = phrase.group()
|
792 |
+
# remove leading and trailing spaces
|
793 |
+
phrase = phrase.strip()
|
794 |
+
|
795 |
+
if phrase in self.black_list_of_phrase_grounding:
|
796 |
+
cur_span += len(pharse_text)
|
797 |
+
continue
|
798 |
+
|
799 |
+
# a list of list
|
800 |
+
bbox_bins = [[int(_bboxes_parsed.group(j)) for j in range(1, 5)] for _bboxes_parsed in bboxes_parsed]
|
801 |
+
instance['bbox'] = self.box_quantizer.dequantize(
|
802 |
+
boxes=torch.tensor(bbox_bins),
|
803 |
+
size=image_size
|
804 |
+
).tolist()
|
805 |
+
|
806 |
+
# exclude non-ascii characters
|
807 |
+
phrase = phrase.encode('ascii',errors='ignore').decode('ascii')
|
808 |
+
instance['cat_name'] = phrase
|
809 |
+
|
810 |
+
instances.append(instance)
|
811 |
+
|
812 |
+
return instances
|
813 |
+
|
814 |
+
def parse_description_with_bboxes_from_text_and_spans(
|
815 |
+
self,
|
816 |
+
text,
|
817 |
+
spans=None,
|
818 |
+
scores=None,
|
819 |
+
score_mode=None,
|
820 |
+
pattern=None,
|
821 |
+
image_size=None,
|
822 |
+
allow_empty_phrase=False
|
823 |
+
):
|
824 |
+
def find_matched_token_indices(cur_span, token_spans):
|
825 |
+
inds = []
|
826 |
+
for i, token_span in enumerate(token_spans):
|
827 |
+
if not (token_span[1] <= cur_span[0] or token_span[0] >= cur_span[1]):
|
828 |
+
inds.append(i)
|
829 |
+
return inds
|
830 |
+
|
831 |
+
cur_span = 0
|
832 |
+
if text.startswith('<s>'):
|
833 |
+
cur_span += 3
|
834 |
+
|
835 |
+
text = text.replace('<s>', '')
|
836 |
+
text = text.replace('</s>', '')
|
837 |
+
text = text.replace('<pad>', '')
|
838 |
+
|
839 |
+
if allow_empty_phrase:
|
840 |
+
pattern = rf"(?:(?:<loc_\d+>){{4,}})"
|
841 |
+
else:
|
842 |
+
pattern = r"([^<]+(?:<loc_\d+>){4,})"
|
843 |
+
phrases = re.findall(pattern, text)
|
844 |
+
|
845 |
+
# pattern should be text pattern and od pattern
|
846 |
+
pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)'
|
847 |
+
box_pattern = r'<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>'
|
848 |
+
|
849 |
+
instances = []
|
850 |
+
for pharse_text in phrases:
|
851 |
+
phrase_text_strip = pharse_text.replace('<ground>', '', 1)
|
852 |
+
phrase_text_strip = phrase_text_strip.replace('<obj>', '', 1)
|
853 |
+
|
854 |
+
if phrase_text_strip == '' and not allow_empty_phrase:
|
855 |
+
cur_span += len(pharse_text)
|
856 |
+
continue
|
857 |
+
|
858 |
+
# parse phrase, get string
|
859 |
+
phrase = re.search(pattern, phrase_text_strip)
|
860 |
+
if phrase is None:
|
861 |
+
cur_span += len(pharse_text)
|
862 |
+
continue
|
863 |
+
|
864 |
+
phrase_span = phrase.span()
|
865 |
+
phrase = phrase.group()
|
866 |
+
# remove leading and trailing spaces
|
867 |
+
phrase = phrase.strip()
|
868 |
+
|
869 |
+
# parse bboxes by box_pattern
|
870 |
+
bboxes_parsed = list(re.finditer(box_pattern, pharse_text))
|
871 |
+
if len(bboxes_parsed) == 0:
|
872 |
+
cur_span += len(pharse_text)
|
873 |
+
continue
|
874 |
+
|
875 |
+
# a list of list
|
876 |
+
bbox_bins = [[int(_bboxes_parsed.group(j)) for j in range(1, 5)] for _bboxes_parsed in bboxes_parsed]
|
877 |
+
|
878 |
+
bboxes = self.box_quantizer.dequantize(
|
879 |
+
boxes=torch.tensor(bbox_bins),
|
880 |
+
size=image_size
|
881 |
+
).tolist()
|
882 |
+
|
883 |
+
if score_mode == 'avg_loc_scores':
|
884 |
+
if spans is None or scores is None:
|
885 |
+
all_scores = None
|
886 |
+
else:
|
887 |
+
bbox_end_spans = [_bboxes_parsed.span(0) for _bboxes_parsed in bboxes_parsed]
|
888 |
+
all_scores = []
|
889 |
+
for _spans in bbox_end_spans:
|
890 |
+
token_inds = find_matched_token_indices((_spans[0] + cur_span, _spans[1]+ cur_span), spans)
|
891 |
+
loc_scores = [scores[token_i] for token_i in token_inds]
|
892 |
+
score = sum(loc_scores) / len(loc_scores)
|
893 |
+
all_scores.append(score)
|
894 |
+
elif score_mode == 'avg_cat_name_scores':
|
895 |
+
if spans is None or scores is None:
|
896 |
+
all_scores = None
|
897 |
+
else:
|
898 |
+
cat_name_token_inds = find_matched_token_indices((phrase_span[0] + cur_span, phrase_span[1]+cur_span), spans)
|
899 |
+
cat_name_scores = [scores[token_i] for token_i in cat_name_token_inds]
|
900 |
+
score = sum(cat_name_scores) / len(cat_name_scores)
|
901 |
+
all_scores = [score] * len(bboxes)
|
902 |
+
elif score_mode is None:
|
903 |
+
all_scores = None
|
904 |
+
else:
|
905 |
+
raise ValueError('Unknown score mode: {}'.format(score_mode))
|
906 |
+
|
907 |
+
phrase = phrase.encode('ascii',errors='ignore').decode('ascii')
|
908 |
+
for _idx, _bboxes in enumerate(bboxes):
|
909 |
+
# Prepare instance.
|
910 |
+
instance = {}
|
911 |
+
instance['bbox'] = _bboxes
|
912 |
+
# exclude non-ascii characters
|
913 |
+
instance['cat_name'] = phrase
|
914 |
+
if all_scores is not None:
|
915 |
+
instance['score'] = math.exp(all_scores[_idx])
|
916 |
+
instances.append(instance)
|
917 |
+
|
918 |
+
cur_span += len(pharse_text)
|
919 |
+
|
920 |
+
return instances
|
921 |
+
|
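# --- illustrative example (not part of the original file) -----------------------------
# Sketch of how a raw detection string is parsed by the patterns above, for a
# hypothetical model output and a 640x480 image:
#
#   text = '</s><s>car<loc_52><loc_333><loc_932><loc_774></s>'
#   After the <s>/</s> tokens are stripped, the phrase pattern yields
#   ['car<loc_52><loc_333><loc_932><loc_774>']; box_pattern extracts the bins
#   (52, 333, 932, 774), which box_quantizer.dequantize maps back to pixels,
#   roughly [33.6, 160.1, 596.8, 371.8].
# ---------------------------------------------------------------------------------------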
922 |
+
def parse_description_with_polygons_from_text_and_spans(self, text, pattern, image_size,
|
923 |
+
allow_empty_phrase=False,
|
924 |
+
polygon_sep_token='<sep>',
|
925 |
+
polygon_start_token='<poly>',
|
926 |
+
polygon_end_token='</poly>',
|
927 |
+
with_box_at_start=False,
|
928 |
+
):
|
929 |
+
|
930 |
+
# ref_seg format: '<expression><x1><y1><x2><y2><><><sep><><><><>'
|
931 |
+
# ignore <s> </s> and <pad>
|
932 |
+
|
933 |
+
text = text.replace('<s>', '')
|
934 |
+
text = text.replace('</s>', '')
|
935 |
+
text = text.replace('<pad>', '')
|
936 |
+
|
937 |
+
if allow_empty_phrase:
|
938 |
+
pattern = rf"(?:(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})"
|
939 |
+
else:
|
940 |
+
# [^<]+: This part matches one or more characters that are not the < symbol.
|
941 |
+
# The ^ inside the square brackets [] is a negation, meaning it matches anything except <.
|
942 |
+
#
|
943 |
+
pattern = rf"([^<]+(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})"
|
944 |
+
phrases = re.findall(pattern, text)
|
945 |
+
|
946 |
+
phrase_string_pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_|<poly>)'
|
947 |
+
box_pattern = rf'((?:<loc_\d+>)+)(?:{re.escape(polygon_sep_token)}|$)'
|
948 |
+
|
949 |
+
# one polygons instance is separated by polygon_start_token and polygon_end_token
|
950 |
+
polygons_instance_pattern = rf'{re.escape(polygon_start_token)}(.*?){re.escape(polygon_end_token)}'
|
951 |
+
|
952 |
+
instances = []
|
953 |
+
for phrase_text in phrases:
|
954 |
+
|
955 |
+
# exclude loc_\d+>
|
956 |
+
# need to get span if want to include category score
|
957 |
+
phrase_text_strip = re.sub(r'^loc_\d+>', '', phrase_text, count=1)
|
958 |
+
|
959 |
+
# phrase = phrase.replace('<poly>', '')
|
960 |
+
# phrase = phrase.replace('poly>', '')
|
961 |
+
|
962 |
+
if phrase_text_strip == '' and not allow_empty_phrase:
|
963 |
+
continue
|
964 |
+
|
965 |
+
|
966 |
+
# parse phrase, get string
|
967 |
+
phrase = re.search(phrase_string_pattern, phrase_text_strip)
|
968 |
+
if phrase is None:
|
969 |
+
continue
|
970 |
+
phrase = phrase.group()
|
971 |
+
# remove leading and trailing spaces
|
972 |
+
phrase = phrase.strip()
|
973 |
+
|
974 |
+
# parse bboxes by box_pattern
|
975 |
+
|
976 |
+
# split by polygon_start_token and polygon_end_token first using polygons_instance_pattern
|
977 |
+
if polygon_start_token in phrase_text and polygon_end_token in phrase_text:
|
978 |
+
polygons_instances_parsed = list(re.finditer(polygons_instance_pattern, phrase_text))
|
979 |
+
else:
|
980 |
+
polygons_instances_parsed = [phrase_text]
|
981 |
+
|
982 |
+
for _polygons_instances_parsed in polygons_instances_parsed:
|
983 |
+
# Prepare instance.
|
984 |
+
instance = {}
|
985 |
+
|
986 |
+
# polygons_parsed= list(re.finditer(box_pattern, phrase_text))
|
987 |
+
if isinstance(_polygons_instances_parsed, str):
|
988 |
+
polygons_parsed= list(re.finditer(box_pattern, _polygons_instances_parsed))
|
989 |
+
else:
|
990 |
+
polygons_parsed= list(re.finditer(box_pattern, _polygons_instances_parsed.group(1)))
|
991 |
+
if len(polygons_parsed) == 0:
|
992 |
+
continue
|
993 |
+
|
994 |
+
# a list of list (polygon)
|
995 |
+
bbox = []
|
996 |
+
polygons = []
|
997 |
+
for _polygon_parsed in polygons_parsed:
|
998 |
+
# group 1: whole <loc_\d+>...</loc_\d+>
|
999 |
+
_polygon = _polygon_parsed.group(1)
|
1000 |
+
# parse into list of int
|
1001 |
+
_polygon = [int(_loc_parsed.group(1)) for _loc_parsed in re.finditer(r'<loc_(\d+)>', _polygon)]
|
1002 |
+
if with_box_at_start and len(bbox) == 0:
|
1003 |
+
if len(_polygon) > 4:
|
1004 |
+
# the first four coordinates are the bbox; otherwise there is no valid bbox prediction
|
1005 |
+
bbox = _polygon[:4]
|
1006 |
+
_polygon = _polygon[4:]
|
1007 |
+
else:
|
1008 |
+
bbox = [0, 0, 0, 0]
|
1009 |
+
# abandon last element if is not paired
|
1010 |
+
if len(_polygon) % 2 == 1:
|
1011 |
+
_polygon = _polygon[:-1]
|
1012 |
+
|
1013 |
+
# reshape into (n, 2)
|
1014 |
+
_polygon = self.coordinates_quantizer.dequantize(
|
1015 |
+
torch.tensor(np.array(_polygon).reshape(-1, 2)),
|
1016 |
+
size=image_size
|
1017 |
+
).reshape(-1).tolist()
|
1018 |
+
# reshape back
|
1019 |
+
polygons.append(_polygon)
|
1020 |
+
|
1021 |
+
instance['cat_name'] = phrase
|
1022 |
+
instance['polygons'] = polygons
|
1023 |
+
if len(bbox) != 0:
|
1024 |
+
instance['bbox'] = self.box_quantizer.dequantize(
|
1025 |
+
boxes=torch.tensor([bbox]),
|
1026 |
+
size=image_size
|
1027 |
+
).tolist()[0]
|
1028 |
+
|
1029 |
+
instances.append(instance)
|
1030 |
+
|
1031 |
+
return instances
|
1032 |
+
|
1033 |
+
def __call__(
|
1034 |
+
self,
|
1035 |
+
text=None,
|
1036 |
+
sequence=None,
|
1037 |
+
transition_beam_score=None,
|
1038 |
+
image_size=None,
|
1039 |
+
parse_tasks=None,
|
1040 |
+
):
|
1041 |
+
"""
|
1042 |
+
Args:
|
1043 |
+
text: model outputs
|
1044 |
+
image_size: (width, height)
|
1045 |
+
parse_tasks: a list of tasks to parse, if None, parse all tasks.
|
1046 |
+
|
1047 |
+
"""
|
1048 |
+
if parse_tasks is not None:
|
1049 |
+
if isinstance(parse_tasks, str):
|
1050 |
+
parse_tasks = [parse_tasks]
|
1051 |
+
for _parse_task in parse_tasks:
|
1052 |
+
assert _parse_task in self.parse_tasks, f'parse task {_parse_task} not supported'
|
1053 |
+
|
1054 |
+
# sequence or text should be provided
|
1055 |
+
assert sequence is not None or text is not None, 'sequence or text should be provided'
|
1056 |
+
assert sequence is None or text is None, 'only one of sequence and text should be provided'
|
1057 |
+
|
1058 |
+
if sequence is not None:
|
1059 |
+
sequence = sequence.tolist()[1:]
|
1060 |
+
text, spans = self.decode_with_spans(self.tokenizer, sequence)
|
1061 |
+
if transition_beam_score is not None:
|
1062 |
+
transition_beam_score = transition_beam_score.tolist()
|
1063 |
+
assert len(sequence) == len(transition_beam_score)
|
1064 |
+
else:
|
1065 |
+
spans = None
|
1066 |
+
transition_beam_score = None
|
1067 |
+
|
1068 |
+
parsed_dict = {
|
1069 |
+
'text': text
|
1070 |
+
}
|
1071 |
+
|
1072 |
+
for task in self.parse_tasks:
|
1073 |
+
if parse_tasks is not None and task not in parse_tasks:
|
1074 |
+
continue
|
1075 |
+
|
1076 |
+
pattern = self.parse_tasks_configs[task].get('PATTERN', None)
|
1077 |
+
score_mode = self.parse_tasks_configs[task].get('SCORE_MODE', None)
|
1078 |
+
|
1079 |
+
if task == 'ocr':
|
1080 |
+
instances = self.parse_ocr_from_text_and_spans(
|
1081 |
+
text,
|
1082 |
+
pattern=pattern,
|
1083 |
+
image_size=image_size,
|
1084 |
+
area_threshold=self.parse_tasks_configs[task].get('AREA_THRESHOLD', 0.0),
|
1085 |
+
)
|
1086 |
+
parsed_dict['ocr'] = instances
|
1087 |
+
elif task == 'phrase_grounding':
|
1088 |
+
instances = self.parse_phrase_grounding_from_text_and_spans(
|
1089 |
+
text,
|
1090 |
+
pattern=pattern,
|
1091 |
+
image_size=image_size,
|
1092 |
+
)
|
1093 |
+
parsed_dict['phrase_grounding'] = instances
|
1094 |
+
elif task == 'pure_text':
|
1095 |
+
parsed_dict['pure_text'] = text
|
1096 |
+
elif task == 'description_with_bboxes':
|
1097 |
+
instances = self.parse_description_with_bboxes_from_text_and_spans(
|
1098 |
+
text,
|
1099 |
+
spans=spans,
|
1100 |
+
scores=transition_beam_score,
|
1101 |
+
score_mode=score_mode,
|
1102 |
+
pattern=pattern,
|
1103 |
+
image_size=image_size,
|
1104 |
+
)
|
1105 |
+
parsed_dict['description_with_bboxes'] = instances
|
1106 |
+
elif task == 'description_with_polygons':
|
1107 |
+
instances = self.parse_description_with_polygons_from_text_and_spans(
|
1108 |
+
text,
|
1109 |
+
pattern=pattern,
|
1110 |
+
image_size=image_size,
|
1111 |
+
)
|
1112 |
+
parsed_dict['description_with_polygons'] = instances
|
1113 |
+
elif task == 'polygons':
|
1114 |
+
instances = self.parse_description_with_polygons_from_text_and_spans(
|
1115 |
+
text,
|
1116 |
+
pattern=pattern,
|
1117 |
+
image_size=image_size,
|
1118 |
+
allow_empty_phrase=True,
|
1119 |
+
)
|
1120 |
+
parsed_dict['polygons'] = instances
|
1121 |
+
elif task == 'bboxes':
|
1122 |
+
instances = self.parse_description_with_bboxes_from_text_and_spans(
|
1123 |
+
text,
|
1124 |
+
pattern=pattern,
|
1125 |
+
image_size=image_size,
|
1126 |
+
allow_empty_phrase=True,
|
1127 |
+
)
|
1128 |
+
parsed_dict['bboxes'] = instances
|
1129 |
+
elif task == 'description_with_bboxes_or_polygons':
|
1130 |
+
if '<poly>' in text:
|
1131 |
+
# only support either polygons or bboxes, not both at the same time
|
1132 |
+
instances = self.parse_description_with_polygons_from_text_and_spans(
|
1133 |
+
text,
|
1134 |
+
pattern=pattern,
|
1135 |
+
image_size=image_size,
|
1136 |
+
)
|
1137 |
+
else:
|
1138 |
+
instances = self.parse_description_with_bboxes_from_text_and_spans(
|
1139 |
+
text,
|
1140 |
+
pattern=pattern,
|
1141 |
+
image_size=image_size,
|
1142 |
+
)
|
1143 |
+
parsed_dict['description_with_bboxes_or_polygons'] = instances
|
1144 |
+
else:
|
1145 |
+
raise ValueError("task {} is not supported".format(task))
|
1146 |
+
|
1147 |
+
return parsed_dict
|
eval/grounded_sam/florence2/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
eval/grounded_sam/florence2/tokenizer_config.json
ADDED
@@ -0,0 +1,4 @@
1 |
+
{
|
2 |
+
"model_max_length": 1024
|
3 |
+
}
|
4 |
+
|
eval/grounded_sam/florence2/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
eval/grounded_sam/grounded_sam2_florence2_autolabel_pipeline.py
ADDED
@@ -0,0 +1,361 @@
1 |
+
import os
|
2 |
+
import cv2
|
3 |
+
import torch
|
4 |
+
import argparse
|
5 |
+
import numpy as np
|
6 |
+
import supervision as sv
|
7 |
+
from PIL import Image
|
8 |
+
import gc
|
9 |
+
import sys
|
10 |
+
|
11 |
+
from eval.grounded_sam.florence2.modeling_florence2 import Florence2ForConditionalGeneration
|
12 |
+
from eval.grounded_sam.florence2.processing_florence2 import Florence2Processor
|
13 |
+
from eval.grounded_sam.sam2.build_sam import build_sam2
|
14 |
+
from eval.grounded_sam.sam2.sam2_image_predictor import SAM2ImagePredictor
|
15 |
+
|
16 |
+
|
17 |
+
class FlorenceSAM:
|
18 |
+
|
19 |
+
# official usage: https://huggingface.co/microsoft/Florence-2-large/blob/main/sample_inference.ipynb
|
20 |
+
TASK_PROMPT = {
|
21 |
+
"original": "<GIVEN>",
|
22 |
+
"caption": "<CAPTION>",
|
23 |
+
"detailed_caption": "<DETAILED_CAPTION>",
|
24 |
+
"more_detailed_caption": "<MORE_DETAILED_CAPTION>",
|
25 |
+
"object_detection": "<OD>",
|
26 |
+
"dense_region_caption": "<DENSE_REGION_CAPTION>",
|
27 |
+
"region_proposal": "<REGION_PROPOSAL>",
|
28 |
+
"phrase_grounding": "<CAPTION_TO_PHRASE_GROUNDING>",
|
29 |
+
"referring_expression_segmentation": "<REFERRING_EXPRESSION_SEGMENTATION>",
|
30 |
+
"region_to_segmentation": "<REGION_TO_SEGMENTATION>",
|
31 |
+
"open_vocabulary_detection": "<OPEN_VOCABULARY_DETECTION>",
|
32 |
+
"region_to_category": "<REGION_TO_CATEGORY>",
|
33 |
+
"region_to_description": "<REGION_TO_DESCRIPTION>",
|
34 |
+
"ocr": "<OCR>",
|
35 |
+
"ocr_with_region": "<OCR_WITH_REGION>",
|
36 |
+
}
|
37 |
+
|
38 |
+
|
39 |
+
def __init__(self, device):
|
40 |
+
"""
|
41 |
+
Init Florence-2 and SAM 2 Model
|
42 |
+
"""
|
43 |
+
print(f"[{self}] init on device {device}")
|
44 |
+
self.device = torch.device(device)
|
45 |
+
|
46 |
+
# with torch.autocast(device_type="cuda", dtype=torch.float32).__enter__()
|
47 |
+
# self.torch_dtype = torch.float32
|
48 |
+
# self.torch_dtype = torch.float16
|
49 |
+
self.torch_dtype = torch.bfloat16
|
50 |
+
|
51 |
+
try:
|
52 |
+
if torch.cuda.get_device_properties(0).major >= 8:
|
53 |
+
# turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
|
54 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
55 |
+
torch.backends.cudnn.allow_tf32 = True
|
56 |
+
# self.torch_dtype = torch.bfloat16
|
57 |
+
# else:
|
58 |
+
# self.torch_dtype = torch.float16
|
59 |
+
except:
|
60 |
+
self.torch_dtype = torch.bfloat16
|
61 |
+
|
62 |
+
FLORENCE2_MODEL_ID = os.getenv('FLORENCE2_MODEL_PATH', "microsoft/Florence-2-large")
|
63 |
+
SAM2_CHECKPOINT = os.getenv('SAM2_MODEL_PATH')
|
64 |
+
SAM2_CONFIG = "configs/sam2.1/sam2.1_hiera_l.yaml"
|
65 |
+
|
66 |
+
self.florence2_model = Florence2ForConditionalGeneration.from_pretrained(
|
67 |
+
FLORENCE2_MODEL_ID,
|
68 |
+
torch_dtype=self.torch_dtype,
|
69 |
+
).eval().to(self.device)
|
70 |
+
self.florence2_processor = Florence2Processor.from_pretrained(
|
71 |
+
FLORENCE2_MODEL_ID,
|
72 |
+
)
|
73 |
+
sam2_model = build_sam2(SAM2_CONFIG, SAM2_CHECKPOINT, device=self.device)
|
74 |
+
self.sam2_predictor = SAM2ImagePredictor(sam2_model)
|
75 |
+
|
76 |
+
def __str__(self):
|
77 |
+
return "FlorenceSAM"
|
78 |
+
|
79 |
+
|
80 |
+
@torch.no_grad()
|
81 |
+
def run_florence2(self, task_prompt, text_input, image):
|
82 |
+
model = self.florence2_model
|
83 |
+
processor = self.florence2_processor
|
84 |
+
device = self.device
|
85 |
+
assert model is not None, "You should pass the initialized Florence-2 model here"
|
86 |
+
assert processor is not None, "You should set the Florence-2 processor here"
|
87 |
+
|
88 |
+
with torch.autocast(device_type="cuda", dtype=torch.float32):
|
89 |
+
if text_input is None:
|
90 |
+
prompt = task_prompt
|
91 |
+
else:
|
92 |
+
prompt = task_prompt + text_input
|
93 |
+
|
94 |
+
inputs = processor(
|
95 |
+
text=prompt, images=image,
|
96 |
+
max_length=1024,
|
97 |
+
truncation=True,
|
98 |
+
return_tensors="pt",
|
99 |
+
        ).to(device, self.torch_dtype)
        # inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, self.torch_dtype)
        generated_ids = model.generate(
            input_ids=inputs["input_ids"].to(device),
            pixel_values=inputs["pixel_values"].to(device),
            # max_new_tokens=1024,
            max_new_tokens=768,
            early_stopping=False,
            do_sample=False,
            num_beams=3,
        )
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
        parsed_answer = processor.post_process_generation(
            generated_text,
            task=task_prompt,
            image_size=(image.width, image.height)
        )
        return parsed_answer



    def caption(self, image, caption_task_prompt='<CAPTION>'):
        assert caption_task_prompt in ["<CAPTION>", "<DETAILED_CAPTION>", "<MORE_DETAILED_CAPTION>"]
        caption_results = self.run_florence2(caption_task_prompt, None, image)
        text_input = caption_results[caption_task_prompt]
        caption = text_input
        return caption


    def segmentation(self, image, input_boxes, seg_model="sam"):
        if seg_model == "sam":
            with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float32):
                sam2_predictor = self.sam2_predictor
                sam2_predictor.set_image(np.array(image))
                masks, scores, logits = sam2_predictor.predict(
                    point_coords=None,
                    point_labels=None,
                    box=input_boxes,
                    multimask_output=False,
                )
                if masks.ndim == 4:
                    masks = masks.squeeze(1)
                if scores.ndim == 2:
                    scores = scores.squeeze(1)
        else:
            raise NotImplementedError()

        return masks, scores

    def post_process_results(self, image, caption, labels, detections, output_dir=None):
        result_dict = {
            "caption": caption,
            "instance_images": [],
            "instance_labels": [],
            "instance_bboxes": [],
            "instance_mask_scores": [],
        }

        if detections is None:
            return detections, result_dict

        if output_dir is not None:
            os.makedirs(output_dir, exist_ok=True)

        cv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

        box_annotator = sv.BoxAnnotator()
        annotated_frame = box_annotator.annotate(scene=cv_image.copy(), detections=detections)

        label_annotator = sv.LabelAnnotator(text_position=sv.Position.CENTER)
        annotated_frame = label_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels)
        if output_dir is not None:
            cv2.imwrite(os.path.join(output_dir, "detections.jpg"), annotated_frame)

        mask_annotator = sv.MaskAnnotator()
        annotated_frame = mask_annotator.annotate(scene=annotated_frame, detections=detections)
        if output_dir is not None:
            cv2.imwrite(os.path.join(output_dir, "masks.jpg"), annotated_frame)

        for detection in detections:
            xyxy, mask, confidence, class_id, tracker_id, data = detection

            label = labels[class_id]
            cropped_img = sv.crop_image(image=cv_image, xyxy=xyxy)
            if output_dir is not None:
                cv2.imwrite(os.path.join(output_dir, f"cropped_image_{label}.jpg"), cropped_img)

            if mask is None:
                result_dict["instance_mask_scores"].append(0)
                result_dict["instance_images"].append(cropped_img)
            else:
                mask = np.repeat(mask[..., np.newaxis], 3, axis=-1)
                masked_img = np.where(mask, cv_image, 255)
                cropped_masked_img = sv.crop_image(image=masked_img, xyxy=xyxy)
                result_dict["instance_mask_scores"].append(confidence.item())
                result_dict["instance_images"].append(cropped_masked_img)

            result_dict["instance_labels"].append(label)
            result_dict["instance_bboxes"].append(xyxy)
            if output_dir is not None:
                cv2.imwrite(os.path.join(output_dir, f"masked_image_{label}.jpg"), cropped_masked_img)

        torch.cuda.empty_cache()
        gc.collect()
        return detections, result_dict

    def caption_phrase_grounding_and_segmentation(
        self,
        image,
        seg_model="sam",
        caption_task_prompt='<CAPTION>',
        original_caption=None,
        output_dir=None
    ):

        assert caption_task_prompt in ["<CAPTION>", "<DETAILED_CAPTION>", "<MORE_DETAILED_CAPTION>", "<GIVEN>", "<OPEN_VOCABULARY_DETECTION>"]
        assert seg_model in ["sam", "florence2"]

        # image caption
        if caption_task_prompt in ["<GIVEN>", "<OPEN_VOCABULARY_DETECTION>"]:
            assert original_caption is not None
            caption = original_caption
        else:
            caption_results = self.run_florence2(caption_task_prompt, None, image)
            text_input = caption_results[caption_task_prompt]
            caption = text_input

        # phrase grounding
        grounding_results = self.run_florence2('<CAPTION_TO_PHRASE_GROUNDING>', caption, image)['<CAPTION_TO_PHRASE_GROUNDING>']
        input_boxes = np.array(grounding_results["bboxes"])
        class_names = grounding_results["labels"]
        class_ids = np.array(list(range(len(class_names))))

        # segmentation
        masks, scores = self.segmentation(image, input_boxes, seg_model)

        labels = [f"{class_name}" for class_name in class_names]
        detections = sv.Detections(
            xyxy=input_boxes,
            mask=masks.astype(bool),
            class_id=class_ids,
            confidence=scores,
        )

        return self.post_process_results(image, caption, labels, detections, output_dir)

    def od_grounding_and_segmentation(
        self,
        image,
        text_input,
        seg_model="sam",
        output_dir=None
    ):
        assert seg_model in ["sam", "florence2"]

        # od grounding
        grounding_results = self.run_florence2('<OPEN_VOCABULARY_DETECTION>', text_input, image)['<OPEN_VOCABULARY_DETECTION>']
        if len(grounding_results["bboxes"]) == 0:
            detections = None
            labels = []
        else:
            input_boxes = np.array(grounding_results["bboxes"])
            class_names = grounding_results["bboxes_labels"]
            class_ids = np.array(list(range(len(class_names))))

            # segmentation
            masks, scores = self.segmentation(image, input_boxes, seg_model)

            labels = [f"{class_name}" for class_name in class_names]
            detections = sv.Detections(
                xyxy=input_boxes,
                mask=masks.astype(bool),
                class_id=class_ids,
                confidence=scores,
            )

        return self.post_process_results(image, text_input, labels, detections, output_dir)

    def od_grounding(
        self,
        image,
        text_input,
        output_dir=None
    ):

        # od grounding
        grounding_results = self.run_florence2('<OPEN_VOCABULARY_DETECTION>', text_input, image)['<OPEN_VOCABULARY_DETECTION>']
        if len(grounding_results["bboxes"]) == 0:
            detections = None
            labels = []
        else:
            input_boxes = np.array(grounding_results["bboxes"])
            class_names = grounding_results["bboxes_labels"]
            class_ids = np.array(list(range(len(class_names))))

            labels = [f"{class_name}" for class_name in class_names]
            detections = sv.Detections(
                xyxy=input_boxes,
                class_id=class_ids,
            )

        return self.post_process_results(image, text_input, labels, detections, output_dir)

    def phrase_grounding_and_segmentation(
        self,
        image,
        text_input,
        seg_model="sam",
        output_dir=None
    ):
        assert seg_model in ["sam", "florence2"]

        # phrase grounding
        grounding_results = self.run_florence2('<CAPTION_TO_PHRASE_GROUNDING>', text_input, image)['<CAPTION_TO_PHRASE_GROUNDING>']
        input_boxes = np.array(grounding_results["bboxes"])
        class_names = grounding_results["labels"]
        # print(f"[phrase_grounding_and_segmentation] input_label={text_input}, output_label={class_names}")
        class_ids = np.array(list(range(len(class_names))))

        # segmentation
        masks, scores = self.segmentation(image, input_boxes, seg_model)

        labels = [f"{class_name}" for class_name in class_names]
        detections = sv.Detections(
            xyxy=input_boxes,
            mask=masks.astype(bool),
            class_id=class_ids,
            confidence=scores,
        )

        return self.post_process_results(image, text_input, labels, detections, output_dir)


if __name__ == "__main__":

    parser = argparse.ArgumentParser("Grounded SAM 2 Florence-2 Demos", add_help=True)
    parser.add_argument("--image_path", type=str, default="./notebooks/images/cars.jpg", required=True, help="path to image file")
    parser.add_argument("--caption_type", type=str, default="caption", required=False, help="granularity of caption")
    args = parser.parse_args()



    # IMAGE_PATH = args.image_path
    PIPELINE = "caption_to_phrase_grounding"
    CAPTION_TYPE = args.caption_type
    assert CAPTION_TYPE in ["caption", "detailed_caption", "more_detailed_caption", "original"]

    print(f"Running pipeline: {PIPELINE} now.")

    pipeline = FlorenceSAM("cuda:0")

    from glob import glob
    from tqdm import tqdm
    for image_path in tqdm(glob("/mnt/bn/lq-prompt-alignment/personal/chenbowen/code/IPVerse/prompt_alignment/Grounded-SAM-2/notebooks/images/*") * 3):
    # for image_path in tqdm(glob("/mnt/bn/lq-prompt-alignment/personal/chenbowen/code/IPVerse/prompt_alignment/Grounded-SAM-2/outputs/gcg_pipeline/00001.tar_debug/*.png")):
        print(pipeline.TASK_PROMPT, CAPTION_TYPE)
        image = Image.open(image_path).convert("RGB")
        pipeline.caption_phrase_grounding_and_segmentation(
            image=image,
            seg_model="sam",
            caption_task_prompt=pipeline.TASK_PROMPT[CAPTION_TYPE],
            output_dir=f"./outputs/{os.path.basename(image_path)}"
        )
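A minimal usage sketch of the FlorenceSAM auto-label pipeline defined above, assuming the Florence-2 and SAM 2 weights are already set up by the class constructor; the image path and text prompt are placeholders, not part of the repository:

from PIL import Image

pipeline = FlorenceSAM("cuda:0")
image = Image.open("example.jpg").convert("RGB")  # placeholder image

# Ground a free-form phrase with Florence-2, then segment each matched box with SAM 2.
detections, result = pipeline.od_grounding_and_segmentation(
    image=image,
    text_input="a red car",
    seg_model="sam",
    output_dir="./outputs/example",
)
print(result["caption"], result["instance_labels"], result["instance_mask_scores"])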
eval/grounded_sam/sam2/__init__.py
ADDED
@@ -0,0 +1,11 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from hydra import initialize_config_module
from hydra.core.global_hydra import GlobalHydra

if not GlobalHydra.instance().is_initialized():
    initialize_config_module("sam2", version_base="1.2")
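Because this __init__ registers the packaged configs with Hydra on import, later code can compose a model config purely by name, which is what build_sam2 below relies on. A minimal sketch of that mechanism, assuming eval/grounded_sam is on sys.path (as build_sam.py arranges) and using one of the config files added below:

from hydra import compose

import sam2  # triggers initialize_config_module("sam2", version_base="1.2") shown above

cfg = compose(config_name="configs/sam2.1/sam2.1_hiera_l.yaml")
print(cfg.model["_target_"])  # sam2.modeling.sam2_base.SAM2Base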
eval/grounded_sam/sam2/automatic_mask_generator.py
ADDED
@@ -0,0 +1,454 @@
1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
# This source code is licensed under the license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
# Adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/automatic_mask_generator.py
|
8 |
+
from typing import Any, Dict, List, Optional, Tuple
|
9 |
+
|
10 |
+
import numpy as np
|
11 |
+
import torch
|
12 |
+
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
|
13 |
+
|
14 |
+
from sam2.modeling.sam2_base import SAM2Base
|
15 |
+
from sam2.sam2_image_predictor import SAM2ImagePredictor
|
16 |
+
from sam2.utils.amg import (
|
17 |
+
area_from_rle,
|
18 |
+
batch_iterator,
|
19 |
+
batched_mask_to_box,
|
20 |
+
box_xyxy_to_xywh,
|
21 |
+
build_all_layer_point_grids,
|
22 |
+
calculate_stability_score,
|
23 |
+
coco_encode_rle,
|
24 |
+
generate_crop_boxes,
|
25 |
+
is_box_near_crop_edge,
|
26 |
+
mask_to_rle_pytorch,
|
27 |
+
MaskData,
|
28 |
+
remove_small_regions,
|
29 |
+
rle_to_mask,
|
30 |
+
uncrop_boxes_xyxy,
|
31 |
+
uncrop_masks,
|
32 |
+
uncrop_points,
|
33 |
+
)
|
34 |
+
|
35 |
+
|
36 |
+
class SAM2AutomaticMaskGenerator:
|
37 |
+
def __init__(
|
38 |
+
self,
|
39 |
+
model: SAM2Base,
|
40 |
+
points_per_side: Optional[int] = 32,
|
41 |
+
points_per_batch: int = 64,
|
42 |
+
pred_iou_thresh: float = 0.8,
|
43 |
+
stability_score_thresh: float = 0.95,
|
44 |
+
stability_score_offset: float = 1.0,
|
45 |
+
mask_threshold: float = 0.0,
|
46 |
+
box_nms_thresh: float = 0.7,
|
47 |
+
crop_n_layers: int = 0,
|
48 |
+
crop_nms_thresh: float = 0.7,
|
49 |
+
crop_overlap_ratio: float = 512 / 1500,
|
50 |
+
crop_n_points_downscale_factor: int = 1,
|
51 |
+
point_grids: Optional[List[np.ndarray]] = None,
|
52 |
+
min_mask_region_area: int = 0,
|
53 |
+
output_mode: str = "binary_mask",
|
54 |
+
use_m2m: bool = False,
|
55 |
+
multimask_output: bool = True,
|
56 |
+
**kwargs,
|
57 |
+
) -> None:
|
58 |
+
"""
|
59 |
+
Using a SAM 2 model, generates masks for the entire image.
|
60 |
+
Generates a grid of point prompts over the image, then filters
|
61 |
+
low quality and duplicate masks. The default settings are chosen
|
62 |
+
for SAM 2 with a HieraL backbone.
|
63 |
+
|
64 |
+
Arguments:
|
65 |
+
model (Sam): The SAM 2 model to use for mask prediction.
|
66 |
+
points_per_side (int or None): The number of points to be sampled
|
67 |
+
along one side of the image. The total number of points is
|
68 |
+
points_per_side**2. If None, 'point_grids' must provide explicit
|
69 |
+
point sampling.
|
70 |
+
points_per_batch (int): Sets the number of points run simultaneously
|
71 |
+
by the model. Higher numbers may be faster but use more GPU memory.
|
72 |
+
pred_iou_thresh (float): A filtering threshold in [0,1], using the
|
73 |
+
model's predicted mask quality.
|
74 |
+
stability_score_thresh (float): A filtering threshold in [0,1], using
|
75 |
+
the stability of the mask under changes to the cutoff used to binarize
|
76 |
+
the model's mask predictions.
|
77 |
+
stability_score_offset (float): The amount to shift the cutoff when
|
78 |
+
calculated the stability score.
|
79 |
+
mask_threshold (float): Threshold for binarizing the mask logits
|
80 |
+
box_nms_thresh (float): The box IoU cutoff used by non-maximal
|
81 |
+
suppression to filter duplicate masks.
|
82 |
+
crop_n_layers (int): If >0, mask prediction will be run again on
|
83 |
+
crops of the image. Sets the number of layers to run, where each
|
84 |
+
layer has 2**i_layer number of image crops.
|
85 |
+
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
|
86 |
+
suppression to filter duplicate masks between different crops.
|
87 |
+
crop_overlap_ratio (float): Sets the degree to which crops overlap.
|
88 |
+
In the first crop layer, crops will overlap by this fraction of
|
89 |
+
the image length. Later layers with more crops scale down this overlap.
|
90 |
+
crop_n_points_downscale_factor (int): The number of points-per-side
|
91 |
+
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
|
92 |
+
point_grids (list(np.ndarray) or None): A list over explicit grids
|
93 |
+
of points used for sampling, normalized to [0,1]. The nth grid in the
|
94 |
+
list is used in the nth crop layer. Exclusive with points_per_side.
|
95 |
+
min_mask_region_area (int): If >0, postprocessing will be applied
|
96 |
+
to remove disconnected regions and holes in masks with area smaller
|
97 |
+
than min_mask_region_area. Requires opencv.
|
98 |
+
output_mode (str): The form masks are returned in. Can be 'binary_mask',
|
99 |
+
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
|
100 |
+
For large resolutions, 'binary_mask' may consume large amounts of
|
101 |
+
memory.
|
102 |
+
use_m2m (bool): Whether to add a one step refinement using previous mask predictions.
|
103 |
+
multimask_output (bool): Whether to output multimask at each point of the grid.
|
104 |
+
"""
|
105 |
+
|
106 |
+
assert (points_per_side is None) != (
|
107 |
+
point_grids is None
|
108 |
+
), "Exactly one of points_per_side or point_grid must be provided."
|
109 |
+
if points_per_side is not None:
|
110 |
+
self.point_grids = build_all_layer_point_grids(
|
111 |
+
points_per_side,
|
112 |
+
crop_n_layers,
|
113 |
+
crop_n_points_downscale_factor,
|
114 |
+
)
|
115 |
+
elif point_grids is not None:
|
116 |
+
self.point_grids = point_grids
|
117 |
+
else:
|
118 |
+
raise ValueError("Can't have both points_per_side and point_grid be None.")
|
119 |
+
|
120 |
+
assert output_mode in [
|
121 |
+
"binary_mask",
|
122 |
+
"uncompressed_rle",
|
123 |
+
"coco_rle",
|
124 |
+
], f"Unknown output_mode {output_mode}."
|
125 |
+
if output_mode == "coco_rle":
|
126 |
+
try:
|
127 |
+
from pycocotools import mask as mask_utils # type: ignore # noqa: F401
|
128 |
+
except ImportError as e:
|
129 |
+
print("Please install pycocotools")
|
130 |
+
raise e
|
131 |
+
|
132 |
+
self.predictor = SAM2ImagePredictor(
|
133 |
+
model,
|
134 |
+
max_hole_area=min_mask_region_area,
|
135 |
+
max_sprinkle_area=min_mask_region_area,
|
136 |
+
)
|
137 |
+
self.points_per_batch = points_per_batch
|
138 |
+
self.pred_iou_thresh = pred_iou_thresh
|
139 |
+
self.stability_score_thresh = stability_score_thresh
|
140 |
+
self.stability_score_offset = stability_score_offset
|
141 |
+
self.mask_threshold = mask_threshold
|
142 |
+
self.box_nms_thresh = box_nms_thresh
|
143 |
+
self.crop_n_layers = crop_n_layers
|
144 |
+
self.crop_nms_thresh = crop_nms_thresh
|
145 |
+
self.crop_overlap_ratio = crop_overlap_ratio
|
146 |
+
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
|
147 |
+
self.min_mask_region_area = min_mask_region_area
|
148 |
+
self.output_mode = output_mode
|
149 |
+
self.use_m2m = use_m2m
|
150 |
+
self.multimask_output = multimask_output
|
151 |
+
|
152 |
+
@classmethod
|
153 |
+
def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2AutomaticMaskGenerator":
|
154 |
+
"""
|
155 |
+
Load a pretrained model from the Hugging Face hub.
|
156 |
+
|
157 |
+
Arguments:
|
158 |
+
model_id (str): The Hugging Face repository ID.
|
159 |
+
**kwargs: Additional arguments to pass to the model constructor.
|
160 |
+
|
161 |
+
Returns:
|
162 |
+
(SAM2AutomaticMaskGenerator): The loaded model.
|
163 |
+
"""
|
164 |
+
from sam2.build_sam import build_sam2_hf
|
165 |
+
|
166 |
+
sam_model = build_sam2_hf(model_id, **kwargs)
|
167 |
+
return cls(sam_model, **kwargs)
|
168 |
+
|
169 |
+
@torch.no_grad()
|
170 |
+
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
|
171 |
+
"""
|
172 |
+
Generates masks for the given image.
|
173 |
+
|
174 |
+
Arguments:
|
175 |
+
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
|
176 |
+
|
177 |
+
Returns:
|
178 |
+
list(dict(str, any)): A list over records for masks. Each record is
|
179 |
+
a dict containing the following keys:
|
180 |
+
segmentation (dict(str, any) or np.ndarray): The mask. If
|
181 |
+
output_mode='binary_mask', is an array of shape HW. Otherwise,
|
182 |
+
is a dictionary containing the RLE.
|
183 |
+
bbox (list(float)): The box around the mask, in XYWH format.
|
184 |
+
area (int): The area in pixels of the mask.
|
185 |
+
predicted_iou (float): The model's own prediction of the mask's
|
186 |
+
quality. This is filtered by the pred_iou_thresh parameter.
|
187 |
+
point_coords (list(list(float))): The point coordinates input
|
188 |
+
to the model to generate this mask.
|
189 |
+
stability_score (float): A measure of the mask's quality. This
|
190 |
+
is filtered on using the stability_score_thresh parameter.
|
191 |
+
crop_box (list(float)): The crop of the image used to generate
|
192 |
+
the mask, given in XYWH format.
|
193 |
+
"""
|
194 |
+
|
195 |
+
# Generate masks
|
196 |
+
mask_data = self._generate_masks(image)
|
197 |
+
|
198 |
+
# Encode masks
|
199 |
+
if self.output_mode == "coco_rle":
|
200 |
+
mask_data["segmentations"] = [
|
201 |
+
coco_encode_rle(rle) for rle in mask_data["rles"]
|
202 |
+
]
|
203 |
+
elif self.output_mode == "binary_mask":
|
204 |
+
mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
|
205 |
+
else:
|
206 |
+
mask_data["segmentations"] = mask_data["rles"]
|
207 |
+
|
208 |
+
# Write mask records
|
209 |
+
curr_anns = []
|
210 |
+
for idx in range(len(mask_data["segmentations"])):
|
211 |
+
ann = {
|
212 |
+
"segmentation": mask_data["segmentations"][idx],
|
213 |
+
"area": area_from_rle(mask_data["rles"][idx]),
|
214 |
+
"bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
|
215 |
+
"predicted_iou": mask_data["iou_preds"][idx].item(),
|
216 |
+
"point_coords": [mask_data["points"][idx].tolist()],
|
217 |
+
"stability_score": mask_data["stability_score"][idx].item(),
|
218 |
+
"crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
|
219 |
+
}
|
220 |
+
curr_anns.append(ann)
|
221 |
+
|
222 |
+
return curr_anns
|
223 |
+
|
224 |
+
def _generate_masks(self, image: np.ndarray) -> MaskData:
|
225 |
+
orig_size = image.shape[:2]
|
226 |
+
crop_boxes, layer_idxs = generate_crop_boxes(
|
227 |
+
orig_size, self.crop_n_layers, self.crop_overlap_ratio
|
228 |
+
)
|
229 |
+
|
230 |
+
# Iterate over image crops
|
231 |
+
data = MaskData()
|
232 |
+
for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
|
233 |
+
crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
|
234 |
+
data.cat(crop_data)
|
235 |
+
|
236 |
+
# Remove duplicate masks between crops
|
237 |
+
if len(crop_boxes) > 1:
|
238 |
+
# Prefer masks from smaller crops
|
239 |
+
scores = 1 / box_area(data["crop_boxes"])
|
240 |
+
scores = scores.to(data["boxes"].device)
|
241 |
+
keep_by_nms = batched_nms(
|
242 |
+
data["boxes"].float(),
|
243 |
+
scores,
|
244 |
+
torch.zeros_like(data["boxes"][:, 0]), # categories
|
245 |
+
iou_threshold=self.crop_nms_thresh,
|
246 |
+
)
|
247 |
+
data.filter(keep_by_nms)
|
248 |
+
data.to_numpy()
|
249 |
+
return data
|
250 |
+
|
251 |
+
def _process_crop(
|
252 |
+
self,
|
253 |
+
image: np.ndarray,
|
254 |
+
crop_box: List[int],
|
255 |
+
crop_layer_idx: int,
|
256 |
+
orig_size: Tuple[int, ...],
|
257 |
+
) -> MaskData:
|
258 |
+
# Crop the image and calculate embeddings
|
259 |
+
x0, y0, x1, y1 = crop_box
|
260 |
+
cropped_im = image[y0:y1, x0:x1, :]
|
261 |
+
cropped_im_size = cropped_im.shape[:2]
|
262 |
+
self.predictor.set_image(cropped_im)
|
263 |
+
|
264 |
+
# Get points for this crop
|
265 |
+
points_scale = np.array(cropped_im_size)[None, ::-1]
|
266 |
+
points_for_image = self.point_grids[crop_layer_idx] * points_scale
|
267 |
+
|
268 |
+
# Generate masks for this crop in batches
|
269 |
+
data = MaskData()
|
270 |
+
for (points,) in batch_iterator(self.points_per_batch, points_for_image):
|
271 |
+
batch_data = self._process_batch(
|
272 |
+
points, cropped_im_size, crop_box, orig_size, normalize=True
|
273 |
+
)
|
274 |
+
data.cat(batch_data)
|
275 |
+
del batch_data
|
276 |
+
self.predictor.reset_predictor()
|
277 |
+
|
278 |
+
# Remove duplicates within this crop.
|
279 |
+
keep_by_nms = batched_nms(
|
280 |
+
data["boxes"].float(),
|
281 |
+
data["iou_preds"],
|
282 |
+
torch.zeros_like(data["boxes"][:, 0]), # categories
|
283 |
+
iou_threshold=self.box_nms_thresh,
|
284 |
+
)
|
285 |
+
data.filter(keep_by_nms)
|
286 |
+
|
287 |
+
# Return to the original image frame
|
288 |
+
data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
|
289 |
+
data["points"] = uncrop_points(data["points"], crop_box)
|
290 |
+
data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
|
291 |
+
|
292 |
+
return data
|
293 |
+
|
294 |
+
def _process_batch(
|
295 |
+
self,
|
296 |
+
points: np.ndarray,
|
297 |
+
im_size: Tuple[int, ...],
|
298 |
+
crop_box: List[int],
|
299 |
+
orig_size: Tuple[int, ...],
|
300 |
+
normalize=False,
|
301 |
+
) -> MaskData:
|
302 |
+
orig_h, orig_w = orig_size
|
303 |
+
|
304 |
+
# Run model on this batch
|
305 |
+
points = torch.as_tensor(
|
306 |
+
points, dtype=torch.float32, device=self.predictor.device
|
307 |
+
)
|
308 |
+
in_points = self.predictor._transforms.transform_coords(
|
309 |
+
points, normalize=normalize, orig_hw=im_size
|
310 |
+
)
|
311 |
+
in_labels = torch.ones(
|
312 |
+
in_points.shape[0], dtype=torch.int, device=in_points.device
|
313 |
+
)
|
314 |
+
masks, iou_preds, low_res_masks = self.predictor._predict(
|
315 |
+
in_points[:, None, :],
|
316 |
+
in_labels[:, None],
|
317 |
+
multimask_output=self.multimask_output,
|
318 |
+
return_logits=True,
|
319 |
+
)
|
320 |
+
|
321 |
+
# Serialize predictions and store in MaskData
|
322 |
+
data = MaskData(
|
323 |
+
masks=masks.flatten(0, 1),
|
324 |
+
iou_preds=iou_preds.flatten(0, 1),
|
325 |
+
points=points.repeat_interleave(masks.shape[1], dim=0),
|
326 |
+
low_res_masks=low_res_masks.flatten(0, 1),
|
327 |
+
)
|
328 |
+
del masks
|
329 |
+
|
330 |
+
if not self.use_m2m:
|
331 |
+
# Filter by predicted IoU
|
332 |
+
if self.pred_iou_thresh > 0.0:
|
333 |
+
keep_mask = data["iou_preds"] > self.pred_iou_thresh
|
334 |
+
data.filter(keep_mask)
|
335 |
+
|
336 |
+
# Calculate and filter by stability score
|
337 |
+
data["stability_score"] = calculate_stability_score(
|
338 |
+
data["masks"], self.mask_threshold, self.stability_score_offset
|
339 |
+
)
|
340 |
+
if self.stability_score_thresh > 0.0:
|
341 |
+
keep_mask = data["stability_score"] >= self.stability_score_thresh
|
342 |
+
data.filter(keep_mask)
|
343 |
+
else:
|
344 |
+
# One step refinement using previous mask predictions
|
345 |
+
in_points = self.predictor._transforms.transform_coords(
|
346 |
+
data["points"], normalize=normalize, orig_hw=im_size
|
347 |
+
)
|
348 |
+
labels = torch.ones(
|
349 |
+
in_points.shape[0], dtype=torch.int, device=in_points.device
|
350 |
+
)
|
351 |
+
masks, ious = self.refine_with_m2m(
|
352 |
+
in_points, labels, data["low_res_masks"], self.points_per_batch
|
353 |
+
)
|
354 |
+
data["masks"] = masks.squeeze(1)
|
355 |
+
data["iou_preds"] = ious.squeeze(1)
|
356 |
+
|
357 |
+
if self.pred_iou_thresh > 0.0:
|
358 |
+
keep_mask = data["iou_preds"] > self.pred_iou_thresh
|
359 |
+
data.filter(keep_mask)
|
360 |
+
|
361 |
+
data["stability_score"] = calculate_stability_score(
|
362 |
+
data["masks"], self.mask_threshold, self.stability_score_offset
|
363 |
+
)
|
364 |
+
if self.stability_score_thresh > 0.0:
|
365 |
+
keep_mask = data["stability_score"] >= self.stability_score_thresh
|
366 |
+
data.filter(keep_mask)
|
367 |
+
|
368 |
+
# Threshold masks and calculate boxes
|
369 |
+
data["masks"] = data["masks"] > self.mask_threshold
|
370 |
+
data["boxes"] = batched_mask_to_box(data["masks"])
|
371 |
+
|
372 |
+
# Filter boxes that touch crop boundaries
|
373 |
+
keep_mask = ~is_box_near_crop_edge(
|
374 |
+
data["boxes"], crop_box, [0, 0, orig_w, orig_h]
|
375 |
+
)
|
376 |
+
if not torch.all(keep_mask):
|
377 |
+
data.filter(keep_mask)
|
378 |
+
|
379 |
+
# Compress to RLE
|
380 |
+
data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
|
381 |
+
data["rles"] = mask_to_rle_pytorch(data["masks"])
|
382 |
+
del data["masks"]
|
383 |
+
|
384 |
+
return data
|
385 |
+
|
386 |
+
@staticmethod
|
387 |
+
def postprocess_small_regions(
|
388 |
+
mask_data: MaskData, min_area: int, nms_thresh: float
|
389 |
+
) -> MaskData:
|
390 |
+
"""
|
391 |
+
Removes small disconnected regions and holes in masks, then reruns
|
392 |
+
box NMS to remove any new duplicates.
|
393 |
+
|
394 |
+
Edits mask_data in place.
|
395 |
+
|
396 |
+
Requires open-cv as a dependency.
|
397 |
+
"""
|
398 |
+
if len(mask_data["rles"]) == 0:
|
399 |
+
return mask_data
|
400 |
+
|
401 |
+
# Filter small disconnected regions and holes
|
402 |
+
new_masks = []
|
403 |
+
scores = []
|
404 |
+
for rle in mask_data["rles"]:
|
405 |
+
mask = rle_to_mask(rle)
|
406 |
+
|
407 |
+
mask, changed = remove_small_regions(mask, min_area, mode="holes")
|
408 |
+
unchanged = not changed
|
409 |
+
mask, changed = remove_small_regions(mask, min_area, mode="islands")
|
410 |
+
unchanged = unchanged and not changed
|
411 |
+
|
412 |
+
new_masks.append(torch.as_tensor(mask).unsqueeze(0))
|
413 |
+
# Give score=0 to changed masks and score=1 to unchanged masks
|
414 |
+
# so NMS will prefer ones that didn't need postprocessing
|
415 |
+
scores.append(float(unchanged))
|
416 |
+
|
417 |
+
# Recalculate boxes and remove any new duplicates
|
418 |
+
masks = torch.cat(new_masks, dim=0)
|
419 |
+
boxes = batched_mask_to_box(masks)
|
420 |
+
keep_by_nms = batched_nms(
|
421 |
+
boxes.float(),
|
422 |
+
torch.as_tensor(scores),
|
423 |
+
torch.zeros_like(boxes[:, 0]), # categories
|
424 |
+
iou_threshold=nms_thresh,
|
425 |
+
)
|
426 |
+
|
427 |
+
# Only recalculate RLEs for masks that have changed
|
428 |
+
for i_mask in keep_by_nms:
|
429 |
+
if scores[i_mask] == 0.0:
|
430 |
+
mask_torch = masks[i_mask].unsqueeze(0)
|
431 |
+
mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
|
432 |
+
mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
|
433 |
+
mask_data.filter(keep_by_nms)
|
434 |
+
|
435 |
+
return mask_data
|
436 |
+
|
437 |
+
def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch):
|
438 |
+
new_masks = []
|
439 |
+
new_iou_preds = []
|
440 |
+
|
441 |
+
for cur_points, cur_point_labels, low_res_mask in batch_iterator(
|
442 |
+
points_per_batch, points, point_labels, low_res_masks
|
443 |
+
):
|
444 |
+
best_masks, best_iou_preds, _ = self.predictor._predict(
|
445 |
+
cur_points[:, None, :],
|
446 |
+
cur_point_labels[:, None],
|
447 |
+
mask_input=low_res_mask[:, None, :],
|
448 |
+
multimask_output=False,
|
449 |
+
return_logits=True,
|
450 |
+
)
|
451 |
+
new_masks.append(best_masks)
|
452 |
+
new_iou_preds.append(best_iou_preds)
|
453 |
+
masks = torch.cat(new_masks, dim=0)
|
454 |
+
return masks, torch.cat(new_iou_preds, dim=0)
|
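A minimal sketch of driving the SAM2AutomaticMaskGenerator above through its from_pretrained helper; the Hub id matches HF_MODEL_ID_TO_FILENAMES in build_sam.py below, while the image path is a placeholder:

import numpy as np
from PIL import Image

from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator

mask_generator = SAM2AutomaticMaskGenerator.from_pretrained(
    "facebook/sam2.1-hiera-large",
    points_per_side=32,   # grid density of point prompts
    pred_iou_thresh=0.8,  # drop masks the model itself scores poorly
)
image = np.array(Image.open("example.jpg").convert("RGB"))  # HWC uint8, placeholder image
records = mask_generator.generate(image)
# Each record carries "segmentation", "bbox" (XYWH), "area", "predicted_iou",
# "point_coords", "stability_score" and "crop_box", as documented in generate().
print(len(records), records[0]["bbox"], records[0]["predicted_iou"])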
eval/grounded_sam/sam2/build_sam.py
ADDED
@@ -0,0 +1,172 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import logging
import os
import sys
import torch
from hydra import compose
from hydra.utils import instantiate
from omegaconf import OmegaConf

from pathlib import Path
current_dir = str(Path(os.path.abspath('')))
sam_dir = os.path.join(current_dir, "eval/grounded_sam")
sys.path.append(sam_dir)

import sam2

# # Check if the user is running Python from the parent directory of the sam2 repo
# # (i.e. the directory where this repo is cloned into) -- this is not supported since
# # it could shadow the sam2 package and cause issues.
# if os.path.isdir(os.path.join(sam2.__path__[0], "sam2")):
#     # If the user has "sam2/sam2" in their path, they are likely importing the repo itself
#     # as "sam2" rather than importing the "sam2" python package (i.e. "sam2/sam2" directory).
#     # This typically happens because the user is running Python from the parent directory
#     # that contains the sam2 repo they cloned.
#     raise RuntimeError(
#         "You're likely running Python from the parent directory of the sam2 repository "
#         "(i.e. the directory where https://github.com/facebookresearch/sam2 is cloned into). "
#         "This is not supported since the `sam2` Python package could be shadowed by the "
#         "repository name (the repository is also named `sam2` and contains the Python package "
#         "in `sam2/sam2`). Please run Python from another directory (e.g. from the repo dir "
#         "rather than its parent dir, or from your home directory) after installing SAM 2."
#     )


HF_MODEL_ID_TO_FILENAMES = {
    "facebook/sam2-hiera-tiny": (
        "configs/sam2/sam2_hiera_t.yaml",
        "sam2_hiera_tiny.pt",
    ),
    "facebook/sam2-hiera-small": (
        "configs/sam2/sam2_hiera_s.yaml",
        "sam2_hiera_small.pt",
    ),
    "facebook/sam2-hiera-base-plus": (
        "configs/sam2/sam2_hiera_b+.yaml",
        "sam2_hiera_base_plus.pt",
    ),
    "facebook/sam2-hiera-large": (
        "configs/sam2/sam2_hiera_l.yaml",
        "sam2_hiera_large.pt",
    ),
    "facebook/sam2.1-hiera-tiny": (
        "configs/sam2.1/sam2.1_hiera_t.yaml",
        "sam2.1_hiera_tiny.pt",
    ),
    "facebook/sam2.1-hiera-small": (
        "configs/sam2.1/sam2.1_hiera_s.yaml",
        "sam2.1_hiera_small.pt",
    ),
    "facebook/sam2.1-hiera-base-plus": (
        "configs/sam2.1/sam2.1_hiera_b+.yaml",
        "sam2.1_hiera_base_plus.pt",
    ),
    "facebook/sam2.1-hiera-large": (
        "configs/sam2.1/sam2.1_hiera_l.yaml",
        "sam2.1_hiera_large.pt",
    ),
}


def build_sam2(
    config_file,
    ckpt_path=None,
    device="cuda",
    mode="eval",
    hydra_overrides_extra=[],
    apply_postprocessing=True,
    **kwargs,
):

    if apply_postprocessing:
        hydra_overrides_extra = hydra_overrides_extra.copy()
        hydra_overrides_extra += [
            # dynamically fall back to multi-mask if the single mask is not stable
            "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
            "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
            "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
        ]
    # Read config and init model
    cfg = compose(config_name=config_file, overrides=hydra_overrides_extra)
    OmegaConf.resolve(cfg)
    model = instantiate(cfg.model, _recursive_=True)
    _load_checkpoint(model, ckpt_path)
    model = model.to(device)
    if mode == "eval":
        model.eval()
    return model


def build_sam2_video_predictor(
    config_file,
    ckpt_path=None,
    device="cuda",
    mode="eval",
    hydra_overrides_extra=[],
    apply_postprocessing=True,
    **kwargs,
):
    hydra_overrides = [
        "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor",
    ]
    if apply_postprocessing:
        hydra_overrides_extra = hydra_overrides_extra.copy()
        hydra_overrides_extra += [
            # dynamically fall back to multi-mask if the single mask is not stable
            "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
            "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
            "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
            # binarize the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly as what users see from clicking
            "++model.binarize_mask_from_pts_for_mem_enc=true",
            # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution)
            "++model.fill_hole_area=8",
        ]
    hydra_overrides.extend(hydra_overrides_extra)

    # Read config and init model
    cfg = compose(config_name=config_file, overrides=hydra_overrides)
    OmegaConf.resolve(cfg)
    model = instantiate(cfg.model, _recursive_=True)
    _load_checkpoint(model, ckpt_path)
    model = model.to(device)
    if mode == "eval":
        model.eval()
    return model


def _hf_download(model_id):
    from huggingface_hub import hf_hub_download

    config_name, checkpoint_name = HF_MODEL_ID_TO_FILENAMES[model_id]
    ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name)
    return config_name, ckpt_path


def build_sam2_hf(model_id, **kwargs):
    config_name, ckpt_path = _hf_download(model_id)
    return build_sam2(config_file=config_name, ckpt_path=ckpt_path, **kwargs)


def build_sam2_video_predictor_hf(model_id, **kwargs):
    config_name, ckpt_path = _hf_download(model_id)
    return build_sam2_video_predictor(
        config_file=config_name, ckpt_path=ckpt_path, **kwargs
    )


def _load_checkpoint(model, ckpt_path):
    if ckpt_path is not None:
        sd = torch.load(ckpt_path, map_location="cpu", weights_only=True)["model"]
        missing_keys, unexpected_keys = model.load_state_dict(sd)
        if missing_keys:
            logging.error(missing_keys)
            raise RuntimeError()
        if unexpected_keys:
            logging.error(unexpected_keys)
            raise RuntimeError()
        logging.info("Loaded checkpoint successfully")
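A minimal sketch of the Hub loading path defined above, wrapping the resulting model in the image predictor used elsewhere in this Space; the prompt box and image are placeholders:

import numpy as np
from PIL import Image

from sam2.build_sam import build_sam2_hf
from sam2.sam2_image_predictor import SAM2ImagePredictor

sam2_model = build_sam2_hf("facebook/sam2.1-hiera-large", device="cuda")
predictor = SAM2ImagePredictor(sam2_model)

image = np.array(Image.open("example.jpg").convert("RGB"))  # placeholder image
predictor.set_image(image)
masks, scores, _ = predictor.predict(
    point_coords=None,
    point_labels=None,
    box=np.array([[10, 20, 200, 220]]),  # placeholder XYXY prompt box
    multimask_output=False,
)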
eval/grounded_sam/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml
ADDED
@@ -0,0 +1,116 @@
1 |
+
# @package _global_
|
2 |
+
|
3 |
+
# Model
|
4 |
+
model:
|
5 |
+
_target_: sam2.modeling.sam2_base.SAM2Base
|
6 |
+
image_encoder:
|
7 |
+
_target_: sam2.modeling.backbones.image_encoder.ImageEncoder
|
8 |
+
scalp: 1
|
9 |
+
trunk:
|
10 |
+
_target_: sam2.modeling.backbones.hieradet.Hiera
|
11 |
+
embed_dim: 112
|
12 |
+
num_heads: 2
|
13 |
+
neck:
|
14 |
+
_target_: sam2.modeling.backbones.image_encoder.FpnNeck
|
15 |
+
position_encoding:
|
16 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
17 |
+
num_pos_feats: 256
|
18 |
+
normalize: true
|
19 |
+
scale: null
|
20 |
+
temperature: 10000
|
21 |
+
d_model: 256
|
22 |
+
backbone_channel_list: [896, 448, 224, 112]
|
23 |
+
fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
|
24 |
+
fpn_interp_model: nearest
|
25 |
+
|
26 |
+
memory_attention:
|
27 |
+
_target_: sam2.modeling.memory_attention.MemoryAttention
|
28 |
+
d_model: 256
|
29 |
+
pos_enc_at_input: true
|
30 |
+
layer:
|
31 |
+
_target_: sam2.modeling.memory_attention.MemoryAttentionLayer
|
32 |
+
activation: relu
|
33 |
+
dim_feedforward: 2048
|
34 |
+
dropout: 0.1
|
35 |
+
pos_enc_at_attn: false
|
36 |
+
self_attention:
|
37 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
38 |
+
rope_theta: 10000.0
|
39 |
+
feat_sizes: [32, 32]
|
40 |
+
embedding_dim: 256
|
41 |
+
num_heads: 1
|
42 |
+
downsample_rate: 1
|
43 |
+
dropout: 0.1
|
44 |
+
d_model: 256
|
45 |
+
pos_enc_at_cross_attn_keys: true
|
46 |
+
pos_enc_at_cross_attn_queries: false
|
47 |
+
cross_attention:
|
48 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
49 |
+
rope_theta: 10000.0
|
50 |
+
feat_sizes: [32, 32]
|
51 |
+
rope_k_repeat: True
|
52 |
+
embedding_dim: 256
|
53 |
+
num_heads: 1
|
54 |
+
downsample_rate: 1
|
55 |
+
dropout: 0.1
|
56 |
+
kv_in_dim: 64
|
57 |
+
num_layers: 4
|
58 |
+
|
59 |
+
memory_encoder:
|
60 |
+
_target_: sam2.modeling.memory_encoder.MemoryEncoder
|
61 |
+
out_dim: 64
|
62 |
+
position_encoding:
|
63 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
64 |
+
num_pos_feats: 64
|
65 |
+
normalize: true
|
66 |
+
scale: null
|
67 |
+
temperature: 10000
|
68 |
+
mask_downsampler:
|
69 |
+
_target_: sam2.modeling.memory_encoder.MaskDownSampler
|
70 |
+
kernel_size: 3
|
71 |
+
stride: 2
|
72 |
+
padding: 1
|
73 |
+
fuser:
|
74 |
+
_target_: sam2.modeling.memory_encoder.Fuser
|
75 |
+
layer:
|
76 |
+
_target_: sam2.modeling.memory_encoder.CXBlock
|
77 |
+
dim: 256
|
78 |
+
kernel_size: 7
|
79 |
+
padding: 3
|
80 |
+
layer_scale_init_value: 1e-6
|
81 |
+
use_dwconv: True # depth-wise convs
|
82 |
+
num_layers: 2
|
83 |
+
|
84 |
+
num_maskmem: 7
|
85 |
+
image_size: 1024
|
86 |
+
# apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
|
87 |
+
sigmoid_scale_for_mem_enc: 20.0
|
88 |
+
sigmoid_bias_for_mem_enc: -10.0
|
89 |
+
use_mask_input_as_output_without_sam: true
|
90 |
+
# Memory
|
91 |
+
directly_add_no_mem_embed: true
|
92 |
+
no_obj_embed_spatial: true
|
93 |
+
# use high-resolution feature map in the SAM mask decoder
|
94 |
+
use_high_res_features_in_sam: true
|
95 |
+
# output 3 masks on the first click on initial conditioning frames
|
96 |
+
multimask_output_in_sam: true
|
97 |
+
# SAM heads
|
98 |
+
iou_prediction_use_sigmoid: True
|
99 |
+
# cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
|
100 |
+
use_obj_ptrs_in_encoder: true
|
101 |
+
add_tpos_enc_to_obj_ptrs: true
|
102 |
+
proj_tpos_enc_in_obj_ptrs: true
|
103 |
+
use_signed_tpos_enc_to_obj_ptrs: true
|
104 |
+
only_obj_ptrs_in_the_past_for_eval: true
|
105 |
+
# object occlusion prediction
|
106 |
+
pred_obj_scores: true
|
107 |
+
pred_obj_scores_mlp: true
|
108 |
+
fixed_no_obj_ptr: true
|
109 |
+
# multimask tracking settings
|
110 |
+
multimask_output_for_tracking: true
|
111 |
+
use_multimask_token_for_obj_ptr: true
|
112 |
+
multimask_min_pt_num: 0
|
113 |
+
multimask_max_pt_num: 1
|
114 |
+
use_mlp_for_obj_ptr_proj: true
|
115 |
+
# Compilation flag
|
116 |
+
compile_image_encoder: False
|
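The fields above are consumed verbatim by Hydra when build_sam2 (added above in build_sam.py) composes this file. A sketch of pairing this config with a locally downloaded checkpoint rather than the Hub path shown earlier; the checkpoint location is a placeholder:

from sam2.build_sam import build_sam2

sam2_model = build_sam2(
    config_file="configs/sam2.1/sam2.1_hiera_b+.yaml",
    ckpt_path="checkpoints/sam2.1_hiera_base_plus.pt",  # placeholder local path
    device="cuda",
)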
eval/grounded_sam/sam2/configs/sam2.1/sam2.1_hiera_l.yaml
ADDED
@@ -0,0 +1,120 @@
1 |
+
# @package _global_
|
2 |
+
|
3 |
+
# Model
|
4 |
+
model:
|
5 |
+
_target_: sam2.modeling.sam2_base.SAM2Base
|
6 |
+
image_encoder:
|
7 |
+
_target_: sam2.modeling.backbones.image_encoder.ImageEncoder
|
8 |
+
scalp: 1
|
9 |
+
trunk:
|
10 |
+
_target_: sam2.modeling.backbones.hieradet.Hiera
|
11 |
+
embed_dim: 144
|
12 |
+
num_heads: 2
|
13 |
+
stages: [2, 6, 36, 4]
|
14 |
+
global_att_blocks: [23, 33, 43]
|
15 |
+
window_pos_embed_bkg_spatial_size: [7, 7]
|
16 |
+
window_spec: [8, 4, 16, 8]
|
17 |
+
neck:
|
18 |
+
_target_: sam2.modeling.backbones.image_encoder.FpnNeck
|
19 |
+
position_encoding:
|
20 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
21 |
+
num_pos_feats: 256
|
22 |
+
normalize: true
|
23 |
+
scale: null
|
24 |
+
temperature: 10000
|
25 |
+
d_model: 256
|
26 |
+
backbone_channel_list: [1152, 576, 288, 144]
|
27 |
+
fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
|
28 |
+
fpn_interp_model: nearest
|
29 |
+
|
30 |
+
memory_attention:
|
31 |
+
_target_: sam2.modeling.memory_attention.MemoryAttention
|
32 |
+
d_model: 256
|
33 |
+
pos_enc_at_input: true
|
34 |
+
layer:
|
35 |
+
_target_: sam2.modeling.memory_attention.MemoryAttentionLayer
|
36 |
+
activation: relu
|
37 |
+
dim_feedforward: 2048
|
38 |
+
dropout: 0.1
|
39 |
+
pos_enc_at_attn: false
|
40 |
+
self_attention:
|
41 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
42 |
+
rope_theta: 10000.0
|
43 |
+
feat_sizes: [32, 32]
|
44 |
+
embedding_dim: 256
|
45 |
+
num_heads: 1
|
46 |
+
downsample_rate: 1
|
47 |
+
dropout: 0.1
|
48 |
+
d_model: 256
|
49 |
+
pos_enc_at_cross_attn_keys: true
|
50 |
+
pos_enc_at_cross_attn_queries: false
|
51 |
+
cross_attention:
|
52 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
53 |
+
rope_theta: 10000.0
|
54 |
+
feat_sizes: [32, 32]
|
55 |
+
rope_k_repeat: True
|
56 |
+
embedding_dim: 256
|
57 |
+
num_heads: 1
|
58 |
+
downsample_rate: 1
|
59 |
+
dropout: 0.1
|
60 |
+
kv_in_dim: 64
|
61 |
+
num_layers: 4
|
62 |
+
|
63 |
+
memory_encoder:
|
64 |
+
_target_: sam2.modeling.memory_encoder.MemoryEncoder
|
65 |
+
out_dim: 64
|
66 |
+
position_encoding:
|
67 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
68 |
+
num_pos_feats: 64
|
69 |
+
normalize: true
|
70 |
+
scale: null
|
71 |
+
temperature: 10000
|
72 |
+
mask_downsampler:
|
73 |
+
_target_: sam2.modeling.memory_encoder.MaskDownSampler
|
74 |
+
kernel_size: 3
|
75 |
+
stride: 2
|
76 |
+
padding: 1
|
77 |
+
fuser:
|
78 |
+
_target_: sam2.modeling.memory_encoder.Fuser
|
79 |
+
layer:
|
80 |
+
_target_: sam2.modeling.memory_encoder.CXBlock
|
81 |
+
dim: 256
|
82 |
+
kernel_size: 7
|
83 |
+
padding: 3
|
84 |
+
layer_scale_init_value: 1e-6
|
85 |
+
use_dwconv: True # depth-wise convs
|
86 |
+
num_layers: 2
|
87 |
+
|
88 |
+
num_maskmem: 7
|
89 |
+
image_size: 1024
|
90 |
+
# apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
|
91 |
+
sigmoid_scale_for_mem_enc: 20.0
|
92 |
+
sigmoid_bias_for_mem_enc: -10.0
|
93 |
+
use_mask_input_as_output_without_sam: true
|
94 |
+
# Memory
|
95 |
+
directly_add_no_mem_embed: true
|
96 |
+
no_obj_embed_spatial: true
|
97 |
+
# use high-resolution feature map in the SAM mask decoder
|
98 |
+
use_high_res_features_in_sam: true
|
99 |
+
# output 3 masks on the first click on initial conditioning frames
|
100 |
+
multimask_output_in_sam: true
|
101 |
+
# SAM heads
|
102 |
+
iou_prediction_use_sigmoid: True
|
103 |
+
# cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
|
104 |
+
use_obj_ptrs_in_encoder: true
|
105 |
+
add_tpos_enc_to_obj_ptrs: true
|
106 |
+
proj_tpos_enc_in_obj_ptrs: true
|
107 |
+
use_signed_tpos_enc_to_obj_ptrs: true
|
108 |
+
only_obj_ptrs_in_the_past_for_eval: true
|
109 |
+
# object occlusion prediction
|
110 |
+
pred_obj_scores: true
|
111 |
+
pred_obj_scores_mlp: true
|
112 |
+
fixed_no_obj_ptr: true
|
113 |
+
# multimask tracking settings
|
114 |
+
multimask_output_for_tracking: true
|
115 |
+
use_multimask_token_for_obj_ptr: true
|
116 |
+
multimask_min_pt_num: 0
|
117 |
+
multimask_max_pt_num: 1
|
118 |
+
use_mlp_for_obj_ptr_proj: true
|
119 |
+
# Compilation flag
|
120 |
+
compile_image_encoder: False
|
eval/grounded_sam/sam2/configs/sam2.1/sam2.1_hiera_s.yaml
ADDED
@@ -0,0 +1,119 @@
1 |
+
# @package _global_
|
2 |
+
|
3 |
+
# Model
|
4 |
+
model:
|
5 |
+
_target_: sam2.modeling.sam2_base.SAM2Base
|
6 |
+
image_encoder:
|
7 |
+
_target_: sam2.modeling.backbones.image_encoder.ImageEncoder
|
8 |
+
scalp: 1
|
9 |
+
trunk:
|
10 |
+
_target_: sam2.modeling.backbones.hieradet.Hiera
|
11 |
+
embed_dim: 96
|
12 |
+
num_heads: 1
|
13 |
+
stages: [1, 2, 11, 2]
|
14 |
+
global_att_blocks: [7, 10, 13]
|
15 |
+
window_pos_embed_bkg_spatial_size: [7, 7]
|
16 |
+
neck:
|
17 |
+
_target_: sam2.modeling.backbones.image_encoder.FpnNeck
|
18 |
+
position_encoding:
|
19 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
20 |
+
num_pos_feats: 256
|
21 |
+
normalize: true
|
22 |
+
scale: null
|
23 |
+
temperature: 10000
|
24 |
+
d_model: 256
|
25 |
+
backbone_channel_list: [768, 384, 192, 96]
|
26 |
+
fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
|
27 |
+
fpn_interp_model: nearest
|
28 |
+
|
29 |
+
memory_attention:
|
30 |
+
_target_: sam2.modeling.memory_attention.MemoryAttention
|
31 |
+
d_model: 256
|
32 |
+
pos_enc_at_input: true
|
33 |
+
layer:
|
34 |
+
_target_: sam2.modeling.memory_attention.MemoryAttentionLayer
|
35 |
+
activation: relu
|
36 |
+
dim_feedforward: 2048
|
37 |
+
dropout: 0.1
|
38 |
+
pos_enc_at_attn: false
|
39 |
+
self_attention:
|
40 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
41 |
+
rope_theta: 10000.0
|
42 |
+
feat_sizes: [32, 32]
|
43 |
+
embedding_dim: 256
|
44 |
+
num_heads: 1
|
45 |
+
downsample_rate: 1
|
46 |
+
dropout: 0.1
|
47 |
+
d_model: 256
|
48 |
+
pos_enc_at_cross_attn_keys: true
|
49 |
+
pos_enc_at_cross_attn_queries: false
|
50 |
+
cross_attention:
|
51 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
52 |
+
rope_theta: 10000.0
|
53 |
+
feat_sizes: [32, 32]
|
54 |
+
rope_k_repeat: True
|
55 |
+
embedding_dim: 256
|
56 |
+
num_heads: 1
|
57 |
+
downsample_rate: 1
|
58 |
+
dropout: 0.1
|
59 |
+
kv_in_dim: 64
|
60 |
+
num_layers: 4
|
61 |
+
|
62 |
+
memory_encoder:
|
63 |
+
_target_: sam2.modeling.memory_encoder.MemoryEncoder
|
64 |
+
out_dim: 64
|
65 |
+
position_encoding:
|
66 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
67 |
+
num_pos_feats: 64
|
68 |
+
normalize: true
|
69 |
+
scale: null
|
70 |
+
temperature: 10000
|
71 |
+
mask_downsampler:
|
72 |
+
_target_: sam2.modeling.memory_encoder.MaskDownSampler
|
73 |
+
kernel_size: 3
|
74 |
+
stride: 2
|
75 |
+
padding: 1
|
76 |
+
fuser:
|
77 |
+
_target_: sam2.modeling.memory_encoder.Fuser
|
78 |
+
layer:
|
79 |
+
_target_: sam2.modeling.memory_encoder.CXBlock
|
80 |
+
dim: 256
|
81 |
+
kernel_size: 7
|
82 |
+
padding: 3
|
83 |
+
layer_scale_init_value: 1e-6
|
84 |
+
use_dwconv: True # depth-wise convs
|
85 |
+
num_layers: 2
|
86 |
+
|
87 |
+
num_maskmem: 7
|
88 |
+
image_size: 1024
|
89 |
+
# apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
|
90 |
+
sigmoid_scale_for_mem_enc: 20.0
|
91 |
+
sigmoid_bias_for_mem_enc: -10.0
|
92 |
+
use_mask_input_as_output_without_sam: true
|
93 |
+
# Memory
|
94 |
+
directly_add_no_mem_embed: true
|
95 |
+
no_obj_embed_spatial: true
|
96 |
+
# use high-resolution feature map in the SAM mask decoder
|
97 |
+
use_high_res_features_in_sam: true
|
98 |
+
# output 3 masks on the first click on initial conditioning frames
|
99 |
+
multimask_output_in_sam: true
|
100 |
+
# SAM heads
|
101 |
+
iou_prediction_use_sigmoid: True
|
102 |
+
# cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
|
103 |
+
use_obj_ptrs_in_encoder: true
|
104 |
+
add_tpos_enc_to_obj_ptrs: true
|
105 |
+
proj_tpos_enc_in_obj_ptrs: true
|
106 |
+
use_signed_tpos_enc_to_obj_ptrs: true
|
107 |
+
only_obj_ptrs_in_the_past_for_eval: true
|
108 |
+
# object occlusion prediction
|
109 |
+
pred_obj_scores: true
|
110 |
+
pred_obj_scores_mlp: true
|
111 |
+
fixed_no_obj_ptr: true
|
112 |
+
# multimask tracking settings
|
113 |
+
multimask_output_for_tracking: true
|
114 |
+
use_multimask_token_for_obj_ptr: true
|
115 |
+
multimask_min_pt_num: 0
|
116 |
+
multimask_max_pt_num: 1
|
117 |
+
use_mlp_for_obj_ptr_proj: true
|
118 |
+
# Compilation flag
|
119 |
+
compile_image_encoder: False
|
eval/grounded_sam/sam2/configs/sam2.1/sam2.1_hiera_t.yaml
ADDED
@@ -0,0 +1,121 @@
1 |
+
# @package _global_
|
2 |
+
|
3 |
+
# Model
|
4 |
+
model:
|
5 |
+
_target_: sam2.modeling.sam2_base.SAM2Base
|
6 |
+
image_encoder:
|
7 |
+
_target_: sam2.modeling.backbones.image_encoder.ImageEncoder
|
8 |
+
scalp: 1
|
9 |
+
trunk:
|
10 |
+
_target_: sam2.modeling.backbones.hieradet.Hiera
|
11 |
+
embed_dim: 96
|
12 |
+
num_heads: 1
|
13 |
+
stages: [1, 2, 7, 2]
|
14 |
+
global_att_blocks: [5, 7, 9]
|
15 |
+
window_pos_embed_bkg_spatial_size: [7, 7]
|
16 |
+
neck:
|
17 |
+
_target_: sam2.modeling.backbones.image_encoder.FpnNeck
|
18 |
+
position_encoding:
|
19 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
20 |
+
num_pos_feats: 256
|
21 |
+
normalize: true
|
22 |
+
scale: null
|
23 |
+
temperature: 10000
|
24 |
+
d_model: 256
|
25 |
+
backbone_channel_list: [768, 384, 192, 96]
|
26 |
+
fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
|
27 |
+
fpn_interp_model: nearest
|
28 |
+
|
29 |
+
memory_attention:
|
30 |
+
_target_: sam2.modeling.memory_attention.MemoryAttention
|
31 |
+
d_model: 256
|
32 |
+
pos_enc_at_input: true
|
33 |
+
layer:
|
34 |
+
_target_: sam2.modeling.memory_attention.MemoryAttentionLayer
|
35 |
+
activation: relu
|
36 |
+
dim_feedforward: 2048
|
37 |
+
dropout: 0.1
|
38 |
+
pos_enc_at_attn: false
|
39 |
+
self_attention:
|
40 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
41 |
+
rope_theta: 10000.0
|
42 |
+
feat_sizes: [32, 32]
|
43 |
+
embedding_dim: 256
|
44 |
+
num_heads: 1
|
45 |
+
downsample_rate: 1
|
46 |
+
dropout: 0.1
|
47 |
+
d_model: 256
|
48 |
+
pos_enc_at_cross_attn_keys: true
|
49 |
+
pos_enc_at_cross_attn_queries: false
|
50 |
+
cross_attention:
|
51 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
52 |
+
rope_theta: 10000.0
|
53 |
+
feat_sizes: [32, 32]
|
54 |
+
rope_k_repeat: True
|
55 |
+
embedding_dim: 256
|
56 |
+
num_heads: 1
|
57 |
+
downsample_rate: 1
|
58 |
+
dropout: 0.1
|
59 |
+
kv_in_dim: 64
|
60 |
+
num_layers: 4
|
61 |
+
|
62 |
+
memory_encoder:
|
63 |
+
_target_: sam2.modeling.memory_encoder.MemoryEncoder
|
64 |
+
out_dim: 64
|
65 |
+
position_encoding:
|
66 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
67 |
+
num_pos_feats: 64
|
68 |
+
normalize: true
|
69 |
+
scale: null
|
70 |
+
temperature: 10000
|
71 |
+
mask_downsampler:
|
72 |
+
_target_: sam2.modeling.memory_encoder.MaskDownSampler
|
73 |
+
kernel_size: 3
|
74 |
+
stride: 2
|
75 |
+
padding: 1
|
76 |
+
fuser:
|
77 |
+
_target_: sam2.modeling.memory_encoder.Fuser
|
78 |
+
layer:
|
79 |
+
_target_: sam2.modeling.memory_encoder.CXBlock
|
80 |
+
dim: 256
|
81 |
+
kernel_size: 7
|
82 |
+
padding: 3
|
83 |
+
layer_scale_init_value: 1e-6
|
84 |
+
use_dwconv: True # depth-wise convs
|
85 |
+
num_layers: 2
|
86 |
+
|
87 |
+
num_maskmem: 7
|
88 |
+
image_size: 1024
|
89 |
+
# apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
|
90 |
+
# SAM decoder
|
91 |
+
sigmoid_scale_for_mem_enc: 20.0
|
92 |
+
sigmoid_bias_for_mem_enc: -10.0
|
93 |
+
use_mask_input_as_output_without_sam: true
|
94 |
+
# Memory
|
95 |
+
directly_add_no_mem_embed: true
|
96 |
+
no_obj_embed_spatial: true
|
97 |
+
# use high-resolution feature map in the SAM mask decoder
|
98 |
+
use_high_res_features_in_sam: true
|
99 |
+
# output 3 masks on the first click on initial conditioning frames
|
100 |
+
multimask_output_in_sam: true
|
101 |
+
# SAM heads
|
102 |
+
iou_prediction_use_sigmoid: True
|
103 |
+
# cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
|
104 |
+
use_obj_ptrs_in_encoder: true
|
105 |
+
add_tpos_enc_to_obj_ptrs: true
|
106 |
+
proj_tpos_enc_in_obj_ptrs: true
|
107 |
+
use_signed_tpos_enc_to_obj_ptrs: true
|
108 |
+
only_obj_ptrs_in_the_past_for_eval: true
|
109 |
+
# object occlusion prediction
|
110 |
+
pred_obj_scores: true
|
111 |
+
pred_obj_scores_mlp: true
|
112 |
+
fixed_no_obj_ptr: true
|
113 |
+
# multimask tracking settings
|
114 |
+
multimask_output_for_tracking: true
|
115 |
+
use_multimask_token_for_obj_ptr: true
|
116 |
+
multimask_min_pt_num: 0
|
117 |
+
multimask_max_pt_num: 1
|
118 |
+
use_mlp_for_obj_ptr_proj: true
|
119 |
+
# Compilation flag
|
120 |
+
# HieraT does not currently support compilation, should always be set to False
|
121 |
+
compile_image_encoder: False
|
eval/grounded_sam/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml
ADDED
@@ -0,0 +1,339 @@
# @package _global_

scratch:
  resolution: 1024
  train_batch_size: 1
  num_train_workers: 10
  num_frames: 8
  max_num_objects: 3
  base_lr: 5.0e-6
  vision_lr: 3.0e-06
  phases_per_epoch: 1
  num_epochs: 40

dataset:
  # PATHS to Dataset
  img_folder: null # PATH to MOSE JPEGImages folder
  gt_folder: null # PATH to MOSE Annotations folder
  file_list_txt: training/assets/MOSE_sample_train_list.txt # Optional PATH to filelist containing a subset of videos to be used for training
  multiplier: 2

# Video transforms
vos:
  train_transforms:
    - _target_: training.dataset.transforms.ComposeAPI
      transforms:
        - _target_: training.dataset.transforms.RandomHorizontalFlip
          consistent_transform: True
        - _target_: training.dataset.transforms.RandomAffine
          degrees: 25
          shear: 20
          image_interpolation: bilinear
          consistent_transform: True
        - _target_: training.dataset.transforms.RandomResizeAPI
          sizes: ${scratch.resolution}
          square: true
          consistent_transform: True
        - _target_: training.dataset.transforms.ColorJitter
          consistent_transform: True
          brightness: 0.1
          contrast: 0.03
          saturation: 0.03
          hue: null
        - _target_: training.dataset.transforms.RandomGrayscale
          p: 0.05
          consistent_transform: True
        - _target_: training.dataset.transforms.ColorJitter
          consistent_transform: False
          brightness: 0.1
          contrast: 0.05
          saturation: 0.05
          hue: null
        - _target_: training.dataset.transforms.ToTensorAPI
        - _target_: training.dataset.transforms.NormalizeAPI
          mean: [0.485, 0.456, 0.406]
          std: [0.229, 0.224, 0.225]

trainer:
  _target_: training.trainer.Trainer
  mode: train_only
  max_epochs: ${times:${scratch.num_epochs},${scratch.phases_per_epoch}}
  accelerator: cuda
  seed_value: 123

  model:
    _target_: training.model.sam2.SAM2Train
    image_encoder:
      _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
      scalp: 1
      trunk:
        _target_: sam2.modeling.backbones.hieradet.Hiera
        embed_dim: 112
        num_heads: 2
        drop_path_rate: 0.1
      neck:
        _target_: sam2.modeling.backbones.image_encoder.FpnNeck
        position_encoding:
          _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
          num_pos_feats: 256
          normalize: true
          scale: null
          temperature: 10000
        d_model: 256
        backbone_channel_list: [896, 448, 224, 112]
        fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
        fpn_interp_model: nearest

    memory_attention:
      _target_: sam2.modeling.memory_attention.MemoryAttention
      d_model: 256
      pos_enc_at_input: true
      layer:
        _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
        activation: relu
        dim_feedforward: 2048
        dropout: 0.1
        pos_enc_at_attn: false
        self_attention:
          _target_: sam2.modeling.sam.transformer.RoPEAttention
          rope_theta: 10000.0
          feat_sizes: [32, 32]
          embedding_dim: 256
          num_heads: 1
          downsample_rate: 1
          dropout: 0.1
        d_model: 256
        pos_enc_at_cross_attn_keys: true
        pos_enc_at_cross_attn_queries: false
        cross_attention:
          _target_: sam2.modeling.sam.transformer.RoPEAttention
          rope_theta: 10000.0
          feat_sizes: [32, 32]
          rope_k_repeat: True
          embedding_dim: 256
          num_heads: 1
          downsample_rate: 1
          dropout: 0.1
          kv_in_dim: 64
      num_layers: 4

    memory_encoder:
      _target_: sam2.modeling.memory_encoder.MemoryEncoder
      out_dim: 64
      position_encoding:
        _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
        num_pos_feats: 64
        normalize: true
        scale: null
        temperature: 10000
      mask_downsampler:
        _target_: sam2.modeling.memory_encoder.MaskDownSampler
        kernel_size: 3
        stride: 2
        padding: 1
      fuser:
        _target_: sam2.modeling.memory_encoder.Fuser
        layer:
          _target_: sam2.modeling.memory_encoder.CXBlock
          dim: 256
          kernel_size: 7
          padding: 3
          layer_scale_init_value: 1e-6
          use_dwconv: True # depth-wise convs
        num_layers: 2

    num_maskmem: 7
    image_size: ${scratch.resolution}
    # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
    sigmoid_scale_for_mem_enc: 20.0
    sigmoid_bias_for_mem_enc: -10.0
    use_mask_input_as_output_without_sam: true
    # Memory
    directly_add_no_mem_embed: true
    no_obj_embed_spatial: true
    # use high-resolution feature map in the SAM mask decoder
    use_high_res_features_in_sam: true
    # output 3 masks on the first click on initial conditioning frames
    multimask_output_in_sam: true
    # SAM heads
    iou_prediction_use_sigmoid: True
    # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
    use_obj_ptrs_in_encoder: true
    add_tpos_enc_to_obj_ptrs: true
    proj_tpos_enc_in_obj_ptrs: true
    use_signed_tpos_enc_to_obj_ptrs: true
    only_obj_ptrs_in_the_past_for_eval: true
    # object occlusion prediction
    pred_obj_scores: true
    pred_obj_scores_mlp: true
    fixed_no_obj_ptr: true
    # multimask tracking settings
    multimask_output_for_tracking: true
    use_multimask_token_for_obj_ptr: true
    multimask_min_pt_num: 0
    multimask_max_pt_num: 1
    use_mlp_for_obj_ptr_proj: true
    # Compilation flag
    # compile_image_encoder: False

    ####### Training specific params #######
    # box/point input and corrections
    prob_to_use_pt_input_for_train: 0.5
    prob_to_use_pt_input_for_eval: 0.0
    prob_to_use_box_input_for_train: 0.5 # 0.5*0.5 = 0.25 prob to use box instead of points
    prob_to_use_box_input_for_eval: 0.0
    prob_to_sample_from_gt_for_train: 0.1 # with a small prob, sampling correction points from GT mask instead of prediction errors
    num_frames_to_correct_for_train: 2 # iteratively sample on random 1~2 frames (always include the first frame)
    num_frames_to_correct_for_eval: 1 # only iteratively sample on first frame
    rand_frames_to_correct_for_train: True # random #init-cond-frame ~ 2
    add_all_frames_to_correct_as_cond: True # when a frame receives a correction click, it becomes a conditioning frame (even if it's not initially a conditioning frame)
    # maximum 2 initial conditioning frames
    num_init_cond_frames_for_train: 2
    rand_init_cond_frames_for_train: True # random 1~2
    num_correction_pt_per_frame: 7
    use_act_ckpt_iterative_pt_sampling: false



    num_init_cond_frames_for_eval: 1 # only mask on the first frame
    forward_backbone_per_frame_for_eval: True


  data:
    train:
      _target_: training.dataset.sam2_datasets.TorchTrainMixedDataset
      phases_per_epoch: ${scratch.phases_per_epoch}
      batch_sizes:
        - ${scratch.train_batch_size}

      datasets:
        - _target_: training.dataset.utils.RepeatFactorWrapper
          dataset:
            _target_: training.dataset.utils.ConcatDataset
            datasets:
              - _target_: training.dataset.vos_dataset.VOSDataset
                transforms: ${vos.train_transforms}
                training: true
                video_dataset:
                  _target_: training.dataset.vos_raw_dataset.PNGRawDataset
                  img_folder: ${dataset.img_folder}
                  gt_folder: ${dataset.gt_folder}
                  file_list_txt: ${dataset.file_list_txt}
                sampler:
                  _target_: training.dataset.vos_sampler.RandomUniformSampler
                  num_frames: ${scratch.num_frames}
                  max_num_objects: ${scratch.max_num_objects}
                multiplier: ${dataset.multiplier}
      shuffle: True
      num_workers: ${scratch.num_train_workers}
      pin_memory: True
      drop_last: True
      collate_fn:
        _target_: training.utils.data_utils.collate_fn
        _partial_: true
        dict_key: all

  optim:
    amp:
      enabled: True
      amp_dtype: bfloat16

    optimizer:
      _target_: torch.optim.AdamW

    gradient_clip:
      _target_: training.optimizer.GradientClipper
      max_norm: 0.1
      norm_type: 2

    param_group_modifiers:
      - _target_: training.optimizer.layer_decay_param_modifier
        _partial_: True
        layer_decay_value: 0.9
        apply_to: 'image_encoder.trunk'
        overrides:
          - pattern: '*pos_embed*'
            value: 1.0

    options:
      lr:
        - scheduler:
            _target_: fvcore.common.param_scheduler.CosineParamScheduler
            start_value: ${scratch.base_lr}
            end_value: ${divide:${scratch.base_lr},10}
        - scheduler:
            _target_: fvcore.common.param_scheduler.CosineParamScheduler
            start_value: ${scratch.vision_lr}
            end_value: ${divide:${scratch.vision_lr},10}
          param_names:
            - 'image_encoder.*'
      weight_decay:
        - scheduler:
            _target_: fvcore.common.param_scheduler.ConstantParamScheduler
            value: 0.1
        - scheduler:
            _target_: fvcore.common.param_scheduler.ConstantParamScheduler
            value: 0.0
          param_names:
            - '*bias*'
          module_cls_names: ['torch.nn.LayerNorm']

  loss:
    all:
      _target_: training.loss_fns.MultiStepMultiMasksAndIous
      weight_dict:
        loss_mask: 20
        loss_dice: 1
        loss_iou: 1
        loss_class: 1
      supervise_all_iou: true
      iou_use_l1_loss: true
      pred_obj_scores: true
      focal_gamma_obj_score: 0.0
      focal_alpha_obj_score: -1.0

  distributed:
    backend: nccl
    find_unused_parameters: True

  logging:
    tensorboard_writer:
      _target_: training.utils.logger.make_tensorboard_logger
      log_dir: ${launcher.experiment_log_dir}/tensorboard
      flush_secs: 120
      should_log: True
    log_dir: ${launcher.experiment_log_dir}/logs
    log_freq: 10

  # initialize from a SAM 2 checkpoint
  checkpoint:
    save_dir: ${launcher.experiment_log_dir}/checkpoints
    save_freq: 0 # 0 only last checkpoint is saved.
    model_weight_initializer:
      _partial_: True
      _target_: training.utils.checkpoint_utils.load_state_dict_into_model
      strict: True
      ignore_unexpected_keys: null
      ignore_missing_keys: null

      state_dict:
        _target_: training.utils.checkpoint_utils.load_checkpoint_and_apply_kernels
        checkpoint_path: ./checkpoints/sam2.1_hiera_base_plus.pt # PATH to SAM 2.1 checkpoint
        ckpt_state_dict_keys: ['model']

launcher:
  num_nodes: 1
  gpus_per_node: 8
  experiment_log_dir: null # Path to log directory, defaults to ./sam2_logs/${config_name}

# SLURM args if running on a cluster
submitit:
  partition: null
  account: null
  qos: null
  cpus_per_task: 10
  use_cluster: false
  timeout_hour: 24
  name: null
  port_range: [10000, 65000]
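The ${times:...} and ${divide:...} expressions in this config are OmegaConf resolvers; the training entry point is expected to register them before composing the config. A minimal sketch of such a registration (the lambda definitions below are illustrative assumptions, not copied from this repository):

    # Minimal sketch: arithmetic resolvers matching the names used in the config above.
    from omegaconf import OmegaConf

    OmegaConf.register_new_resolver("times", lambda a, b: a * b)
    OmegaConf.register_new_resolver("divide", lambda a, b: a / b)

    cfg = OmegaConf.create(
        {"num_epochs": 40, "phases": 1, "max_epochs": "${times:${num_epochs},${phases}}"}
    )
    print(cfg.max_epochs)  # interpolations resolve on access -> 40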
eval/grounded_sam/sam2/configs/sam2/sam2_hiera_b+.yaml
ADDED
@@ -0,0 +1,113 @@
# @package _global_

# Model
model:
  _target_: sam2.modeling.sam2_base.SAM2Base
  image_encoder:
    _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
    scalp: 1
    trunk:
      _target_: sam2.modeling.backbones.hieradet.Hiera
      embed_dim: 112
      num_heads: 2
    neck:
      _target_: sam2.modeling.backbones.image_encoder.FpnNeck
      position_encoding:
        _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
        num_pos_feats: 256
        normalize: true
        scale: null
        temperature: 10000
      d_model: 256
      backbone_channel_list: [896, 448, 224, 112]
      fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
      fpn_interp_model: nearest

  memory_attention:
    _target_: sam2.modeling.memory_attention.MemoryAttention
    d_model: 256
    pos_enc_at_input: true
    layer:
      _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
      activation: relu
      dim_feedforward: 2048
      dropout: 0.1
      pos_enc_at_attn: false
      self_attention:
        _target_: sam2.modeling.sam.transformer.RoPEAttention
        rope_theta: 10000.0
        feat_sizes: [32, 32]
        embedding_dim: 256
        num_heads: 1
        downsample_rate: 1
        dropout: 0.1
      d_model: 256
      pos_enc_at_cross_attn_keys: true
      pos_enc_at_cross_attn_queries: false
      cross_attention:
        _target_: sam2.modeling.sam.transformer.RoPEAttention
        rope_theta: 10000.0
        feat_sizes: [32, 32]
        rope_k_repeat: True
        embedding_dim: 256
        num_heads: 1
        downsample_rate: 1
        dropout: 0.1
        kv_in_dim: 64
    num_layers: 4

  memory_encoder:
    _target_: sam2.modeling.memory_encoder.MemoryEncoder
    out_dim: 64
    position_encoding:
      _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
      num_pos_feats: 64
      normalize: true
      scale: null
      temperature: 10000
    mask_downsampler:
      _target_: sam2.modeling.memory_encoder.MaskDownSampler
      kernel_size: 3
      stride: 2
      padding: 1
    fuser:
      _target_: sam2.modeling.memory_encoder.Fuser
      layer:
        _target_: sam2.modeling.memory_encoder.CXBlock
        dim: 256
        kernel_size: 7
        padding: 3
        layer_scale_init_value: 1e-6
        use_dwconv: True # depth-wise convs
      num_layers: 2

  num_maskmem: 7
  image_size: 1024
  # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
  sigmoid_scale_for_mem_enc: 20.0
  sigmoid_bias_for_mem_enc: -10.0
  use_mask_input_as_output_without_sam: true
  # Memory
  directly_add_no_mem_embed: true
  # use high-resolution feature map in the SAM mask decoder
  use_high_res_features_in_sam: true
  # output 3 masks on the first click on initial conditioning frames
  multimask_output_in_sam: true
  # SAM heads
  iou_prediction_use_sigmoid: True
  # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
  use_obj_ptrs_in_encoder: true
  add_tpos_enc_to_obj_ptrs: false
  only_obj_ptrs_in_the_past_for_eval: true
  # object occlusion prediction
  pred_obj_scores: true
  pred_obj_scores_mlp: true
  fixed_no_obj_ptr: true
  # multimask tracking settings
  multimask_output_for_tracking: true
  use_multimask_token_for_obj_ptr: true
  multimask_min_pt_num: 0
  multimask_max_pt_num: 1
  use_mlp_for_obj_ptr_proj: true
  # Compilation flag
  compile_image_encoder: False
eval/grounded_sam/sam2/configs/sam2/sam2_hiera_l.yaml
ADDED
@@ -0,0 +1,117 @@
# @package _global_

# Model
model:
  _target_: sam2.modeling.sam2_base.SAM2Base
  image_encoder:
    _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
    scalp: 1
    trunk:
      _target_: sam2.modeling.backbones.hieradet.Hiera
      embed_dim: 144
      num_heads: 2
      stages: [2, 6, 36, 4]
      global_att_blocks: [23, 33, 43]
      window_pos_embed_bkg_spatial_size: [7, 7]
      window_spec: [8, 4, 16, 8]
    neck:
      _target_: sam2.modeling.backbones.image_encoder.FpnNeck
      position_encoding:
        _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
        num_pos_feats: 256
        normalize: true
        scale: null
        temperature: 10000
      d_model: 256
      backbone_channel_list: [1152, 576, 288, 144]
      fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
      fpn_interp_model: nearest

  memory_attention:
    _target_: sam2.modeling.memory_attention.MemoryAttention
    d_model: 256
    pos_enc_at_input: true
    layer:
      _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
      activation: relu
      dim_feedforward: 2048
      dropout: 0.1
      pos_enc_at_attn: false
      self_attention:
        _target_: sam2.modeling.sam.transformer.RoPEAttention
        rope_theta: 10000.0
        feat_sizes: [32, 32]
        embedding_dim: 256
        num_heads: 1
        downsample_rate: 1
        dropout: 0.1
      d_model: 256
      pos_enc_at_cross_attn_keys: true
      pos_enc_at_cross_attn_queries: false
      cross_attention:
        _target_: sam2.modeling.sam.transformer.RoPEAttention
        rope_theta: 10000.0
        feat_sizes: [32, 32]
        rope_k_repeat: True
        embedding_dim: 256
        num_heads: 1
        downsample_rate: 1
        dropout: 0.1
        kv_in_dim: 64
    num_layers: 4

  memory_encoder:
    _target_: sam2.modeling.memory_encoder.MemoryEncoder
    out_dim: 64
    position_encoding:
      _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
      num_pos_feats: 64
      normalize: true
      scale: null
      temperature: 10000
    mask_downsampler:
      _target_: sam2.modeling.memory_encoder.MaskDownSampler
      kernel_size: 3
      stride: 2
      padding: 1
    fuser:
      _target_: sam2.modeling.memory_encoder.Fuser
      layer:
        _target_: sam2.modeling.memory_encoder.CXBlock
        dim: 256
        kernel_size: 7
        padding: 3
        layer_scale_init_value: 1e-6
        use_dwconv: True # depth-wise convs
      num_layers: 2

  num_maskmem: 7
  image_size: 1024
  # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
  sigmoid_scale_for_mem_enc: 20.0
  sigmoid_bias_for_mem_enc: -10.0
  use_mask_input_as_output_without_sam: true
  # Memory
  directly_add_no_mem_embed: true
  # use high-resolution feature map in the SAM mask decoder
  use_high_res_features_in_sam: true
  # output 3 masks on the first click on initial conditioning frames
  multimask_output_in_sam: true
  # SAM heads
  iou_prediction_use_sigmoid: True
  # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
  use_obj_ptrs_in_encoder: true
  add_tpos_enc_to_obj_ptrs: false
  only_obj_ptrs_in_the_past_for_eval: true
  # object occlusion prediction
  pred_obj_scores: true
  pred_obj_scores_mlp: true
  fixed_no_obj_ptr: true
  # multimask tracking settings
  multimask_output_for_tracking: true
  use_multimask_token_for_obj_ptr: true
  multimask_min_pt_num: 0
  multimask_max_pt_num: 1
  use_mlp_for_obj_ptr_proj: true
  # Compilation flag
  compile_image_encoder: False
eval/grounded_sam/sam2/configs/sam2/sam2_hiera_s.yaml
ADDED
@@ -0,0 +1,116 @@
# @package _global_

# Model
model:
  _target_: sam2.modeling.sam2_base.SAM2Base
  image_encoder:
    _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
    scalp: 1
    trunk:
      _target_: sam2.modeling.backbones.hieradet.Hiera
      embed_dim: 96
      num_heads: 1
      stages: [1, 2, 11, 2]
      global_att_blocks: [7, 10, 13]
      window_pos_embed_bkg_spatial_size: [7, 7]
    neck:
      _target_: sam2.modeling.backbones.image_encoder.FpnNeck
      position_encoding:
        _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
        num_pos_feats: 256
        normalize: true
        scale: null
        temperature: 10000
      d_model: 256
      backbone_channel_list: [768, 384, 192, 96]
      fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
      fpn_interp_model: nearest

  memory_attention:
    _target_: sam2.modeling.memory_attention.MemoryAttention
    d_model: 256
    pos_enc_at_input: true
    layer:
      _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
      activation: relu
      dim_feedforward: 2048
      dropout: 0.1
      pos_enc_at_attn: false
      self_attention:
        _target_: sam2.modeling.sam.transformer.RoPEAttention
        rope_theta: 10000.0
        feat_sizes: [32, 32]
        embedding_dim: 256
        num_heads: 1
        downsample_rate: 1
        dropout: 0.1
      d_model: 256
      pos_enc_at_cross_attn_keys: true
      pos_enc_at_cross_attn_queries: false
      cross_attention:
        _target_: sam2.modeling.sam.transformer.RoPEAttention
        rope_theta: 10000.0
        feat_sizes: [32, 32]
        rope_k_repeat: True
        embedding_dim: 256
        num_heads: 1
        downsample_rate: 1
        dropout: 0.1
        kv_in_dim: 64
    num_layers: 4

  memory_encoder:
    _target_: sam2.modeling.memory_encoder.MemoryEncoder
    out_dim: 64
    position_encoding:
      _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
      num_pos_feats: 64
      normalize: true
      scale: null
      temperature: 10000
    mask_downsampler:
      _target_: sam2.modeling.memory_encoder.MaskDownSampler
      kernel_size: 3
      stride: 2
      padding: 1
    fuser:
      _target_: sam2.modeling.memory_encoder.Fuser
      layer:
        _target_: sam2.modeling.memory_encoder.CXBlock
        dim: 256
        kernel_size: 7
        padding: 3
        layer_scale_init_value: 1e-6
        use_dwconv: True # depth-wise convs
      num_layers: 2

  num_maskmem: 7
  image_size: 1024
  # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
  sigmoid_scale_for_mem_enc: 20.0
  sigmoid_bias_for_mem_enc: -10.0
  use_mask_input_as_output_without_sam: true
  # Memory
  directly_add_no_mem_embed: true
  # use high-resolution feature map in the SAM mask decoder
  use_high_res_features_in_sam: true
  # output 3 masks on the first click on initial conditioning frames
  multimask_output_in_sam: true
  # SAM heads
  iou_prediction_use_sigmoid: True
  # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
  use_obj_ptrs_in_encoder: true
  add_tpos_enc_to_obj_ptrs: false
  only_obj_ptrs_in_the_past_for_eval: true
  # object occlusion prediction
  pred_obj_scores: true
  pred_obj_scores_mlp: true
  fixed_no_obj_ptr: true
  # multimask tracking settings
  multimask_output_for_tracking: true
  use_multimask_token_for_obj_ptr: true
  multimask_min_pt_num: 0
  multimask_max_pt_num: 1
  use_mlp_for_obj_ptr_proj: true
  # Compilation flag
  compile_image_encoder: False
eval/grounded_sam/sam2/configs/sam2/sam2_hiera_t.yaml
ADDED
@@ -0,0 +1,118 @@
# @package _global_

# Model
model:
  _target_: sam2.modeling.sam2_base.SAM2Base
  image_encoder:
    _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
    scalp: 1
    trunk:
      _target_: sam2.modeling.backbones.hieradet.Hiera
      embed_dim: 96
      num_heads: 1
      stages: [1, 2, 7, 2]
      global_att_blocks: [5, 7, 9]
      window_pos_embed_bkg_spatial_size: [7, 7]
    neck:
      _target_: sam2.modeling.backbones.image_encoder.FpnNeck
      position_encoding:
        _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
        num_pos_feats: 256
        normalize: true
        scale: null
        temperature: 10000
      d_model: 256
      backbone_channel_list: [768, 384, 192, 96]
      fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
      fpn_interp_model: nearest

  memory_attention:
    _target_: sam2.modeling.memory_attention.MemoryAttention
    d_model: 256
    pos_enc_at_input: true
    layer:
      _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
      activation: relu
      dim_feedforward: 2048
      dropout: 0.1
      pos_enc_at_attn: false
      self_attention:
        _target_: sam2.modeling.sam.transformer.RoPEAttention
        rope_theta: 10000.0
        feat_sizes: [32, 32]
        embedding_dim: 256
        num_heads: 1
        downsample_rate: 1
        dropout: 0.1
      d_model: 256
      pos_enc_at_cross_attn_keys: true
      pos_enc_at_cross_attn_queries: false
      cross_attention:
        _target_: sam2.modeling.sam.transformer.RoPEAttention
        rope_theta: 10000.0
        feat_sizes: [32, 32]
        rope_k_repeat: True
        embedding_dim: 256
        num_heads: 1
        downsample_rate: 1
        dropout: 0.1
        kv_in_dim: 64
    num_layers: 4

  memory_encoder:
    _target_: sam2.modeling.memory_encoder.MemoryEncoder
    out_dim: 64
    position_encoding:
      _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
      num_pos_feats: 64
      normalize: true
      scale: null
      temperature: 10000
    mask_downsampler:
      _target_: sam2.modeling.memory_encoder.MaskDownSampler
      kernel_size: 3
      stride: 2
      padding: 1
    fuser:
      _target_: sam2.modeling.memory_encoder.Fuser
      layer:
        _target_: sam2.modeling.memory_encoder.CXBlock
        dim: 256
        kernel_size: 7
        padding: 3
        layer_scale_init_value: 1e-6
        use_dwconv: True # depth-wise convs
      num_layers: 2

  num_maskmem: 7
  image_size: 1024
  # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
  # SAM decoder
  sigmoid_scale_for_mem_enc: 20.0
  sigmoid_bias_for_mem_enc: -10.0
  use_mask_input_as_output_without_sam: true
  # Memory
  directly_add_no_mem_embed: true
  # use high-resolution feature map in the SAM mask decoder
  use_high_res_features_in_sam: true
  # output 3 masks on the first click on initial conditioning frames
  multimask_output_in_sam: true
  # SAM heads
  iou_prediction_use_sigmoid: True
  # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
  use_obj_ptrs_in_encoder: true
  add_tpos_enc_to_obj_ptrs: false
  only_obj_ptrs_in_the_past_for_eval: true
  # object occlusion prediction
  pred_obj_scores: true
  pred_obj_scores_mlp: true
  fixed_no_obj_ptr: true
  # multimask tracking settings
  multimask_output_for_tracking: true
  use_multimask_token_for_obj_ptr: true
  multimask_min_pt_num: 0
  multimask_max_pt_num: 1
  use_mlp_for_obj_ptr_proj: true
  # Compilation flag
  # HieraT does not currently support compilation, should always be set to False
  compile_image_encoder: False
eval/grounded_sam/sam2/csrc/connected_components.cu
ADDED
@@ -0,0 +1,289 @@
1 |
+
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
+
// All rights reserved.
|
3 |
+
|
4 |
+
// This source code is licensed under the license found in the
|
5 |
+
// LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
// adapted from https://github.com/zsef123/Connected_components_PyTorch
|
8 |
+
// with license found in the LICENSE_cctorch file in the root directory.
|
9 |
+
#include <ATen/cuda/CUDAContext.h>
|
10 |
+
#include <cuda.h>
|
11 |
+
#include <cuda_runtime.h>
|
12 |
+
#include <torch/extension.h>
|
13 |
+
#include <torch/script.h>
|
14 |
+
#include <vector>
|
15 |
+
|
16 |
+
// 2d
|
17 |
+
#define BLOCK_ROWS 16
|
18 |
+
#define BLOCK_COLS 16
|
19 |
+
|
20 |
+
namespace cc2d {
|
21 |
+
|
22 |
+
template <typename T>
|
23 |
+
__device__ __forceinline__ unsigned char hasBit(T bitmap, unsigned char pos) {
|
24 |
+
return (bitmap >> pos) & 1;
|
25 |
+
}
|
26 |
+
|
27 |
+
__device__ int32_t find(const int32_t* s_buf, int32_t n) {
|
28 |
+
while (s_buf[n] != n)
|
29 |
+
n = s_buf[n];
|
30 |
+
return n;
|
31 |
+
}
|
32 |
+
|
33 |
+
__device__ int32_t find_n_compress(int32_t* s_buf, int32_t n) {
|
34 |
+
const int32_t id = n;
|
35 |
+
while (s_buf[n] != n) {
|
36 |
+
n = s_buf[n];
|
37 |
+
s_buf[id] = n;
|
38 |
+
}
|
39 |
+
return n;
|
40 |
+
}
|
41 |
+
|
42 |
+
__device__ void union_(int32_t* s_buf, int32_t a, int32_t b) {
|
43 |
+
bool done;
|
44 |
+
do {
|
45 |
+
a = find(s_buf, a);
|
46 |
+
b = find(s_buf, b);
|
47 |
+
|
48 |
+
if (a < b) {
|
49 |
+
int32_t old = atomicMin(s_buf + b, a);
|
50 |
+
done = (old == b);
|
51 |
+
b = old;
|
52 |
+
} else if (b < a) {
|
53 |
+
int32_t old = atomicMin(s_buf + a, b);
|
54 |
+
done = (old == a);
|
55 |
+
a = old;
|
56 |
+
} else
|
57 |
+
done = true;
|
58 |
+
|
59 |
+
} while (!done);
|
60 |
+
}
|
61 |
+
|
62 |
+
__global__ void
|
63 |
+
init_labeling(int32_t* label, const uint32_t W, const uint32_t H) {
|
64 |
+
const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
|
65 |
+
const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
|
66 |
+
const uint32_t idx = row * W + col;
|
67 |
+
|
68 |
+
if (row < H && col < W)
|
69 |
+
label[idx] = idx;
|
70 |
+
}
|
71 |
+
|
72 |
+
__global__ void
|
73 |
+
merge(uint8_t* img, int32_t* label, const uint32_t W, const uint32_t H) {
|
74 |
+
const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
|
75 |
+
const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
|
76 |
+
const uint32_t idx = row * W + col;
|
77 |
+
|
78 |
+
if (row >= H || col >= W)
|
79 |
+
return;
|
80 |
+
|
81 |
+
uint32_t P = 0;
|
82 |
+
|
83 |
+
if (img[idx])
|
84 |
+
P |= 0x777;
|
85 |
+
if (row + 1 < H && img[idx + W])
|
86 |
+
P |= 0x777 << 4;
|
87 |
+
if (col + 1 < W && img[idx + 1])
|
88 |
+
P |= 0x777 << 1;
|
89 |
+
|
90 |
+
if (col == 0)
|
91 |
+
P &= 0xEEEE;
|
92 |
+
if (col + 1 >= W)
|
93 |
+
P &= 0x3333;
|
94 |
+
else if (col + 2 >= W)
|
95 |
+
P &= 0x7777;
|
96 |
+
|
97 |
+
if (row == 0)
|
98 |
+
P &= 0xFFF0;
|
99 |
+
if (row + 1 >= H)
|
100 |
+
P &= 0xFF;
|
101 |
+
|
102 |
+
if (P > 0) {
|
103 |
+
// If need check about top-left pixel(if flag the first bit) and hit the
|
104 |
+
// top-left pixel
|
105 |
+
if (hasBit(P, 0) && img[idx - W - 1]) {
|
106 |
+
union_(label, idx, idx - 2 * W - 2); // top left block
|
107 |
+
}
|
108 |
+
|
109 |
+
if ((hasBit(P, 1) && img[idx - W]) || (hasBit(P, 2) && img[idx - W + 1]))
|
110 |
+
union_(label, idx, idx - 2 * W); // top bottom block
|
111 |
+
|
112 |
+
if (hasBit(P, 3) && img[idx + 2 - W])
|
113 |
+
union_(label, idx, idx - 2 * W + 2); // top right block
|
114 |
+
|
115 |
+
if ((hasBit(P, 4) && img[idx - 1]) || (hasBit(P, 8) && img[idx + W - 1]))
|
116 |
+
union_(label, idx, idx - 2); // just left block
|
117 |
+
}
|
118 |
+
}
|
119 |
+
|
120 |
+
__global__ void compression(int32_t* label, const int32_t W, const int32_t H) {
|
121 |
+
const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
|
122 |
+
const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
|
123 |
+
const uint32_t idx = row * W + col;
|
124 |
+
|
125 |
+
if (row < H && col < W)
|
126 |
+
find_n_compress(label, idx);
|
127 |
+
}
|
128 |
+
|
129 |
+
__global__ void final_labeling(
|
130 |
+
const uint8_t* img,
|
131 |
+
int32_t* label,
|
132 |
+
const int32_t W,
|
133 |
+
const int32_t H) {
|
134 |
+
const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
|
135 |
+
const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
|
136 |
+
const uint32_t idx = row * W + col;
|
137 |
+
|
138 |
+
if (row >= H || col >= W)
|
139 |
+
return;
|
140 |
+
|
141 |
+
int32_t y = label[idx] + 1;
|
142 |
+
|
143 |
+
if (img[idx])
|
144 |
+
label[idx] = y;
|
145 |
+
else
|
146 |
+
label[idx] = 0;
|
147 |
+
|
148 |
+
if (col + 1 < W) {
|
149 |
+
if (img[idx + 1])
|
150 |
+
label[idx + 1] = y;
|
151 |
+
else
|
152 |
+
label[idx + 1] = 0;
|
153 |
+
|
154 |
+
if (row + 1 < H) {
|
155 |
+
if (img[idx + W + 1])
|
156 |
+
label[idx + W + 1] = y;
|
157 |
+
else
|
158 |
+
label[idx + W + 1] = 0;
|
159 |
+
}
|
160 |
+
}
|
161 |
+
|
162 |
+
if (row + 1 < H) {
|
163 |
+
if (img[idx + W])
|
164 |
+
label[idx + W] = y;
|
165 |
+
else
|
166 |
+
label[idx + W] = 0;
|
167 |
+
}
|
168 |
+
}
|
169 |
+
|
170 |
+
__global__ void init_counting(
|
171 |
+
const int32_t* label,
|
172 |
+
int32_t* count_init,
|
173 |
+
const int32_t W,
|
174 |
+
const int32_t H) {
|
175 |
+
const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
|
176 |
+
const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
|
177 |
+
const uint32_t idx = row * W + col;
|
178 |
+
|
179 |
+
if (row >= H || col >= W)
|
180 |
+
return;
|
181 |
+
|
182 |
+
int32_t y = label[idx];
|
183 |
+
if (y > 0) {
|
184 |
+
int32_t count_idx = y - 1;
|
185 |
+
atomicAdd(count_init + count_idx, 1);
|
186 |
+
}
|
187 |
+
}
|
188 |
+
|
189 |
+
__global__ void final_counting(
|
190 |
+
const int32_t* label,
|
191 |
+
const int32_t* count_init,
|
192 |
+
int32_t* count_final,
|
193 |
+
const int32_t W,
|
194 |
+
const int32_t H) {
|
195 |
+
const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
|
196 |
+
const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
|
197 |
+
const uint32_t idx = row * W + col;
|
198 |
+
|
199 |
+
if (row >= H || col >= W)
|
200 |
+
return;
|
201 |
+
|
202 |
+
int32_t y = label[idx];
|
203 |
+
if (y > 0) {
|
204 |
+
int32_t count_idx = y - 1;
|
205 |
+
count_final[idx] = count_init[count_idx];
|
206 |
+
} else {
|
207 |
+
count_final[idx] = 0;
|
208 |
+
}
|
209 |
+
}
|
210 |
+
|
211 |
+
} // namespace cc2d
|
212 |
+
|
213 |
+
std::vector<torch::Tensor> get_connected_componnets(
|
214 |
+
const torch::Tensor& inputs) {
|
215 |
+
AT_ASSERTM(inputs.is_cuda(), "inputs must be a CUDA tensor");
|
216 |
+
AT_ASSERTM(inputs.ndimension() == 4, "inputs must be [N, 1, H, W] shape");
|
217 |
+
AT_ASSERTM(
|
218 |
+
inputs.scalar_type() == torch::kUInt8, "inputs must be a uint8 type");
|
219 |
+
|
220 |
+
const uint32_t N = inputs.size(0);
|
221 |
+
const uint32_t C = inputs.size(1);
|
222 |
+
const uint32_t H = inputs.size(2);
|
223 |
+
const uint32_t W = inputs.size(3);
|
224 |
+
|
225 |
+
AT_ASSERTM(C == 1, "inputs must be [N, 1, H, W] shape");
|
226 |
+
AT_ASSERTM((H % 2) == 0, "height must be an even number");
|
227 |
+
AT_ASSERTM((W % 2) == 0, "width must be an even number");
|
228 |
+
|
229 |
+
// label must be uint32_t
|
230 |
+
auto label_options =
|
231 |
+
torch::TensorOptions().dtype(torch::kInt32).device(inputs.device());
|
232 |
+
torch::Tensor labels = torch::zeros({N, C, H, W}, label_options);
|
233 |
+
torch::Tensor counts_init = torch::zeros({N, C, H, W}, label_options);
|
234 |
+
torch::Tensor counts_final = torch::zeros({N, C, H, W}, label_options);
|
235 |
+
|
236 |
+
dim3 grid = dim3(
|
237 |
+
((W + 1) / 2 + BLOCK_COLS - 1) / BLOCK_COLS,
|
238 |
+
((H + 1) / 2 + BLOCK_ROWS - 1) / BLOCK_ROWS);
|
239 |
+
dim3 block = dim3(BLOCK_COLS, BLOCK_ROWS);
|
240 |
+
dim3 grid_count =
|
241 |
+
dim3((W + BLOCK_COLS) / BLOCK_COLS, (H + BLOCK_ROWS) / BLOCK_ROWS);
|
242 |
+
dim3 block_count = dim3(BLOCK_COLS, BLOCK_ROWS);
|
243 |
+
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
244 |
+
|
245 |
+
for (int n = 0; n < N; n++) {
|
246 |
+
uint32_t offset = n * H * W;
|
247 |
+
|
248 |
+
cc2d::init_labeling<<<grid, block, 0, stream>>>(
|
249 |
+
labels.data_ptr<int32_t>() + offset, W, H);
|
250 |
+
cc2d::merge<<<grid, block, 0, stream>>>(
|
251 |
+
inputs.data_ptr<uint8_t>() + offset,
|
252 |
+
labels.data_ptr<int32_t>() + offset,
|
253 |
+
W,
|
254 |
+
H);
|
255 |
+
cc2d::compression<<<grid, block, 0, stream>>>(
|
256 |
+
labels.data_ptr<int32_t>() + offset, W, H);
|
257 |
+
cc2d::final_labeling<<<grid, block, 0, stream>>>(
|
258 |
+
inputs.data_ptr<uint8_t>() + offset,
|
259 |
+
labels.data_ptr<int32_t>() + offset,
|
260 |
+
W,
|
261 |
+
H);
|
262 |
+
|
263 |
+
// get the counting of each pixel
|
264 |
+
cc2d::init_counting<<<grid_count, block_count, 0, stream>>>(
|
265 |
+
labels.data_ptr<int32_t>() + offset,
|
266 |
+
counts_init.data_ptr<int32_t>() + offset,
|
267 |
+
W,
|
268 |
+
H);
|
269 |
+
cc2d::final_counting<<<grid_count, block_count, 0, stream>>>(
|
270 |
+
labels.data_ptr<int32_t>() + offset,
|
271 |
+
counts_init.data_ptr<int32_t>() + offset,
|
272 |
+
counts_final.data_ptr<int32_t>() + offset,
|
273 |
+
W,
|
274 |
+
H);
|
275 |
+
}
|
276 |
+
|
277 |
+
// returned values are [labels, counts]
|
278 |
+
std::vector<torch::Tensor> outputs;
|
279 |
+
outputs.push_back(labels);
|
280 |
+
outputs.push_back(counts_final);
|
281 |
+
return outputs;
|
282 |
+
}
|
283 |
+
|
284 |
+
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
285 |
+
m.def(
|
286 |
+
"get_connected_componnets",
|
287 |
+
&get_connected_componnets,
|
288 |
+
"get_connected_componnets");
|
289 |
+
}
|
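This kernel is exposed to Python through a torch CUDA extension, keeping the upstream spelling get_connected_componnets. A minimal sketch of calling it, assuming the extension has been compiled and is importable as sam2._C (the module name and build step are assumptions here, not part of this diff):

    # Minimal sketch; assumes the CUDA extension from this file has been built.
    import torch
    from sam2 import _C  # assumed name of the compiled extension

    # Input must be uint8, shaped [N, 1, H, W], with even H and W, on the GPU.
    mask = (torch.rand(1, 1, 64, 64, device="cuda") > 0.5).to(torch.uint8).contiguous()
    labels, counts = _C.get_connected_componnets(mask)
    # labels: per-pixel component id (0 = background); counts: component size per pixel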
eval/grounded_sam/sam2/modeling/__init__.py
ADDED
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
eval/grounded_sam/sam2/modeling/backbones/__init__.py
ADDED
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
eval/grounded_sam/sam2/modeling/backbones/hieradet.py
ADDED
@@ -0,0 +1,317 @@
1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
# This source code is licensed under the license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
import logging
|
8 |
+
from functools import partial
|
9 |
+
from typing import List, Tuple, Union
|
10 |
+
|
11 |
+
import torch
|
12 |
+
import torch.nn as nn
|
13 |
+
import torch.nn.functional as F
|
14 |
+
from iopath.common.file_io import g_pathmgr
|
15 |
+
|
16 |
+
from sam2.modeling.backbones.utils import (
|
17 |
+
PatchEmbed,
|
18 |
+
window_partition,
|
19 |
+
window_unpartition,
|
20 |
+
)
|
21 |
+
|
22 |
+
from sam2.modeling.sam2_utils import DropPath, MLP
|
23 |
+
|
24 |
+
|
25 |
+
def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor:
|
26 |
+
if pool is None:
|
27 |
+
return x
|
28 |
+
# (B, H, W, C) -> (B, C, H, W)
|
29 |
+
x = x.permute(0, 3, 1, 2)
|
30 |
+
x = pool(x)
|
31 |
+
# (B, C, H', W') -> (B, H', W', C)
|
32 |
+
x = x.permute(0, 2, 3, 1)
|
33 |
+
if norm:
|
34 |
+
x = norm(x)
|
35 |
+
|
36 |
+
return x
|
37 |
+
|
38 |
+
|
39 |
+
class MultiScaleAttention(nn.Module):
|
40 |
+
def __init__(
|
41 |
+
self,
|
42 |
+
dim: int,
|
43 |
+
dim_out: int,
|
44 |
+
num_heads: int,
|
45 |
+
q_pool: nn.Module = None,
|
46 |
+
):
|
47 |
+
super().__init__()
|
48 |
+
|
49 |
+
self.dim = dim
|
50 |
+
self.dim_out = dim_out
|
51 |
+
self.num_heads = num_heads
|
52 |
+
self.q_pool = q_pool
|
53 |
+
self.qkv = nn.Linear(dim, dim_out * 3)
|
54 |
+
self.proj = nn.Linear(dim_out, dim_out)
|
55 |
+
|
56 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
57 |
+
B, H, W, _ = x.shape
|
58 |
+
# qkv with shape (B, H * W, 3, nHead, C)
|
59 |
+
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
|
60 |
+
# q, k, v with shape (B, H * W, nheads, C)
|
61 |
+
q, k, v = torch.unbind(qkv, 2)
|
62 |
+
|
63 |
+
# Q pooling (for downsample at stage changes)
|
64 |
+
if self.q_pool:
|
65 |
+
q = do_pool(q.reshape(B, H, W, -1), self.q_pool)
|
66 |
+
H, W = q.shape[1:3] # downsampled shape
|
67 |
+
q = q.reshape(B, H * W, self.num_heads, -1)
|
68 |
+
|
69 |
+
# Torch's SDPA expects [B, nheads, H*W, C] so we transpose
|
70 |
+
x = F.scaled_dot_product_attention(
|
71 |
+
q.transpose(1, 2),
|
72 |
+
k.transpose(1, 2),
|
73 |
+
v.transpose(1, 2),
|
74 |
+
)
|
75 |
+
# Transpose back
|
76 |
+
x = x.transpose(1, 2)
|
77 |
+
x = x.reshape(B, H, W, -1)
|
78 |
+
|
79 |
+
x = self.proj(x)
|
80 |
+
|
81 |
+
return x
|
82 |
+
|
83 |
+
|
84 |
+
class MultiScaleBlock(nn.Module):
|
85 |
+
def __init__(
|
86 |
+
self,
|
87 |
+
dim: int,
|
88 |
+
dim_out: int,
|
89 |
+
num_heads: int,
|
90 |
+
mlp_ratio: float = 4.0,
|
91 |
+
drop_path: float = 0.0,
|
92 |
+
norm_layer: Union[nn.Module, str] = "LayerNorm",
|
93 |
+
q_stride: Tuple[int, int] = None,
|
94 |
+
act_layer: nn.Module = nn.GELU,
|
95 |
+
window_size: int = 0,
|
96 |
+
):
|
97 |
+
super().__init__()
|
98 |
+
|
99 |
+
if isinstance(norm_layer, str):
|
100 |
+
norm_layer = partial(getattr(nn, norm_layer), eps=1e-6)
|
101 |
+
|
102 |
+
self.dim = dim
|
103 |
+
self.dim_out = dim_out
|
104 |
+
self.norm1 = norm_layer(dim)
|
105 |
+
|
106 |
+
self.window_size = window_size
|
107 |
+
|
108 |
+
self.pool, self.q_stride = None, q_stride
|
109 |
+
if self.q_stride:
|
110 |
+
self.pool = nn.MaxPool2d(
|
111 |
+
kernel_size=q_stride, stride=q_stride, ceil_mode=False
|
112 |
+
)
|
113 |
+
|
114 |
+
self.attn = MultiScaleAttention(
|
115 |
+
dim,
|
116 |
+
dim_out,
|
117 |
+
num_heads=num_heads,
|
118 |
+
q_pool=self.pool,
|
119 |
+
)
|
120 |
+
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
|
121 |
+
|
122 |
+
self.norm2 = norm_layer(dim_out)
|
123 |
+
self.mlp = MLP(
|
124 |
+
dim_out,
|
125 |
+
int(dim_out * mlp_ratio),
|
126 |
+
dim_out,
|
127 |
+
num_layers=2,
|
128 |
+
activation=act_layer,
|
129 |
+
)
|
130 |
+
|
131 |
+
if dim != dim_out:
|
132 |
+
self.proj = nn.Linear(dim, dim_out)
|
133 |
+
|
134 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
135 |
+
shortcut = x # B, H, W, C
|
136 |
+
x = self.norm1(x)
|
137 |
+
|
138 |
+
# Skip connection
|
139 |
+
if self.dim != self.dim_out:
|
140 |
+
shortcut = do_pool(self.proj(x), self.pool)
|
141 |
+
|
142 |
+
# Window partition
|
143 |
+
window_size = self.window_size
|
144 |
+
if window_size > 0:
|
145 |
+
H, W = x.shape[1], x.shape[2]
|
146 |
+
x, pad_hw = window_partition(x, window_size)
|
147 |
+
|
148 |
+
# Window Attention + Q Pooling (if stage change)
|
149 |
+
x = self.attn(x)
|
150 |
+
if self.q_stride:
|
151 |
+
# Shapes have changed due to Q pooling
|
152 |
+
window_size = self.window_size // self.q_stride[0]
|
153 |
+
H, W = shortcut.shape[1:3]
|
154 |
+
|
155 |
+
pad_h = (window_size - H % window_size) % window_size
|
156 |
+
pad_w = (window_size - W % window_size) % window_size
|
157 |
+
pad_hw = (H + pad_h, W + pad_w)
|
158 |
+
|
159 |
+
# Reverse window partition
|
160 |
+
if self.window_size > 0:
|
161 |
+
x = window_unpartition(x, window_size, pad_hw, (H, W))
|
162 |
+
|
163 |
+
x = shortcut + self.drop_path(x)
|
164 |
+
# MLP
|
165 |
+
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
166 |
+
return x
|
167 |
+
|
168 |
+
|
169 |
+
class Hiera(nn.Module):
|
170 |
+
"""
|
171 |
+
Reference: https://arxiv.org/abs/2306.00989
|
172 |
+
"""
|
173 |
+
|
174 |
+
def __init__(
|
175 |
+
self,
|
176 |
+
embed_dim: int = 96, # initial embed dim
|
177 |
+
num_heads: int = 1, # initial number of heads
|
178 |
+
drop_path_rate: float = 0.0, # stochastic depth
|
179 |
+
q_pool: int = 3, # number of q_pool stages
|
180 |
+
q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages
|
181 |
+
stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage
|
182 |
+
dim_mul: float = 2.0, # dim_mul factor at stage shift
|
183 |
+
head_mul: float = 2.0, # head_mul factor at stage shift
|
184 |
+
window_pos_embed_bkg_spatial_size: Tuple[int, int] = (14, 14),
|
185 |
+
# window size per stage, when not using global att.
|
186 |
+
window_spec: Tuple[int, ...] = (
|
187 |
+
8,
|
188 |
+
4,
|
189 |
+
14,
|
190 |
+
7,
|
191 |
+
),
|
192 |
+
# global attn in these blocks
|
193 |
+
global_att_blocks: Tuple[int, ...] = (
|
194 |
+
12,
|
195 |
+
16,
|
196 |
+
20,
|
197 |
+
),
|
198 |
+
weights_path=None,
|
199 |
+
return_interm_layers=True, # return feats from every stage
|
200 |
+
):
|
201 |
+
super().__init__()
|
202 |
+
|
203 |
+
assert len(stages) == len(window_spec)
|
204 |
+
self.window_spec = window_spec
|
205 |
+
|
206 |
+
depth = sum(stages)
|
207 |
+
self.q_stride = q_stride
|
208 |
+
self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
|
209 |
+
assert 0 <= q_pool <= len(self.stage_ends[:-1])
|
210 |
+
self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]
|
211 |
+
self.return_interm_layers = return_interm_layers
|
212 |
+
|
213 |
+
self.patch_embed = PatchEmbed(
|
214 |
+
embed_dim=embed_dim,
|
215 |
+
)
|
216 |
+
# Which blocks have global att?
|
217 |
+
self.global_att_blocks = global_att_blocks
|
218 |
+
|
219 |
+
# Windowed positional embedding (https://arxiv.org/abs/2311.05613)
|
220 |
+
self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size
|
221 |
+
self.pos_embed = nn.Parameter(
|
222 |
+
torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size)
|
223 |
+
)
|
224 |
+
self.pos_embed_window = nn.Parameter(
|
225 |
+
torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0])
|
226 |
+
)
|
227 |
+
|
228 |
+
dpr = [
|
229 |
+
x.item() for x in torch.linspace(0, drop_path_rate, depth)
|
230 |
+
] # stochastic depth decay rule
|
231 |
+
|
232 |
+
cur_stage = 1
|
233 |
+
self.blocks = nn.ModuleList()
|
234 |
+
|
235 |
+
for i in range(depth):
|
236 |
+
dim_out = embed_dim
|
237 |
+
# lags by a block, so first block of
|
238 |
+
# next stage uses an initial window size
|
239 |
+
# of previous stage and final window size of current stage
|
240 |
+
window_size = self.window_spec[cur_stage - 1]
|
241 |
+
|
242 |
+
if self.global_att_blocks is not None:
|
243 |
+
window_size = 0 if i in self.global_att_blocks else window_size
|
244 |
+
|
245 |
+
if i - 1 in self.stage_ends:
|
246 |
+
dim_out = int(embed_dim * dim_mul)
|
247 |
+
num_heads = int(num_heads * head_mul)
|
248 |
+
cur_stage += 1
|
249 |
+
|
250 |
+
block = MultiScaleBlock(
|
251 |
+
dim=embed_dim,
|
252 |
+
dim_out=dim_out,
|
253 |
+
num_heads=num_heads,
|
254 |
+
drop_path=dpr[i],
|
255 |
+
q_stride=self.q_stride if i in self.q_pool_blocks else None,
|
256 |
+
window_size=window_size,
|
257 |
+
)
|
258 |
+
|
259 |
+
embed_dim = dim_out
|
260 |
+
self.blocks.append(block)
|
261 |
+
|
262 |
+
self.channel_list = (
|
263 |
+
[self.blocks[i].dim_out for i in self.stage_ends[::-1]]
|
264 |
+
if return_interm_layers
|
265 |
+
else [self.blocks[-1].dim_out]
|
266 |
+
)
|
267 |
+
|
268 |
+
if weights_path is not None:
|
269 |
+
with g_pathmgr.open(weights_path, "rb") as f:
|
270 |
+
chkpt = torch.load(f, map_location="cpu")
|
271 |
+
logging.info("loading Hiera", self.load_state_dict(chkpt, strict=False))
|
272 |
+
|
273 |
+
def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor:
|
274 |
+
h, w = hw
|
275 |
+
window_embed = self.pos_embed_window
|
276 |
+
pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
|
277 |
+
pos_embed = pos_embed + window_embed.tile(
|
278 |
+
[x // y for x, y in zip(pos_embed.shape, window_embed.shape)]
|
279 |
+
)
|
280 |
+
pos_embed = pos_embed.permute(0, 2, 3, 1)
|
281 |
+
return pos_embed
|
282 |
+
|
283 |
+
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
|
284 |
+
x = self.patch_embed(x)
|
285 |
+
# x: (B, H, W, C)
|
286 |
+
|
287 |
+
# Add pos embed
|
288 |
+
x = x + self._get_pos_embed(x.shape[1:3])
|
289 |
+
|
290 |
+
outputs = []
|
291 |
+
for i, blk in enumerate(self.blocks):
|
292 |
+
x = blk(x)
|
293 |
+
if (i == self.stage_ends[-1]) or (
|
294 |
+
i in self.stage_ends and self.return_interm_layers
|
295 |
+
):
|
296 |
+
feats = x.permute(0, 3, 1, 2)
|
297 |
+
outputs.append(feats)
|
298 |
+
|
299 |
+
return outputs
|
300 |
+
|
301 |
+
def get_layer_id(self, layer_name):
|
302 |
+
# https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
|
303 |
+
num_layers = self.get_num_layers()
|
304 |
+
|
305 |
+
if layer_name.find("rel_pos") != -1:
|
306 |
+
return num_layers + 1
|
307 |
+
elif layer_name.find("pos_embed") != -1:
|
308 |
+
return 0
|
309 |
+
elif layer_name.find("patch_embed") != -1:
|
310 |
+
return 0
|
311 |
+
elif layer_name.find("blocks") != -1:
|
312 |
+
return int(layer_name.split("blocks")[1].split(".")[1]) + 1
|
313 |
+
else:
|
314 |
+
return num_layers + 1
|
315 |
+
|
316 |
+
def get_num_layers(self) -> int:
|
317 |
+
return len(self.blocks)
|
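The Hiera trunk defined above returns one feature map per stage, with channels doubling and resolution halving at each stage transition. A minimal sketch wiring it up with the tiny-model values from sam2_hiera_t.yaml shown earlier (the shapes in the comment follow from those settings; this snippet is illustrative, not part of the diff):

    # Minimal sketch using the sam2_hiera_t.yaml trunk settings shown above.
    import torch
    from sam2.modeling.backbones.hieradet import Hiera

    trunk = Hiera(
        embed_dim=96,
        num_heads=1,
        stages=(1, 2, 7, 2),
        global_att_blocks=(5, 7, 9),
        window_pos_embed_bkg_spatial_size=(7, 7),
    )
    feats = trunk(torch.randn(1, 3, 1024, 1024))
    # Four stage outputs for a 1024x1024 input:
    # [1, 96, 256, 256], [1, 192, 128, 128], [1, 384, 64, 64], [1, 768, 32, 32]
    print([tuple(f.shape) for f in feats])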
eval/grounded_sam/sam2/modeling/backbones/image_encoder.py
ADDED
@@ -0,0 +1,134 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import List, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F


class ImageEncoder(nn.Module):
    def __init__(
        self,
        trunk: nn.Module,
        neck: nn.Module,
        scalp: int = 0,
    ):
        super().__init__()
        self.trunk = trunk
        self.neck = neck
        self.scalp = scalp
        assert (
            self.trunk.channel_list == self.neck.backbone_channel_list
        ), f"Channel dims of trunk and neck do not match. Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}"

    def forward(self, sample: torch.Tensor):
        # Forward through backbone
        features, pos = self.neck(self.trunk(sample))
        if self.scalp > 0:
            # Discard the lowest resolution features
            features, pos = features[: -self.scalp], pos[: -self.scalp]

        src = features[-1]
        output = {
            "vision_features": src,
            "vision_pos_enc": pos,
            "backbone_fpn": features,
        }
        return output


class FpnNeck(nn.Module):
    """
    A modified variant of Feature Pyramid Network (FPN) neck
    (we remove output conv and also do bicubic interpolation similar to ViT
    pos embed interpolation)
    """

    def __init__(
        self,
        position_encoding: nn.Module,
        d_model: int,
        backbone_channel_list: List[int],
        kernel_size: int = 1,
        stride: int = 1,
        padding: int = 0,
        fpn_interp_model: str = "bilinear",
        fuse_type: str = "sum",
        fpn_top_down_levels: Optional[List[int]] = None,
    ):
        """Initialize the neck
        :param trunk: the backbone
        :param position_encoding: the positional encoding to use
        :param d_model: the dimension of the model
        :param neck_norm: the normalization to use
        """
        super().__init__()
        self.position_encoding = position_encoding
        self.convs = nn.ModuleList()
        self.backbone_channel_list = backbone_channel_list
        self.d_model = d_model
        for dim in backbone_channel_list:
            current = nn.Sequential()
            current.add_module(
                "conv",
                nn.Conv2d(
                    in_channels=dim,
                    out_channels=d_model,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=padding,
                ),
            )

            self.convs.append(current)
        self.fpn_interp_model = fpn_interp_model
        assert fuse_type in ["sum", "avg"]
        self.fuse_type = fuse_type

        # levels to have top-down features in its outputs
        # e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3
        # have top-down propagation, while outputs of level 0 and level 1 have only
        # lateral features from the same backbone level.
        if fpn_top_down_levels is None:
            # default is to have top-down features on all levels
            fpn_top_down_levels = range(len(self.convs))
        self.fpn_top_down_levels = list(fpn_top_down_levels)

    def forward(self, xs: List[torch.Tensor]):

        out = [None] * len(self.convs)
        pos = [None] * len(self.convs)
        assert len(xs) == len(self.convs)
        # fpn forward pass
        # see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py
        prev_features = None
        # forward in top-down order (from low to high resolution)
        n = len(self.convs) - 1
        for i in range(n, -1, -1):
            x = xs[i]
            lateral_features = self.convs[n - i](x)
            if i in self.fpn_top_down_levels and prev_features is not None:
                top_down_features = F.interpolate(
                    prev_features.to(dtype=torch.float32),
                    scale_factor=2.0,
                    mode=self.fpn_interp_model,
                    align_corners=(
                        None if self.fpn_interp_model == "nearest" else False
                    ),
                    antialias=False,
                )
                prev_features = lateral_features + top_down_features
                if self.fuse_type == "avg":
                    prev_features /= 2
            else:
                prev_features = lateral_features
            x_out = prev_features
            out[i] = x_out
            pos[i] = self.position_encoding(x_out).to(x_out.dtype)

        return out, pos
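A small shape sketch for the ImageEncoder / FpnNeck wiring above: because forward pairs convs[n - i] with xs[i], backbone_channel_list is expected to run from the lowest-resolution (largest-channel) map down to the highest-resolution one. The channel list, d_model, and input sizes below are assumed values, not taken from this repository's configs.

# Sketch only, with assumed dimensions.
import torch
from sam2.modeling.backbones.image_encoder import FpnNeck
from sam2.modeling.position_encoding import PositionEmbeddingSine

neck = FpnNeck(
    position_encoding=PositionEmbeddingSine(num_pos_feats=256),
    d_model=256,
    backbone_channel_list=[768, 384, 192, 96],  # low-res -> high-res ordering (assumed)
)
xs = [torch.randn(1, c, s, s) for c, s in [(96, 256), (192, 128), (384, 64), (768, 32)]]
out, pos = neck(xs)
# each out[i] has d_model channels at the same resolution as xs[i];
# pos[i] is the sine positional encoding computed from out[i]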
eval/grounded_sam/sam2/modeling/backbones/utils.py
ADDED
@@ -0,0 +1,95 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""Some utilities for backbones, in particular for windowing"""

from typing import Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F


def window_partition(x, window_size):
    """
    Partition into non-overlapping windows with padding if needed.
    Args:
        x (tensor): input tokens with [B, H, W, C].
        window_size (int): window size.
    Returns:
        windows: windows after partition with [B * num_windows, window_size, window_size, C].
        (Hp, Wp): padded height and width before partition
    """
    B, H, W, C = x.shape

    pad_h = (window_size - H % window_size) % window_size
    pad_w = (window_size - W % window_size) % window_size
    if pad_h > 0 or pad_w > 0:
        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
    Hp, Wp = H + pad_h, W + pad_w

    x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
    windows = (
        x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    )
    return windows, (Hp, Wp)


def window_unpartition(windows, window_size, pad_hw, hw):
    """
    Window unpartition into original sequences and removing padding.
    Args:
        x (tensor): input tokens with [B * num_windows, window_size, window_size, C].
        window_size (int): window size.
        pad_hw (Tuple): padded height and width (Hp, Wp).
        hw (Tuple): original height and width (H, W) before padding.
    Returns:
        x: unpartitioned sequences with [B, H, W, C].
    """
    Hp, Wp = pad_hw
    H, W = hw
    B = windows.shape[0] // (Hp * Wp // window_size // window_size)
    x = windows.view(
        B, Hp // window_size, Wp // window_size, window_size, window_size, -1
    )
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)

    if Hp > H or Wp > W:
        x = x[:, :H, :W, :].contiguous()
    return x


class PatchEmbed(nn.Module):
    """
    Image to Patch Embedding.
    """

    def __init__(
        self,
        kernel_size: Tuple[int, ...] = (7, 7),
        stride: Tuple[int, ...] = (4, 4),
        padding: Tuple[int, ...] = (3, 3),
        in_chans: int = 3,
        embed_dim: int = 768,
    ):
        """
        Args:
            kernel_size (Tuple): kernel size of the projection layer.
            stride (Tuple): stride of the projection layer.
            padding (Tuple): padding size of the projection layer.
            in_chans (int): Number of input image channels.
            embed_dim (int): embed_dim (int): Patch embedding dimension.
        """
        super().__init__()
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj(x)
        # B C H W -> B H W C
        x = x.permute(0, 2, 3, 1)
        return x
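A quick round-trip sketch for the windowing helpers above; the tensor sizes are arbitrary assumptions.

# Sketch only: window_partition pads H and W up to a multiple of window_size,
# and window_unpartition inverts it exactly.
import torch

x = torch.randn(2, 50, 70, 96)                        # (B, H, W, C), assumed sizes
windows, (Hp, Wp) = window_partition(x, window_size=14)
# windows: (B * (Hp//14) * (Wp//14), 14, 14, 96); here Hp=56, Wp=70
x_back = window_unpartition(windows, 14, (Hp, Wp), (50, 70))
assert torch.equal(x_back, x)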
eval/grounded_sam/sam2/modeling/memory_attention.py
ADDED
@@ -0,0 +1,169 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import Optional

import torch
from torch import nn, Tensor

from sam2.modeling.sam.transformer import RoPEAttention

from sam2.modeling.sam2_utils import get_activation_fn, get_clones


class MemoryAttentionLayer(nn.Module):

    def __init__(
        self,
        activation: str,
        cross_attention: nn.Module,
        d_model: int,
        dim_feedforward: int,
        dropout: float,
        pos_enc_at_attn: bool,
        pos_enc_at_cross_attn_keys: bool,
        pos_enc_at_cross_attn_queries: bool,
        self_attention: nn.Module,
    ):
        super().__init__()
        self.d_model = d_model
        self.dim_feedforward = dim_feedforward
        self.dropout_value = dropout
        self.self_attn = self_attention
        self.cross_attn_image = cross_attention

        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation_str = activation
        self.activation = get_activation_fn(activation)

        # Where to add pos enc
        self.pos_enc_at_attn = pos_enc_at_attn
        self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
        self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys

    def _forward_sa(self, tgt, query_pos):
        # Self-Attention
        tgt2 = self.norm1(tgt)
        q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
        tgt2 = self.self_attn(q, k, v=tgt2)
        tgt = tgt + self.dropout1(tgt2)
        return tgt

    def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0):
        kwds = {}
        if num_k_exclude_rope > 0:
            assert isinstance(self.cross_attn_image, RoPEAttention)
            kwds = {"num_k_exclude_rope": num_k_exclude_rope}

        # Cross-Attention
        tgt2 = self.norm2(tgt)
        tgt2 = self.cross_attn_image(
            q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
            k=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
            v=memory,
            **kwds,
        )
        tgt = tgt + self.dropout2(tgt2)
        return tgt

    def forward(
        self,
        tgt,
        memory,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
        num_k_exclude_rope: int = 0,
    ) -> torch.Tensor:

        # Self-Attn, Cross-Attn
        tgt = self._forward_sa(tgt, query_pos)
        tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope)
        # MLP
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt


class MemoryAttention(nn.Module):
    def __init__(
        self,
        d_model: int,
        pos_enc_at_input: bool,
        layer: nn.Module,
        num_layers: int,
        batch_first: bool = True,  # Do layers expect batch first input?
    ):
        super().__init__()
        self.d_model = d_model
        self.layers = get_clones(layer, num_layers)
        self.num_layers = num_layers
        self.norm = nn.LayerNorm(d_model)
        self.pos_enc_at_input = pos_enc_at_input
        self.batch_first = batch_first

    def forward(
        self,
        curr: torch.Tensor,  # self-attention inputs
        memory: torch.Tensor,  # cross-attention inputs
        curr_pos: Optional[Tensor] = None,  # pos_enc for self-attention inputs
        memory_pos: Optional[Tensor] = None,  # pos_enc for cross-attention inputs
        num_obj_ptr_tokens: int = 0,  # number of object pointer *tokens*
    ):
        if isinstance(curr, list):
            assert isinstance(curr_pos, list)
            assert len(curr) == len(curr_pos) == 1
            curr, curr_pos = (
                curr[0],
                curr_pos[0],
            )

        assert (
            curr.shape[1] == memory.shape[1]
        ), "Batch size must be the same for curr and memory"

        output = curr
        if self.pos_enc_at_input and curr_pos is not None:
            output = output + 0.1 * curr_pos

        if self.batch_first:
            # Convert to batch first
            output = output.transpose(0, 1)
            curr_pos = curr_pos.transpose(0, 1)
            memory = memory.transpose(0, 1)
            memory_pos = memory_pos.transpose(0, 1)

        for layer in self.layers:
            kwds = {}
            if isinstance(layer.cross_attn_image, RoPEAttention):
                kwds = {"num_k_exclude_rope": num_obj_ptr_tokens}

            output = layer(
                tgt=output,
                memory=memory,
                pos=memory_pos,
                query_pos=curr_pos,
                **kwds,
            )
        normed_output = self.norm(output)

        if self.batch_first:
            # Convert back to seq first
            normed_output = normed_output.transpose(0, 1)
            curr_pos = curr_pos.transpose(0, 1)

        return normed_output
eval/grounded_sam/sam2/modeling/memory_encoder.py
ADDED
@@ -0,0 +1,181 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import math
from typing import Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from sam2.modeling.sam2_utils import DropPath, get_clones, LayerNorm2d


class MaskDownSampler(nn.Module):
    """
    Progressively downsample a mask by total_stride, each time by stride.
    Note that LayerNorm is applied per *token*, like in ViT.

    With each downsample (by a factor stride**2), channel capacity increases by the same factor.
    In the end, we linearly project to embed_dim channels.
    """

    def __init__(
        self,
        embed_dim=256,
        kernel_size=4,
        stride=4,
        padding=0,
        total_stride=16,
        activation=nn.GELU,
    ):
        super().__init__()
        num_layers = int(math.log2(total_stride) // math.log2(stride))
        assert stride**num_layers == total_stride
        self.encoder = nn.Sequential()
        mask_in_chans, mask_out_chans = 1, 1
        for _ in range(num_layers):
            mask_out_chans = mask_in_chans * (stride**2)
            self.encoder.append(
                nn.Conv2d(
                    mask_in_chans,
                    mask_out_chans,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=padding,
                )
            )
            self.encoder.append(LayerNorm2d(mask_out_chans))
            self.encoder.append(activation())
            mask_in_chans = mask_out_chans

        self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1))

    def forward(self, x):
        return self.encoder(x)


# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
class CXBlock(nn.Module):
    r"""ConvNeXt Block. There are two equivalent implementations:
    (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
    (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
    We use (2) as we find it slightly faster in PyTorch

    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
    """

    def __init__(
        self,
        dim,
        kernel_size=7,
        padding=3,
        drop_path=0.0,
        layer_scale_init_value=1e-6,
        use_dwconv=True,
    ):
        super().__init__()
        self.dwconv = nn.Conv2d(
            dim,
            dim,
            kernel_size=kernel_size,
            padding=padding,
            groups=dim if use_dwconv else 1,
        )  # depthwise conv
        self.norm = LayerNorm2d(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(
            dim, 4 * dim
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.gamma = (
            nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
            if layer_scale_init_value > 0
            else None
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, x):
        input = x
        x = self.dwconv(x)
        x = self.norm(x)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.gamma is not None:
            x = self.gamma * x
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)

        x = input + self.drop_path(x)
        return x


class Fuser(nn.Module):
    def __init__(self, layer, num_layers, dim=None, input_projection=False):
        super().__init__()
        self.proj = nn.Identity()
        self.layers = get_clones(layer, num_layers)

        if input_projection:
            assert dim is not None
            self.proj = nn.Conv2d(dim, dim, kernel_size=1)

    def forward(self, x):
        # normally x: (N, C, H, W)
        x = self.proj(x)
        for layer in self.layers:
            x = layer(x)
        return x


class MemoryEncoder(nn.Module):
    def __init__(
        self,
        out_dim,
        mask_downsampler,
        fuser,
        position_encoding,
        in_dim=256,  # in_dim of pix_feats
    ):
        super().__init__()

        self.mask_downsampler = mask_downsampler

        self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1)
        self.fuser = fuser
        self.position_encoding = position_encoding
        self.out_proj = nn.Identity()
        if out_dim != in_dim:
            self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)

    def forward(
        self,
        pix_feat: torch.Tensor,
        masks: torch.Tensor,
        skip_mask_sigmoid: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        ## Process masks
        # sigmoid, so that less domain shift from gt masks which are bool
        if not skip_mask_sigmoid:
            masks = F.sigmoid(masks)
        masks = self.mask_downsampler(masks)

        ## Fuse pix_feats and downsampled masks
        # in case the visual features are on CPU, cast them to CUDA
        pix_feat = pix_feat.to(masks.device)

        x = self.pix_feat_proj(pix_feat)
        x = x + masks
        x = self.fuser(x)
        x = self.out_proj(x)

        pos = self.position_encoding(x).to(x.dtype)

        return {"vision_features": x, "vision_pos_enc": [pos]}
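A small sketch of the downsampling arithmetic in MaskDownSampler above; the input mask size is an assumption.

# Sketch only: stride=4 with total_stride=16 gives 2 conv stages,
# so a 1x1024x1024 mask becomes an embed_dim x 64 x 64 feature map.
import torch
from sam2.modeling.memory_encoder import MaskDownSampler

downsampler = MaskDownSampler(embed_dim=256, kernel_size=4, stride=4, total_stride=16)
mask = torch.randn(1, 1, 1024, 1024)  # assumed high-resolution mask logits
feat = downsampler(mask)
print(feat.shape)  # torch.Size([1, 256, 64, 64])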
eval/grounded_sam/sam2/modeling/position_encoding.py
ADDED
@@ -0,0 +1,221 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import math
from typing import Any, Optional, Tuple

import numpy as np

import torch
from torch import nn


class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention Is All You Need paper, generalized to work on images.
    """

    def __init__(
        self,
        num_pos_feats,
        temperature: int = 10000,
        normalize: bool = True,
        scale: Optional[float] = None,
    ):
        super().__init__()
        assert num_pos_feats % 2 == 0, "Expecting even model width"
        self.num_pos_feats = num_pos_feats // 2
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

        self.cache = {}

    def _encode_xy(self, x, y):
        # The positions are expected to be normalized
        assert len(x) == len(y) and x.ndim == y.ndim == 1
        x_embed = x * self.scale
        y_embed = y * self.scale

        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)

        pos_x = x_embed[:, None] / dim_t
        pos_y = y_embed[:, None] / dim_t
        pos_x = torch.stack(
            (pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2
        ).flatten(1)
        pos_y = torch.stack(
            (pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2
        ).flatten(1)
        return pos_x, pos_y

    @torch.no_grad()
    def encode_boxes(self, x, y, w, h):
        pos_x, pos_y = self._encode_xy(x, y)
        pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
        return pos

    encode = encode_boxes  # Backwards compatibility

    @torch.no_grad()
    def encode_points(self, x, y, labels):
        (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape
        assert bx == by and nx == ny and bx == bl and nx == nl
        pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
        pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
        pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
        return pos

    @torch.no_grad()
    def forward(self, x: torch.Tensor):
        cache_key = (x.shape[-2], x.shape[-1])
        if cache_key in self.cache:
            return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1)
        y_embed = (
            torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device)
            .view(1, -1, 1)
            .repeat(x.shape[0], 1, x.shape[-1])
        )
        x_embed = (
            torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, device=x.device)
            .view(1, 1, -1)
            .repeat(x.shape[0], x.shape[-2], 1)
        )

        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)

        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        self.cache[cache_key] = pos[0]
        return pos


class PositionEmbeddingRandom(nn.Module):
    """
    Positional encoding using random spatial frequencies.
    """

    def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
        super().__init__()
        if scale is None or scale <= 0.0:
            scale = 1.0
        self.register_buffer(
            "positional_encoding_gaussian_matrix",
            scale * torch.randn((2, num_pos_feats)),
        )

    def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
        """Positionally encode points that are normalized to [0,1]."""
        # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
        coords = 2 * coords - 1
        coords = coords @ self.positional_encoding_gaussian_matrix
        coords = 2 * np.pi * coords
        # outputs d_1 x ... x d_n x C shape
        return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)

    def forward(self, size: Tuple[int, int]) -> torch.Tensor:
        """Generate positional encoding for a grid of the specified size."""
        h, w = size
        device: Any = self.positional_encoding_gaussian_matrix.device
        grid = torch.ones((h, w), device=device, dtype=torch.float32)
        y_embed = grid.cumsum(dim=0) - 0.5
        x_embed = grid.cumsum(dim=1) - 0.5
        y_embed = y_embed / h
        x_embed = x_embed / w

        pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
        return pe.permute(2, 0, 1)  # C x H x W

    def forward_with_coords(
        self, coords_input: torch.Tensor, image_size: Tuple[int, int]
    ) -> torch.Tensor:
        """Positionally encode points that are not normalized to [0,1]."""
        coords = coords_input.clone()
        coords[:, :, 0] = coords[:, :, 0] / image_size[1]
        coords[:, :, 1] = coords[:, :, 1] / image_size[0]
        return self._pe_encoding(coords.to(torch.float))  # B x N x C


# Rotary Positional Encoding, adapted from:
# 1. https://github.com/meta-llama/codellama/blob/main/llama/model.py
# 2. https://github.com/naver-ai/rope-vit
# 3. https://github.com/lucidrains/rotary-embedding-torch


def init_t_xy(end_x: int, end_y: int):
    t = torch.arange(end_x * end_y, dtype=torch.float32)
    t_x = (t % end_x).float()
    t_y = torch.div(t, end_x, rounding_mode="floor").float()
    return t_x, t_y


def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0):
    freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
    freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))

    t_x, t_y = init_t_xy(end_x, end_y)
    freqs_x = torch.outer(t_x, freqs_x)
    freqs_y = torch.outer(t_y, freqs_y)
    freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x)
    freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y)
    return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1)


def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
    ndim = x.ndim
    assert 0 <= 1 < ndim
    assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
    shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)]
    return freqs_cis.view(*shape)


def apply_rotary_enc(
    xq: torch.Tensor,
    xk: torch.Tensor,
    freqs_cis: torch.Tensor,
    repeat_freqs_k: bool = False,
):
    xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
    xk_ = (
        torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
        if xk.shape[-2] != 0
        else None
    )
    freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
    xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
    if xk_ is None:
        # no keys to rotate, due to dropout
        return xq_out.type_as(xq).to(xq.device), xk
    # repeat freqs along seq_len dim to match k seq_len
    if repeat_freqs_k:
        r = xk_.shape[-2] // xq_.shape[-2]
        if freqs_cis.is_cuda:
            freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1)
        else:
            # torch.repeat on complex numbers may not be supported on non-CUDA devices
            # (freqs_cis has 4 dims and we repeat on dim 2) so we use expand + flatten
            freqs_cis = freqs_cis.unsqueeze(2).expand(-1, -1, r, -1, -1).flatten(2, 3)
    xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
    return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)
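A brief usage sketch for PositionEmbeddingSine above; the feature-map size is an arbitrary assumption.

# Sketch only: the sine embedding depends only on the spatial size of its input
# (values are ignored), and results are cached per (H, W).
import torch
from sam2.modeling.position_encoding import PositionEmbeddingSine

pe = PositionEmbeddingSine(num_pos_feats=256)
feat = torch.randn(2, 256, 64, 64)  # assumed (B, C, H, W) feature map
pos = pe(feat)
print(pos.shape)  # torch.Size([2, 256, 64, 64]); a second 64x64 call hits the cache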
eval/grounded_sam/sam2/modeling/sam/__init__.py
ADDED
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
eval/grounded_sam/sam2/modeling/sam/mask_decoder.py
ADDED
@@ -0,0 +1,295 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import List, Optional, Tuple, Type

import torch
from torch import nn

from sam2.modeling.sam2_utils import LayerNorm2d, MLP


class MaskDecoder(nn.Module):
    def __init__(
        self,
        *,
        transformer_dim: int,
        transformer: nn.Module,
        num_multimask_outputs: int = 3,
        activation: Type[nn.Module] = nn.GELU,
        iou_head_depth: int = 3,
        iou_head_hidden_dim: int = 256,
        use_high_res_features: bool = False,
        iou_prediction_use_sigmoid=False,
        dynamic_multimask_via_stability=False,
        dynamic_multimask_stability_delta=0.05,
        dynamic_multimask_stability_thresh=0.98,
        pred_obj_scores: bool = False,
        pred_obj_scores_mlp: bool = False,
        use_multimask_token_for_obj_ptr: bool = False,
    ) -> None:
        """
        Predicts masks given an image and prompt embeddings, using a
        transformer architecture.

        Arguments:
          transformer_dim (int): the channel dimension of the transformer
          transformer (nn.Module): the transformer used to predict masks
          num_multimask_outputs (int): the number of masks to predict
            when disambiguating masks
          activation (nn.Module): the type of activation to use when
            upscaling masks
          iou_head_depth (int): the depth of the MLP used to predict
            mask quality
          iou_head_hidden_dim (int): the hidden dimension of the MLP
            used to predict mask quality
        """
        super().__init__()
        self.transformer_dim = transformer_dim
        self.transformer = transformer

        self.num_multimask_outputs = num_multimask_outputs

        self.iou_token = nn.Embedding(1, transformer_dim)
        self.num_mask_tokens = num_multimask_outputs + 1
        self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)

        self.pred_obj_scores = pred_obj_scores
        if self.pred_obj_scores:
            self.obj_score_token = nn.Embedding(1, transformer_dim)
        self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr

        self.output_upscaling = nn.Sequential(
            nn.ConvTranspose2d(
                transformer_dim, transformer_dim // 4, kernel_size=2, stride=2
            ),
            LayerNorm2d(transformer_dim // 4),
            activation(),
            nn.ConvTranspose2d(
                transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2
            ),
            activation(),
        )
        self.use_high_res_features = use_high_res_features
        if use_high_res_features:
            self.conv_s0 = nn.Conv2d(
                transformer_dim, transformer_dim // 8, kernel_size=1, stride=1
            )
            self.conv_s1 = nn.Conv2d(
                transformer_dim, transformer_dim // 4, kernel_size=1, stride=1
            )

        self.output_hypernetworks_mlps = nn.ModuleList(
            [
                MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
                for i in range(self.num_mask_tokens)
            ]
        )

        self.iou_prediction_head = MLP(
            transformer_dim,
            iou_head_hidden_dim,
            self.num_mask_tokens,
            iou_head_depth,
            sigmoid_output=iou_prediction_use_sigmoid,
        )
        if self.pred_obj_scores:
            self.pred_obj_score_head = nn.Linear(transformer_dim, 1)
            if pred_obj_scores_mlp:
                self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3)

        # When outputting a single mask, optionally we can dynamically fall back to the best
        # multimask output token if the single mask output token gives low stability scores.
        self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
        self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
        self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh

    def forward(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        multimask_output: bool,
        repeat_image: bool,
        high_res_features: Optional[List[torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Predict masks given image and prompt embeddings.

        Arguments:
          image_embeddings (torch.Tensor): the embeddings from the image encoder
          image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
          sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
          dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
          multimask_output (bool): Whether to return multiple masks or a single
            mask.

        Returns:
          torch.Tensor: batched predicted masks
          torch.Tensor: batched predictions of mask quality
          torch.Tensor: batched SAM token for mask output
        """
        masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
            image_embeddings=image_embeddings,
            image_pe=image_pe,
            sparse_prompt_embeddings=sparse_prompt_embeddings,
            dense_prompt_embeddings=dense_prompt_embeddings,
            repeat_image=repeat_image,
            high_res_features=high_res_features,
        )

        # Select the correct mask or masks for output
        if multimask_output:
            masks = masks[:, 1:, :, :]
            iou_pred = iou_pred[:, 1:]
        elif self.dynamic_multimask_via_stability and not self.training:
            masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
        else:
            masks = masks[:, 0:1, :, :]
            iou_pred = iou_pred[:, 0:1]

        if multimask_output and self.use_multimask_token_for_obj_ptr:
            sam_tokens_out = mask_tokens_out[:, 1:]  # [b, 3, c] shape
        else:
            # Take the mask output token. Here we *always* use the token for single mask output.
            # At test time, even if we track after 1-click (and using multimask_output=True),
            # we still take the single mask token here. The rationale is that we always track
            # after multiple clicks during training, so the past tokens seen during training
            # are always the single mask token (and we'll let it be the object-memory token).
            sam_tokens_out = mask_tokens_out[:, 0:1]  # [b, 1, c] shape

        # Prepare output
        return masks, iou_pred, sam_tokens_out, object_score_logits

    def predict_masks(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        repeat_image: bool,
        high_res_features: Optional[List[torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Predicts masks. See 'forward' for more details."""
        # Concatenate output tokens
        s = 0
        if self.pred_obj_scores:
            output_tokens = torch.cat(
                [
                    self.obj_score_token.weight,
                    self.iou_token.weight,
                    self.mask_tokens.weight,
                ],
                dim=0,
            )
            s = 1
        else:
            output_tokens = torch.cat(
                [self.iou_token.weight, self.mask_tokens.weight], dim=0
            )
        output_tokens = output_tokens.unsqueeze(0).expand(
            sparse_prompt_embeddings.size(0), -1, -1
        )
        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)

        # Expand per-image data in batch direction to be per-mask
        if repeat_image:
            src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
        else:
            assert image_embeddings.shape[0] == tokens.shape[0]
            src = image_embeddings
        src = src + dense_prompt_embeddings
        assert (
            image_pe.size(0) == 1
        ), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)"
        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
        b, c, h, w = src.shape

        # Run the transformer
        hs, src = self.transformer(src, pos_src, tokens)
        iou_token_out = hs[:, s, :]
        mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :]

        # Upscale mask embeddings and predict masks using the mask tokens
        src = src.transpose(1, 2).view(b, c, h, w)
        if not self.use_high_res_features:
            upscaled_embedding = self.output_upscaling(src)
        else:
            dc1, ln1, act1, dc2, act2 = self.output_upscaling
            feat_s0, feat_s1 = high_res_features
            upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
            upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)

        hyper_in_list: List[torch.Tensor] = []
        for i in range(self.num_mask_tokens):
            hyper_in_list.append(
                self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])
            )
        hyper_in = torch.stack(hyper_in_list, dim=1)
        b, c, h, w = upscaled_embedding.shape
        masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)

        # Generate mask quality predictions
        iou_pred = self.iou_prediction_head(iou_token_out)
        if self.pred_obj_scores:
            assert s == 1
            object_score_logits = self.pred_obj_score_head(hs[:, 0, :])
        else:
            # Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1
            object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1)

        return masks, iou_pred, mask_tokens_out, object_score_logits

    def _get_stability_scores(self, mask_logits):
        """
        Compute stability scores of the mask logits based on the IoU between upper and
        lower thresholds.
        """
        mask_logits = mask_logits.flatten(-2)
        stability_delta = self.dynamic_multimask_stability_delta
        area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
        area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
        stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
        return stability_scores

    def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
        """
        When outputting a single mask, if the stability score from the current single-mask
        output (based on output token 0) falls below a threshold, we instead select from
        multi-mask outputs (based on output token 1~3) the mask with the highest predicted
        IoU score. This is intended to ensure a valid mask for both clicking and tracking.
        """
        # The best mask from multimask output tokens (1~3)
        multimask_logits = all_mask_logits[:, 1:, :, :]
        multimask_iou_scores = all_iou_scores[:, 1:]
        best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1)
        batch_inds = torch.arange(
            multimask_iou_scores.size(0), device=all_iou_scores.device
        )
        best_multimask_logits = multimask_logits[batch_inds, best_scores_inds]
        best_multimask_logits = best_multimask_logits.unsqueeze(1)
        best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds]
        best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1)

        # The mask from singlemask output token 0 and its stability score
        singlemask_logits = all_mask_logits[:, 0:1, :, :]
        singlemask_iou_scores = all_iou_scores[:, 0:1]
        stability_scores = self._get_stability_scores(singlemask_logits)
        is_stable = stability_scores >= self.dynamic_multimask_stability_thresh

        # Dynamically fall back to best multimask output upon low stability scores.
        mask_logits_out = torch.where(
            is_stable[..., None, None].expand_as(singlemask_logits),
            singlemask_logits,
            best_multimask_logits,
        )
        iou_scores_out = torch.where(
            is_stable.expand_as(singlemask_iou_scores),
            singlemask_iou_scores,
            best_multimask_iou_scores,
        )
        return mask_logits_out, iou_scores_out
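A tiny numeric sketch of _get_stability_scores above: the score is the IoU between the mask thresholded at +delta and at -delta, so sharp logits score near 1. The toy logits and the delta value below are assumptions for illustration.

# Sketch only: stability = |logits > +delta| / |logits > -delta|.
import torch

logits = torch.tensor([[[[5.0, 4.0], [-6.0, 0.01]]]])   # (B=1, 1, 2, 2) toy mask logits
delta = 0.05
area_i = (logits.flatten(-2) > delta).sum(-1).float()   # 2 pixels clearly inside
area_u = (logits.flatten(-2) > -delta).sum(-1).float()  # 3 pixels in the loose band
print(area_i / area_u)  # tensor([[0.6667]]) -> the 0.01 logit makes this mask "unstable"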
eval/grounded_sam/sam2/modeling/sam/prompt_encoder.py
ADDED
@@ -0,0 +1,182 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import Optional, Tuple, Type

import torch
from torch import nn

from sam2.modeling.position_encoding import PositionEmbeddingRandom

from sam2.modeling.sam2_utils import LayerNorm2d


class PromptEncoder(nn.Module):
    def __init__(
        self,
        embed_dim: int,
        image_embedding_size: Tuple[int, int],
        input_image_size: Tuple[int, int],
        mask_in_chans: int,
        activation: Type[nn.Module] = nn.GELU,
    ) -> None:
        """
        Encodes prompts for input to SAM's mask decoder.

        Arguments:
          embed_dim (int): The prompts' embedding dimension
          image_embedding_size (tuple(int, int)): The spatial size of the
            image embedding, as (H, W).
          input_image_size (int): The padded size of the image as input
            to the image encoder, as (H, W).
          mask_in_chans (int): The number of hidden channels used for
            encoding input masks.
          activation (nn.Module): The activation to use when encoding
            input masks.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.input_image_size = input_image_size
        self.image_embedding_size = image_embedding_size
        self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)

        self.num_point_embeddings: int = 4  # pos/neg point + 2 box corners
        point_embeddings = [
            nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)
        ]
        self.point_embeddings = nn.ModuleList(point_embeddings)
        self.not_a_point_embed = nn.Embedding(1, embed_dim)

        self.mask_input_size = (
            4 * image_embedding_size[0],
            4 * image_embedding_size[1],
        )
        self.mask_downscaling = nn.Sequential(
            nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans // 4),
            activation(),
            nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans),
            activation(),
            nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
        )
        self.no_mask_embed = nn.Embedding(1, embed_dim)

    def get_dense_pe(self) -> torch.Tensor:
        """
        Returns the positional encoding used to encode point prompts,
        applied to a dense set of points the shape of the image encoding.

        Returns:
          torch.Tensor: Positional encoding with shape
            1x(embed_dim)x(embedding_h)x(embedding_w)
        """
        return self.pe_layer(self.image_embedding_size).unsqueeze(0)

    def _embed_points(
        self,
        points: torch.Tensor,
        labels: torch.Tensor,
        pad: bool,
    ) -> torch.Tensor:
        """Embeds point prompts."""
        points = points + 0.5  # Shift to center of pixel
        if pad:
            padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
            padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
            points = torch.cat([points, padding_point], dim=1)
            labels = torch.cat([labels, padding_label], dim=1)
        point_embedding = self.pe_layer.forward_with_coords(
            points, self.input_image_size
        )
        point_embedding[labels == -1] = 0.0
        point_embedding[labels == -1] += self.not_a_point_embed.weight
        point_embedding[labels == 0] += self.point_embeddings[0].weight
        point_embedding[labels == 1] += self.point_embeddings[1].weight
        point_embedding[labels == 2] += self.point_embeddings[2].weight
        point_embedding[labels == 3] += self.point_embeddings[3].weight
        return point_embedding

    def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
        """Embeds box prompts."""
        boxes = boxes + 0.5  # Shift to center of pixel
        coords = boxes.reshape(-1, 2, 2)
        corner_embedding = self.pe_layer.forward_with_coords(
            coords, self.input_image_size
        )
        corner_embedding[:, 0, :] += self.point_embeddings[2].weight
        corner_embedding[:, 1, :] += self.point_embeddings[3].weight
        return corner_embedding

    def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
        """Embeds mask inputs."""
        mask_embedding = self.mask_downscaling(masks)
        return mask_embedding

    def _get_batch_size(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> int:
        """
        Gets the batch size of the output given the batch size of the input prompts.
        """
        if points is not None:
            return points[0].shape[0]
        elif boxes is not None:
            return boxes.shape[0]
        elif masks is not None:
            return masks.shape[0]
        else:
            return 1

    def _get_device(self) -> torch.device:
        return self.point_embeddings[0].weight.device

    def forward(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Embeds different types of prompts, returning both sparse and dense
        embeddings.

        Arguments:
          points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
            and labels to embed.
          boxes (torch.Tensor or none): boxes to embed
          masks (torch.Tensor or none): masks to embed

        Returns:
          torch.Tensor: sparse embeddings for the points and boxes, with shape
            BxNx(embed_dim), where N is determined by the number of input points
            and boxes.
          torch.Tensor: dense embeddings for the masks, in the shape
            Bx(embed_dim)x(embed_H)x(embed_W)
        """
        bs = self._get_batch_size(points, boxes, masks)
        sparse_embeddings = torch.empty(
            (bs, 0, self.embed_dim), device=self._get_device()
        )
        if points is not None:
            coords, labels = points
            point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
            sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
        if boxes is not None:
            box_embeddings = self._embed_boxes(boxes)
            sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)

        if masks is not None:
            dense_embeddings = self._embed_masks(masks)
        else:
            dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
                bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
            )

        return sparse_embeddings, dense_embeddings
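A minimal sketch of calling the PromptEncoder above with a single positive click; the sizes are assumptions consistent with a 1024-pixel input and 64x64 image embeddings, not values taken from this repository's configs.

# Sketch only: one positive point yields 2 sparse tokens (the point plus the padding
# token added when no box is given) and the dense "no mask" embedding.
import torch
from sam2.modeling.sam.prompt_encoder import PromptEncoder

enc = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
coords = torch.tensor([[[512.0, 512.0]]])  # (B=1, N=1, 2) in input-image pixel coordinates
labels = torch.tensor([[1]])               # 1 = positive click
sparse, dense = enc(points=(coords, labels), boxes=None, masks=None)
print(sparse.shape, dense.shape)  # torch.Size([1, 2, 256]) torch.Size([1, 256, 64, 64])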
eval/grounded_sam/sam2/modeling/sam/transformer.py
ADDED
@@ -0,0 +1,360 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import contextlib
+import math
+import warnings
+from functools import partial
+from typing import Tuple, Type
+
+import torch
+import torch.nn.functional as F
+from torch import nn, Tensor
+
+from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis
+from sam2.modeling.sam2_utils import MLP
+from sam2.utils.misc import get_sdpa_settings
+
+warnings.simplefilter(action="ignore", category=FutureWarning)
+# Check whether Flash Attention is available (and use it by default)
+OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = get_sdpa_settings()
+# A fallback setting to allow all available kernels if Flash Attention fails
+ALLOW_ALL_KERNELS = False
+
+
+def sdp_kernel_context(dropout_p):
+    """
+    Get the context for the attention scaled dot-product kernel. We use Flash Attention
+    by default, but fall back to all available kernels if Flash Attention fails.
+    """
+    if ALLOW_ALL_KERNELS:
+        return contextlib.nullcontext()
+
+    return torch.backends.cuda.sdp_kernel(
+        enable_flash=USE_FLASH_ATTN,
+        # if Flash attention kernel is off, then math kernel needs to be enabled
+        enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON,
+        enable_mem_efficient=OLD_GPU,
+    )
+
+
+class TwoWayTransformer(nn.Module):
+    def __init__(
+        self,
+        depth: int,
+        embedding_dim: int,
+        num_heads: int,
+        mlp_dim: int,
+        activation: Type[nn.Module] = nn.ReLU,
+        attention_downsample_rate: int = 2,
+    ) -> None:
+        """
+        A transformer decoder that attends to an input image using
+        queries whose positional embedding is supplied.
+
+        Args:
+          depth (int): number of layers in the transformer
+          embedding_dim (int): the channel dimension for the input embeddings
+          num_heads (int): the number of heads for multihead attention. Must
+            divide embedding_dim
+          mlp_dim (int): the channel dimension internal to the MLP block
+          activation (nn.Module): the activation to use in the MLP block
+        """
+        super().__init__()
+        self.depth = depth
+        self.embedding_dim = embedding_dim
+        self.num_heads = num_heads
+        self.mlp_dim = mlp_dim
+        self.layers = nn.ModuleList()
+
+        for i in range(depth):
+            self.layers.append(
+                TwoWayAttentionBlock(
+                    embedding_dim=embedding_dim,
+                    num_heads=num_heads,
+                    mlp_dim=mlp_dim,
+                    activation=activation,
+                    attention_downsample_rate=attention_downsample_rate,
+                    skip_first_layer_pe=(i == 0),
+                )
+            )
+
+        self.final_attn_token_to_image = Attention(
+            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
+        )
+        self.norm_final_attn = nn.LayerNorm(embedding_dim)
+
+    def forward(
+        self,
+        image_embedding: Tensor,
+        image_pe: Tensor,
+        point_embedding: Tensor,
+    ) -> Tuple[Tensor, Tensor]:
+        """
+        Args:
+          image_embedding (torch.Tensor): image to attend to. Should be shape
+            B x embedding_dim x h x w for any h and w.
+          image_pe (torch.Tensor): the positional encoding to add to the image. Must
+            have the same shape as image_embedding.
+          point_embedding (torch.Tensor): the embedding to add to the query points.
+            Must have shape B x N_points x embedding_dim for any N_points.
+
+        Returns:
+          torch.Tensor: the processed point_embedding
+          torch.Tensor: the processed image_embedding
+        """
+        # BxCxHxW -> BxHWxC == B x N_image_tokens x C
+        bs, c, h, w = image_embedding.shape
+        image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
+        image_pe = image_pe.flatten(2).permute(0, 2, 1)
+
+        # Prepare queries
+        queries = point_embedding
+        keys = image_embedding
+
+        # Apply transformer blocks and final layernorm
+        for layer in self.layers:
+            queries, keys = layer(
+                queries=queries,
+                keys=keys,
+                query_pe=point_embedding,
+                key_pe=image_pe,
+            )
+
+        # Apply the final attention layer from the points to the image
+        q = queries + point_embedding
+        k = keys + image_pe
+        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
+        queries = queries + attn_out
+        queries = self.norm_final_attn(queries)
+
+        return queries, keys
+
+
+class TwoWayAttentionBlock(nn.Module):
+    def __init__(
+        self,
+        embedding_dim: int,
+        num_heads: int,
+        mlp_dim: int = 2048,
+        activation: Type[nn.Module] = nn.ReLU,
+        attention_downsample_rate: int = 2,
+        skip_first_layer_pe: bool = False,
+    ) -> None:
+        """
+        A transformer block with four layers: (1) self-attention of sparse
+        inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
+        block on sparse inputs, and (4) cross attention of dense inputs to sparse
+        inputs.
+
+        Arguments:
+          embedding_dim (int): the channel dimension of the embeddings
+          num_heads (int): the number of heads in the attention layers
+          mlp_dim (int): the hidden dimension of the mlp block
+          activation (nn.Module): the activation of the mlp block
+          skip_first_layer_pe (bool): skip the PE on the first layer
+        """
+        super().__init__()
+        self.self_attn = Attention(embedding_dim, num_heads)
+        self.norm1 = nn.LayerNorm(embedding_dim)
+
+        self.cross_attn_token_to_image = Attention(
+            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
+        )
+        self.norm2 = nn.LayerNorm(embedding_dim)
+
+        self.mlp = MLP(
+            embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation
+        )
+        self.norm3 = nn.LayerNorm(embedding_dim)
+
+        self.norm4 = nn.LayerNorm(embedding_dim)
+        self.cross_attn_image_to_token = Attention(
+            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
+        )
+
+        self.skip_first_layer_pe = skip_first_layer_pe
+
+    def forward(
+        self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
+    ) -> Tuple[Tensor, Tensor]:
+        # Self attention block
+        if self.skip_first_layer_pe:
+            queries = self.self_attn(q=queries, k=queries, v=queries)
+        else:
+            q = queries + query_pe
+            attn_out = self.self_attn(q=q, k=q, v=queries)
+            queries = queries + attn_out
+        queries = self.norm1(queries)
+
+        # Cross attention block, tokens attending to image embedding
+        q = queries + query_pe
+        k = keys + key_pe
+        attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
+        queries = queries + attn_out
+        queries = self.norm2(queries)
+
+        # MLP block
+        mlp_out = self.mlp(queries)
+        queries = queries + mlp_out
+        queries = self.norm3(queries)
+
+        # Cross attention block, image embedding attending to tokens
+        q = queries + query_pe
+        k = keys + key_pe
+        attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
+        keys = keys + attn_out
+        keys = self.norm4(keys)
+
+        return queries, keys
+
+
+class Attention(nn.Module):
+    """
+    An attention layer that allows for downscaling the size of the embedding
+    after projection to queries, keys, and values.
+    """
+
+    def __init__(
+        self,
+        embedding_dim: int,
+        num_heads: int,
+        downsample_rate: int = 1,
+        dropout: float = 0.0,
+        kv_in_dim: int = None,
+    ) -> None:
+        super().__init__()
+        self.embedding_dim = embedding_dim
+        self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim
+        self.internal_dim = embedding_dim // downsample_rate
+        self.num_heads = num_heads
+        assert (
+            self.internal_dim % num_heads == 0
+        ), "num_heads must divide embedding_dim."
+
+        self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
+        self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
+        self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
+        self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
+
+        self.dropout_p = dropout
+
+    def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
+        b, n, c = x.shape
+        x = x.reshape(b, n, num_heads, c // num_heads)
+        return x.transpose(1, 2)  # B x N_heads x N_tokens x C_per_head
+
+    def _recombine_heads(self, x: Tensor) -> Tensor:
+        b, n_heads, n_tokens, c_per_head = x.shape
+        x = x.transpose(1, 2)
+        return x.reshape(b, n_tokens, n_heads * c_per_head)  # B x N_tokens x C
+
+    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
+        # Input projections
+        q = self.q_proj(q)
+        k = self.k_proj(k)
+        v = self.v_proj(v)
+
+        # Separate into heads
+        q = self._separate_heads(q, self.num_heads)
+        k = self._separate_heads(k, self.num_heads)
+        v = self._separate_heads(v, self.num_heads)
+
+        dropout_p = self.dropout_p if self.training else 0.0
+        # Attention
+        try:
+            with sdp_kernel_context(dropout_p):
+                out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
+        except Exception as e:
+            # Fall back to all kernels if the Flash attention kernel fails
+            warnings.warn(
+                f"Flash Attention kernel failed due to: {e}\nFalling back to all available "
+                f"kernels for scaled_dot_product_attention (which may have a slower speed).",
+                category=UserWarning,
+                stacklevel=2,
+            )
+            global ALLOW_ALL_KERNELS
+            ALLOW_ALL_KERNELS = True
+            out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
+
+        out = self._recombine_heads(out)
+        out = self.out_proj(out)
+
+        return out
+
+
+class RoPEAttention(Attention):
+    """Attention with rotary position encoding."""
+
+    def __init__(
+        self,
+        *args,
+        rope_theta=10000.0,
+        # whether to repeat q rope to match k length
+        # this is needed for cross-attention to memories
+        rope_k_repeat=False,
+        feat_sizes=(32, 32),  # [w, h] for stride 16 feats at 512 resolution
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+
+        self.compute_cis = partial(
+            compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta
+        )
+        freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1])
+        self.freqs_cis = freqs_cis
+        self.rope_k_repeat = rope_k_repeat
+
+    def forward(
+        self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0
+    ) -> Tensor:
+        # Input projections
+        q = self.q_proj(q)
+        k = self.k_proj(k)
+        v = self.v_proj(v)
+
+        # Separate into heads
+        q = self._separate_heads(q, self.num_heads)
+        k = self._separate_heads(k, self.num_heads)
+        v = self._separate_heads(v, self.num_heads)
+
+        # Apply rotary position encoding
+        w = h = math.sqrt(q.shape[-2])
+        self.freqs_cis = self.freqs_cis.to(q.device)
+        if self.freqs_cis.shape[0] != q.shape[-2]:
+            self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device)
+        if q.shape[-2] != k.shape[-2]:
+            assert self.rope_k_repeat
+
+        num_k_rope = k.size(-2) - num_k_exclude_rope
+        q, k[:, :, :num_k_rope] = apply_rotary_enc(
+            q,
+            k[:, :, :num_k_rope],
+            freqs_cis=self.freqs_cis,
+            repeat_freqs_k=self.rope_k_repeat,
+        )
+
+        dropout_p = self.dropout_p if self.training else 0.0
+        # Attention
+        try:
+            with sdp_kernel_context(dropout_p):
+                out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
+        except Exception as e:
+            # Fall back to all kernels if the Flash attention kernel fails
+            warnings.warn(
+                f"Flash Attention kernel failed due to: {e}\nFalling back to all available "
+                f"kernels for scaled_dot_product_attention (which may have a slower speed).",
+                category=UserWarning,
+                stacklevel=2,
+            )
+            global ALLOW_ALL_KERNELS
+            ALLOW_ALL_KERNELS = True
+            out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
+
+        out = self._recombine_heads(out)
+        out = self.out_proj(out)
+
+        return out
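For orientation, a rough, unverified sketch of driving the TwoWayTransformer above on dummy tensors, using the same hyperparameters (depth=2, embedding_dim=256, mlp_dim=2048, num_heads=8) that sam2_base.py passes when building the SAM mask decoder later in this diff; the feature-map and prompt-token sizes below are illustrative assumptions.

import torch
from sam2.modeling.sam.transformer import TwoWayTransformer

transformer = TwoWayTransformer(depth=2, embedding_dim=256, mlp_dim=2048, num_heads=8)
B, C, H, W = 1, 256, 64, 64                   # assumed feature-map size
image_embedding = torch.randn(B, C, H, W)     # image features to attend to
image_pe = torch.randn(B, C, H, W)            # positional encoding, same shape
point_embedding = torch.randn(B, 5, C)        # e.g. 5 sparse prompt tokens
queries, keys = transformer(image_embedding, image_pe, point_embedding)
# queries: [B, 5, C] refined prompt tokens; keys: [B, H*W, C] refined image tokens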
eval/grounded_sam/sam2/modeling/sam2_base.py
ADDED
@@ -0,0 +1,908 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+import torch.distributed
+import torch.nn.functional as F
+
+from torch.nn.init import trunc_normal_
+
+from sam2.modeling.sam.mask_decoder import MaskDecoder
+from sam2.modeling.sam.prompt_encoder import PromptEncoder
+from sam2.modeling.sam.transformer import TwoWayTransformer
+from sam2.modeling.sam2_utils import get_1d_sine_pe, MLP, select_closest_cond_frames
+
+# a large negative value as a placeholder score for missing objects
+NO_OBJ_SCORE = -1024.0
+
+
+class SAM2Base(torch.nn.Module):
+    def __init__(
+        self,
+        image_encoder,
+        memory_attention,
+        memory_encoder,
+        num_maskmem=7,  # default 1 input frame + 6 previous frames
+        image_size=512,
+        backbone_stride=16,  # stride of the image backbone output
+        sigmoid_scale_for_mem_enc=1.0,  # scale factor for mask sigmoid prob
+        sigmoid_bias_for_mem_enc=0.0,  # bias factor for mask sigmoid prob
+        # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks
+        binarize_mask_from_pts_for_mem_enc=False,
+        use_mask_input_as_output_without_sam=False,  # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder
+        # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit,
+        # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model
+        # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM.
+        max_cond_frames_in_attn=-1,
+        # on the first frame, whether to directly add the no-memory embedding to the image feature
+        # (instead of using the transformer encoder)
+        directly_add_no_mem_embed=False,
+        # whether to use high-resolution feature maps in the SAM mask decoder
+        use_high_res_features_in_sam=False,
+        # whether to output multiple (3) masks for the first click on initial conditioning frames
+        multimask_output_in_sam=False,
+        # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`;
+        # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points)
+        multimask_min_pt_num=1,
+        multimask_max_pt_num=1,
+        # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`)
+        multimask_output_for_tracking=False,
+        # Whether to use multimask tokens for obj ptr; Only relevant when both
+        # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True
+        use_multimask_token_for_obj_ptr: bool = False,
+        # whether to use sigmoid to restrict ious prediction to [0-1]
+        iou_prediction_use_sigmoid=False,
+        # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5).
+        # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of
+        # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame.
+        memory_temporal_stride_for_eval=1,
+        # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks)
+        non_overlap_masks_for_mem_enc=False,
+        # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
+        use_obj_ptrs_in_encoder=False,
+        # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`)
+        max_obj_ptrs_in_encoder=16,
+        # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`)
+        add_tpos_enc_to_obj_ptrs=True,
+        # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference
+        # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
+        proj_tpos_enc_in_obj_ptrs=False,
+        # whether to use signed distance (instead of unsigned absolute distance) in the temporal positional encoding in the object pointers
+        # (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
+        use_signed_tpos_enc_to_obj_ptrs=False,
+        # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation
+        # (only relevant when `use_obj_ptrs_in_encoder=True`; this might avoid pointer information too far in the future to distract the initial tracking)
+        only_obj_ptrs_in_the_past_for_eval=False,
+        # Whether to predict if there is an object in the frame
+        pred_obj_scores: bool = False,
+        # Whether to use an MLP to predict object scores
+        pred_obj_scores_mlp: bool = False,
+        # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True;
+        # Whether to have a fixed no obj pointer when there is no object present
+        # or to use it as an additive embedding with obj_ptr produced by decoder
+        fixed_no_obj_ptr: bool = False,
+        # Soft no object, i.e. mix in no_obj_ptr softly,
+        # hope to make recovery easier if there is a mistake and mitigate accumulation of errors
+        soft_no_obj_ptr: bool = False,
+        use_mlp_for_obj_ptr_proj: bool = False,
+        # add no obj embedding to spatial frames
+        no_obj_embed_spatial: bool = False,
+        # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class.
+        sam_mask_decoder_extra_args=None,
+        compile_image_encoder: bool = False,
+    ):
97 |
+
super().__init__()
|
98 |
+
|
99 |
+
# Part 1: the image backbone
|
100 |
+
self.image_encoder = image_encoder
|
101 |
+
# Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting
|
102 |
+
self.use_high_res_features_in_sam = use_high_res_features_in_sam
|
103 |
+
self.num_feature_levels = 3 if use_high_res_features_in_sam else 1
|
104 |
+
self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder
|
105 |
+
self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder
|
106 |
+
if use_obj_ptrs_in_encoder:
|
107 |
+
# A conv layer to downsample the mask prompt to stride 4 (the same stride as
|
108 |
+
# low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
|
109 |
+
# so that it can be fed into the SAM mask decoder to generate a pointer.
|
110 |
+
self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
|
111 |
+
self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs
|
112 |
+
if proj_tpos_enc_in_obj_ptrs:
|
113 |
+
assert add_tpos_enc_to_obj_ptrs # these options need to be used together
|
114 |
+
self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs
|
115 |
+
self.use_signed_tpos_enc_to_obj_ptrs = use_signed_tpos_enc_to_obj_ptrs
|
116 |
+
self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval
|
117 |
+
|
118 |
+
# Part 2: memory attention to condition current frame's visual features
|
119 |
+
# with memories (and obj ptrs) from past frames
|
120 |
+
self.memory_attention = memory_attention
|
121 |
+
self.hidden_dim = image_encoder.neck.d_model
|
122 |
+
|
123 |
+
# Part 3: memory encoder for the previous frame's outputs
|
124 |
+
self.memory_encoder = memory_encoder
|
125 |
+
self.mem_dim = self.hidden_dim
|
126 |
+
if hasattr(self.memory_encoder, "out_proj") and hasattr(
|
127 |
+
self.memory_encoder.out_proj, "weight"
|
128 |
+
):
|
129 |
+
# if there is compression of memories along channel dim
|
130 |
+
self.mem_dim = self.memory_encoder.out_proj.weight.shape[0]
|
131 |
+
self.num_maskmem = num_maskmem # Number of memories accessible
|
132 |
+
# Temporal encoding of the memories
|
133 |
+
self.maskmem_tpos_enc = torch.nn.Parameter(
|
134 |
+
torch.zeros(num_maskmem, 1, 1, self.mem_dim)
|
135 |
+
)
|
136 |
+
trunc_normal_(self.maskmem_tpos_enc, std=0.02)
|
137 |
+
# a single token to indicate no memory embedding from previous frames
|
138 |
+
self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
|
139 |
+
self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
|
140 |
+
trunc_normal_(self.no_mem_embed, std=0.02)
|
141 |
+
trunc_normal_(self.no_mem_pos_enc, std=0.02)
|
142 |
+
self.directly_add_no_mem_embed = directly_add_no_mem_embed
|
143 |
+
# Apply sigmoid to the output raw mask logits (to turn them from
|
144 |
+
# range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder
|
145 |
+
self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc
|
146 |
+
self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc
|
147 |
+
self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc
|
148 |
+
self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc
|
149 |
+
self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval
|
150 |
+
# On frames with mask input, whether to directly output the input mask without
|
151 |
+
# using a SAM prompt encoder + mask decoder
|
152 |
+
self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam
|
153 |
+
self.multimask_output_in_sam = multimask_output_in_sam
|
154 |
+
self.multimask_min_pt_num = multimask_min_pt_num
|
155 |
+
self.multimask_max_pt_num = multimask_max_pt_num
|
156 |
+
self.multimask_output_for_tracking = multimask_output_for_tracking
|
157 |
+
self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
|
158 |
+
self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid
|
159 |
+
|
160 |
+
# Part 4: SAM-style prompt encoder (for both mask and point inputs)
|
161 |
+
# and SAM-style mask decoder for the final mask output
|
162 |
+
self.image_size = image_size
|
163 |
+
self.backbone_stride = backbone_stride
|
164 |
+
self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args
|
165 |
+
self.pred_obj_scores = pred_obj_scores
|
166 |
+
self.pred_obj_scores_mlp = pred_obj_scores_mlp
|
167 |
+
self.fixed_no_obj_ptr = fixed_no_obj_ptr
|
168 |
+
self.soft_no_obj_ptr = soft_no_obj_ptr
|
169 |
+
if self.fixed_no_obj_ptr:
|
170 |
+
assert self.pred_obj_scores
|
171 |
+
assert self.use_obj_ptrs_in_encoder
|
172 |
+
if self.pred_obj_scores and self.use_obj_ptrs_in_encoder:
|
173 |
+
self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
|
174 |
+
trunc_normal_(self.no_obj_ptr, std=0.02)
|
175 |
+
self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj
|
176 |
+
self.no_obj_embed_spatial = None
|
177 |
+
if no_obj_embed_spatial:
|
178 |
+
self.no_obj_embed_spatial = torch.nn.Parameter(torch.zeros(1, self.mem_dim))
|
179 |
+
trunc_normal_(self.no_obj_embed_spatial, std=0.02)
|
180 |
+
|
181 |
+
self._build_sam_heads()
|
182 |
+
self.max_cond_frames_in_attn = max_cond_frames_in_attn
|
183 |
+
|
184 |
+
# Model compilation
|
185 |
+
if compile_image_encoder:
|
186 |
+
# Compile the forward function (not the full module) to allow loading checkpoints.
|
187 |
+
print(
|
188 |
+
"Image encoder compilation is enabled. First forward pass will be slow."
|
189 |
+
)
|
190 |
+
self.image_encoder.forward = torch.compile(
|
191 |
+
self.image_encoder.forward,
|
192 |
+
mode="max-autotune",
|
193 |
+
fullgraph=True,
|
194 |
+
dynamic=False,
|
195 |
+
)
|
196 |
+
|
197 |
+
@property
|
198 |
+
def device(self):
|
199 |
+
return next(self.parameters()).device
|
200 |
+
|
201 |
+
def forward(self, *args, **kwargs):
|
202 |
+
raise NotImplementedError(
|
203 |
+
"Please use the corresponding methods in SAM2VideoPredictor for inference or SAM2Train for training/fine-tuning"
|
204 |
+
"See notebooks/video_predictor_example.ipynb for an inference example."
|
205 |
+
)
|
206 |
+
|
207 |
+
def _build_sam_heads(self):
|
208 |
+
"""Build SAM-style prompt encoder and mask decoder."""
|
209 |
+
self.sam_prompt_embed_dim = self.hidden_dim
|
210 |
+
self.sam_image_embedding_size = self.image_size // self.backbone_stride
|
211 |
+
|
212 |
+
# build PromptEncoder and MaskDecoder from SAM
|
213 |
+
# (their hyperparameters like `mask_in_chans=16` are from SAM code)
|
214 |
+
self.sam_prompt_encoder = PromptEncoder(
|
215 |
+
embed_dim=self.sam_prompt_embed_dim,
|
216 |
+
image_embedding_size=(
|
217 |
+
self.sam_image_embedding_size,
|
218 |
+
self.sam_image_embedding_size,
|
219 |
+
),
|
220 |
+
input_image_size=(self.image_size, self.image_size),
|
221 |
+
mask_in_chans=16,
|
222 |
+
)
|
223 |
+
self.sam_mask_decoder = MaskDecoder(
|
224 |
+
num_multimask_outputs=3,
|
225 |
+
transformer=TwoWayTransformer(
|
226 |
+
depth=2,
|
227 |
+
embedding_dim=self.sam_prompt_embed_dim,
|
228 |
+
mlp_dim=2048,
|
229 |
+
num_heads=8,
|
230 |
+
),
|
231 |
+
transformer_dim=self.sam_prompt_embed_dim,
|
232 |
+
iou_head_depth=3,
|
233 |
+
iou_head_hidden_dim=256,
|
234 |
+
use_high_res_features=self.use_high_res_features_in_sam,
|
235 |
+
iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid,
|
236 |
+
pred_obj_scores=self.pred_obj_scores,
|
237 |
+
pred_obj_scores_mlp=self.pred_obj_scores_mlp,
|
238 |
+
use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr,
|
239 |
+
**(self.sam_mask_decoder_extra_args or {}),
|
240 |
+
)
|
241 |
+
if self.use_obj_ptrs_in_encoder:
|
242 |
+
# a linear projection on SAM output tokens to turn them into object pointers
|
243 |
+
self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim)
|
244 |
+
if self.use_mlp_for_obj_ptr_proj:
|
245 |
+
self.obj_ptr_proj = MLP(
|
246 |
+
self.hidden_dim, self.hidden_dim, self.hidden_dim, 3
|
247 |
+
)
|
248 |
+
else:
|
249 |
+
self.obj_ptr_proj = torch.nn.Identity()
|
250 |
+
if self.proj_tpos_enc_in_obj_ptrs:
|
251 |
+
# a linear projection on temporal positional encoding in object pointers to
|
252 |
+
# avoid potential interference with spatial positional encoding
|
253 |
+
self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim)
|
254 |
+
else:
|
255 |
+
self.obj_ptr_tpos_proj = torch.nn.Identity()
|
256 |
+
|
257 |
+
def _forward_sam_heads(
|
258 |
+
self,
|
259 |
+
backbone_features,
|
260 |
+
point_inputs=None,
|
261 |
+
mask_inputs=None,
|
262 |
+
high_res_features=None,
|
263 |
+
multimask_output=False,
|
264 |
+
):
|
265 |
+
"""
|
266 |
+
Forward SAM prompt encoders and mask heads.
|
267 |
+
|
268 |
+
Inputs:
|
269 |
+
- backbone_features: image features of [B, C, H, W] shape
|
270 |
+
- point_inputs: a dictionary with "point_coords" and "point_labels", where
|
271 |
+
1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the
|
272 |
+
absolute pixel-unit coordinate in (x, y) format of the P input points
|
273 |
+
2) "point_labels" has shape [B, P] and int32 dtype, where 1 means
|
274 |
+
positive clicks, 0 means negative clicks, and -1 means padding
|
275 |
+
- mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the
|
276 |
+
same spatial size as the image.
|
277 |
+
- high_res_features: either 1) None or 2) or a list of length 2 containing
|
278 |
+
two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively,
|
279 |
+
which will be used as high-resolution feature maps for SAM decoder.
|
280 |
+
- multimask_output: if it's True, we output 3 candidate masks and their 3
|
281 |
+
corresponding IoU estimates, and if it's False, we output only 1 mask and
|
282 |
+
its corresponding IoU estimate.
|
283 |
+
|
284 |
+
Outputs:
|
285 |
+
- low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if
|
286 |
+
`multimask_output=True` and M = 1 if `multimask_output=False`), the SAM
|
287 |
+
output mask logits (before sigmoid) for the low-resolution masks, with 4x
|
288 |
+
the resolution (1/4 stride) of the input backbone_features.
|
289 |
+
- high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3
|
290 |
+
if `multimask_output=True` and M = 1 if `multimask_output=False`),
|
291 |
+
upsampled from the low-resolution masks, with shape size as the image
|
292 |
+
(stride is 1 pixel).
|
293 |
+
- ious, [B, M] shape, where (where M = 3 if `multimask_output=True` and M = 1
|
294 |
+
if `multimask_output=False`), the estimated IoU of each output mask.
|
295 |
+
- low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`.
|
296 |
+
If `multimask_output=True`, it's the mask with the highest IoU estimate.
|
297 |
+
If `multimask_output=False`, it's the same as `low_res_multimasks`.
|
298 |
+
- high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`.
|
299 |
+
If `multimask_output=True`, it's the mask with the highest IoU estimate.
|
300 |
+
If `multimask_output=False`, it's the same as `high_res_multimasks`.
|
301 |
+
- obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted
|
302 |
+
based on the output token from the SAM mask decoder.
|
303 |
+
"""
|
304 |
+
B = backbone_features.size(0)
|
305 |
+
device = backbone_features.device
|
306 |
+
assert backbone_features.size(1) == self.sam_prompt_embed_dim
|
307 |
+
assert backbone_features.size(2) == self.sam_image_embedding_size
|
308 |
+
assert backbone_features.size(3) == self.sam_image_embedding_size
|
309 |
+
|
310 |
+
# a) Handle point prompts
|
311 |
+
if point_inputs is not None:
|
312 |
+
sam_point_coords = point_inputs["point_coords"]
|
313 |
+
sam_point_labels = point_inputs["point_labels"]
|
314 |
+
assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
|
315 |
+
else:
|
316 |
+
# If no points are provide, pad with an empty point (with label -1)
|
317 |
+
sam_point_coords = torch.zeros(B, 1, 2, device=device)
|
318 |
+
sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
|
319 |
+
|
320 |
+
# b) Handle mask prompts
|
321 |
+
if mask_inputs is not None:
|
322 |
+
# If mask_inputs is provided, downsize it into low-res mask input if needed
|
323 |
+
# and feed it as a dense mask prompt into the SAM mask encoder
|
324 |
+
assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
|
325 |
+
if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
|
326 |
+
sam_mask_prompt = F.interpolate(
|
327 |
+
mask_inputs.float(),
|
328 |
+
size=self.sam_prompt_encoder.mask_input_size,
|
329 |
+
align_corners=False,
|
330 |
+
mode="bilinear",
|
331 |
+
antialias=True, # use antialias for downsampling
|
332 |
+
)
|
333 |
+
else:
|
334 |
+
sam_mask_prompt = mask_inputs
|
335 |
+
else:
|
336 |
+
# Otherwise, simply feed None (and SAM's prompt encoder will add
|
337 |
+
# a learned `no_mask_embed` to indicate no mask input in this case).
|
338 |
+
sam_mask_prompt = None
|
339 |
+
|
340 |
+
sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
|
341 |
+
points=(sam_point_coords, sam_point_labels),
|
342 |
+
boxes=None,
|
343 |
+
masks=sam_mask_prompt,
|
344 |
+
)
|
345 |
+
(
|
346 |
+
low_res_multimasks,
|
347 |
+
ious,
|
348 |
+
sam_output_tokens,
|
349 |
+
object_score_logits,
|
350 |
+
) = self.sam_mask_decoder(
|
351 |
+
image_embeddings=backbone_features,
|
352 |
+
image_pe=self.sam_prompt_encoder.get_dense_pe(),
|
353 |
+
sparse_prompt_embeddings=sparse_embeddings,
|
354 |
+
dense_prompt_embeddings=dense_embeddings,
|
355 |
+
multimask_output=multimask_output,
|
356 |
+
repeat_image=False, # the image is already batched
|
357 |
+
high_res_features=high_res_features,
|
358 |
+
)
|
359 |
+
if self.pred_obj_scores:
|
360 |
+
is_obj_appearing = object_score_logits > 0
|
361 |
+
|
362 |
+
# Mask used for spatial memories is always a *hard* choice between obj and no obj,
|
363 |
+
# consistent with the actual mask prediction
|
364 |
+
low_res_multimasks = torch.where(
|
365 |
+
is_obj_appearing[:, None, None],
|
366 |
+
low_res_multimasks,
|
367 |
+
NO_OBJ_SCORE,
|
368 |
+
)
|
369 |
+
|
370 |
+
# convert masks from possibly bfloat16 (or float16) to float32
|
371 |
+
# (older PyTorch versions before 2.1 don't support `interpolate` on bf16)
|
372 |
+
low_res_multimasks = low_res_multimasks.float()
|
373 |
+
high_res_multimasks = F.interpolate(
|
374 |
+
low_res_multimasks,
|
375 |
+
size=(self.image_size, self.image_size),
|
376 |
+
mode="bilinear",
|
377 |
+
align_corners=False,
|
378 |
+
)
|
379 |
+
|
380 |
+
sam_output_token = sam_output_tokens[:, 0]
|
381 |
+
if multimask_output:
|
382 |
+
# take the best mask prediction (with the highest IoU estimation)
|
383 |
+
best_iou_inds = torch.argmax(ious, dim=-1)
|
384 |
+
batch_inds = torch.arange(B, device=device)
|
385 |
+
low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
|
386 |
+
high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
|
387 |
+
if sam_output_tokens.size(1) > 1:
|
388 |
+
sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
|
389 |
+
else:
|
390 |
+
low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks
|
391 |
+
|
392 |
+
# Extract object pointer from the SAM output token (with occlusion handling)
|
393 |
+
obj_ptr = self.obj_ptr_proj(sam_output_token)
|
394 |
+
if self.pred_obj_scores:
|
395 |
+
# Allow *soft* no obj ptr, unlike for masks
|
396 |
+
if self.soft_no_obj_ptr:
|
397 |
+
lambda_is_obj_appearing = object_score_logits.sigmoid()
|
398 |
+
else:
|
399 |
+
lambda_is_obj_appearing = is_obj_appearing.float()
|
400 |
+
|
401 |
+
if self.fixed_no_obj_ptr:
|
402 |
+
obj_ptr = lambda_is_obj_appearing * obj_ptr
|
403 |
+
obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
|
404 |
+
|
405 |
+
return (
|
406 |
+
low_res_multimasks,
|
407 |
+
high_res_multimasks,
|
408 |
+
ious,
|
409 |
+
low_res_masks,
|
410 |
+
high_res_masks,
|
411 |
+
obj_ptr,
|
412 |
+
object_score_logits,
|
413 |
+
)
|
414 |
+
|
415 |
+
def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs):
|
416 |
+
"""
|
417 |
+
Directly turn binary `mask_inputs` into a output mask logits without using SAM.
|
418 |
+
(same input and output shapes as in _forward_sam_heads above).
|
419 |
+
"""
|
420 |
+
# Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
|
421 |
+
out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05
|
422 |
+
mask_inputs_float = mask_inputs.float()
|
423 |
+
high_res_masks = mask_inputs_float * out_scale + out_bias
|
424 |
+
low_res_masks = F.interpolate(
|
425 |
+
high_res_masks,
|
426 |
+
size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4),
|
427 |
+
align_corners=False,
|
428 |
+
mode="bilinear",
|
429 |
+
antialias=True, # use antialias for downsampling
|
430 |
+
)
|
431 |
+
# a dummy IoU prediction of all 1's under mask input
|
432 |
+
ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float()
|
433 |
+
if not self.use_obj_ptrs_in_encoder:
|
434 |
+
# all zeros as a dummy object pointer (of shape [B, C])
|
435 |
+
obj_ptr = torch.zeros(
|
436 |
+
mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device
|
437 |
+
)
|
438 |
+
else:
|
439 |
+
# produce an object pointer using the SAM decoder from the mask input
|
440 |
+
_, _, _, _, _, obj_ptr, _ = self._forward_sam_heads(
|
441 |
+
backbone_features=backbone_features,
|
442 |
+
mask_inputs=self.mask_downsample(mask_inputs_float),
|
443 |
+
high_res_features=high_res_features,
|
444 |
+
)
|
445 |
+
# In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem;
|
446 |
+
# Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying
|
447 |
+
# on the object_scores from the SAM decoder.
|
448 |
+
is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
|
449 |
+
is_obj_appearing = is_obj_appearing[..., None]
|
450 |
+
lambda_is_obj_appearing = is_obj_appearing.float()
|
451 |
+
object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
|
452 |
+
if self.pred_obj_scores:
|
453 |
+
if self.fixed_no_obj_ptr:
|
454 |
+
obj_ptr = lambda_is_obj_appearing * obj_ptr
|
455 |
+
obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
|
456 |
+
|
457 |
+
return (
|
458 |
+
low_res_masks,
|
459 |
+
high_res_masks,
|
460 |
+
ious,
|
461 |
+
low_res_masks,
|
462 |
+
high_res_masks,
|
463 |
+
obj_ptr,
|
464 |
+
object_score_logits,
|
465 |
+
)
|
466 |
+
|
467 |
+
def forward_image(self, img_batch: torch.Tensor):
|
468 |
+
"""Get the image feature on the input batch."""
|
469 |
+
backbone_out = self.image_encoder(img_batch)
|
470 |
+
# print(f"[sam2_base.forward_image] backbone_out.dtype={backbone_out['vision_features'].dtype}")
|
471 |
+
if self.use_high_res_features_in_sam:
|
472 |
+
# precompute projected level 0 and level 1 features in SAM decoder
|
473 |
+
# to avoid running it again on every SAM click
|
474 |
+
backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(
|
475 |
+
backbone_out["backbone_fpn"][0]
|
476 |
+
)
|
477 |
+
backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(
|
478 |
+
backbone_out["backbone_fpn"][1]
|
479 |
+
)
|
480 |
+
return backbone_out
|
481 |
+
|
482 |
+
def _prepare_backbone_features(self, backbone_out):
|
483 |
+
"""Prepare and flatten visual features."""
|
484 |
+
backbone_out = backbone_out.copy()
|
485 |
+
assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"])
|
486 |
+
assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels
|
487 |
+
|
488 |
+
feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :]
|
489 |
+
vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :]
|
490 |
+
|
491 |
+
feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds]
|
492 |
+
# flatten NxCxHxW to HWxNxC
|
493 |
+
vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps]
|
494 |
+
vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds]
|
495 |
+
|
496 |
+
return backbone_out, vision_feats, vision_pos_embeds, feat_sizes
|
497 |
+
|
498 |
+
def _prepare_memory_conditioned_features(
|
499 |
+
self,
|
500 |
+
frame_idx,
|
501 |
+
is_init_cond_frame,
|
502 |
+
current_vision_feats,
|
503 |
+
current_vision_pos_embeds,
|
504 |
+
feat_sizes,
|
505 |
+
output_dict,
|
506 |
+
num_frames,
|
507 |
+
track_in_reverse=False, # tracking in reverse time order (for demo usage)
|
508 |
+
):
|
509 |
+
"""Fuse the current frame's visual feature map with previous memory."""
|
510 |
+
B = current_vision_feats[-1].size(1) # batch size on this frame
|
511 |
+
C = self.hidden_dim
|
512 |
+
H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
|
513 |
+
device = current_vision_feats[-1].device
|
514 |
+
# The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images.
|
515 |
+
# In this case, we skip the fusion with any memory.
|
516 |
+
if self.num_maskmem == 0: # Disable memory and skip fusion
|
517 |
+
pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
|
518 |
+
return pix_feat
|
519 |
+
|
520 |
+
num_obj_ptr_tokens = 0
|
521 |
+
tpos_sign_mul = -1 if track_in_reverse else 1
|
522 |
+
# Step 1: condition the visual features of the current frame on previous memories
|
523 |
+
if not is_init_cond_frame:
|
524 |
+
# Retrieve the memories encoded with the maskmem backbone
|
525 |
+
to_cat_memory, to_cat_memory_pos_embed = [], []
|
526 |
+
# Add conditioning frames's output first (all cond frames have t_pos=0 for
|
527 |
+
# when getting temporal positional embedding below)
|
528 |
+
assert len(output_dict["cond_frame_outputs"]) > 0
|
529 |
+
# Select a maximum number of temporally closest cond frames for cross attention
|
530 |
+
cond_outputs = output_dict["cond_frame_outputs"]
|
531 |
+
selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames(
|
532 |
+
frame_idx, cond_outputs, self.max_cond_frames_in_attn
|
533 |
+
)
|
534 |
+
t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()]
|
535 |
+
# Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory
|
536 |
+
# the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1
|
537 |
+
# We also allow taking the memory frame non-consecutively (with stride>1), in which case
|
538 |
+
# we take (self.num_maskmem - 2) frames among every stride-th frames plus the last frame.
|
539 |
+
stride = 1 if self.training else self.memory_temporal_stride_for_eval
|
540 |
+
for t_pos in range(1, self.num_maskmem):
|
541 |
+
t_rel = self.num_maskmem - t_pos # how many frames before current frame
|
542 |
+
if t_rel == 1:
|
543 |
+
# for t_rel == 1, we take the last frame (regardless of r)
|
544 |
+
if not track_in_reverse:
|
545 |
+
# the frame immediately before this frame (i.e. frame_idx - 1)
|
546 |
+
prev_frame_idx = frame_idx - t_rel
|
547 |
+
else:
|
548 |
+
# the frame immediately after this frame (i.e. frame_idx + 1)
|
549 |
+
prev_frame_idx = frame_idx + t_rel
|
550 |
+
else:
|
551 |
+
# for t_rel >= 2, we take the memory frame from every r-th frames
|
552 |
+
if not track_in_reverse:
|
553 |
+
# first find the nearest frame among every r-th frames before this frame
|
554 |
+
# for r=1, this would be (frame_idx - 2)
|
555 |
+
prev_frame_idx = ((frame_idx - 2) // stride) * stride
|
556 |
+
# then seek further among every r-th frames
|
557 |
+
prev_frame_idx = prev_frame_idx - (t_rel - 2) * stride
|
558 |
+
else:
|
559 |
+
# first find the nearest frame among every r-th frames after this frame
|
560 |
+
# for r=1, this would be (frame_idx + 2)
|
561 |
+
prev_frame_idx = -(-(frame_idx + 2) // stride) * stride
|
562 |
+
# then seek further among every r-th frames
|
563 |
+
prev_frame_idx = prev_frame_idx + (t_rel - 2) * stride
|
564 |
+
out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None)
|
565 |
+
if out is None:
|
566 |
+
# If an unselected conditioning frame is among the last (self.num_maskmem - 1)
|
567 |
+
# frames, we still attend to it as if it's a non-conditioning frame.
|
568 |
+
out = unselected_cond_outputs.get(prev_frame_idx, None)
|
569 |
+
t_pos_and_prevs.append((t_pos, out))
|
570 |
+
|
571 |
+
for t_pos, prev in t_pos_and_prevs:
|
572 |
+
if prev is None:
|
573 |
+
continue # skip padding frames
|
574 |
+
# "maskmem_features" might have been offloaded to CPU in demo use cases,
|
575 |
+
# so we load it back to GPU (it's a no-op if it's already on GPU).
|
576 |
+
feats = prev["maskmem_features"].to(device, non_blocking=True)
|
577 |
+
to_cat_memory.append(feats.flatten(2).permute(2, 0, 1))
|
578 |
+
# Spatial positional encoding (it might have been offloaded to CPU in eval)
|
579 |
+
maskmem_enc = prev["maskmem_pos_enc"][-1].to(device)
|
580 |
+
maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1)
|
581 |
+
# Temporal positional encoding
|
582 |
+
maskmem_enc = (
|
583 |
+
maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1]
|
584 |
+
)
|
585 |
+
to_cat_memory_pos_embed.append(maskmem_enc)
|
586 |
+
|
587 |
+
# Construct the list of past object pointers
|
588 |
+
if self.use_obj_ptrs_in_encoder:
|
589 |
+
max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder)
|
590 |
+
# First add those object pointers from selected conditioning frames
|
591 |
+
# (optionally, only include object pointers in the past during evaluation)
|
592 |
+
if not self.training and self.only_obj_ptrs_in_the_past_for_eval:
|
593 |
+
ptr_cond_outputs = {
|
594 |
+
t: out
|
595 |
+
for t, out in selected_cond_outputs.items()
|
596 |
+
if (t >= frame_idx if track_in_reverse else t <= frame_idx)
|
597 |
+
}
|
598 |
+
else:
|
599 |
+
ptr_cond_outputs = selected_cond_outputs
|
600 |
+
pos_and_ptrs = [
|
601 |
+
# Temporal pos encoding contains how far away each pointer is from current frame
|
602 |
+
(
|
603 |
+
(
|
604 |
+
(frame_idx - t) * tpos_sign_mul
|
605 |
+
if self.use_signed_tpos_enc_to_obj_ptrs
|
606 |
+
else abs(frame_idx - t)
|
607 |
+
),
|
608 |
+
out["obj_ptr"],
|
609 |
+
)
|
610 |
+
for t, out in ptr_cond_outputs.items()
|
611 |
+
]
|
612 |
+
# Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame
|
613 |
+
for t_diff in range(1, max_obj_ptrs_in_encoder):
|
614 |
+
t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff
|
615 |
+
if t < 0 or (num_frames is not None and t >= num_frames):
|
616 |
+
break
|
617 |
+
out = output_dict["non_cond_frame_outputs"].get(
|
618 |
+
t, unselected_cond_outputs.get(t, None)
|
619 |
+
)
|
620 |
+
if out is not None:
|
621 |
+
pos_and_ptrs.append((t_diff, out["obj_ptr"]))
|
622 |
+
# If we have at least one object pointer, add them to the across attention
|
623 |
+
if len(pos_and_ptrs) > 0:
|
624 |
+
pos_list, ptrs_list = zip(*pos_and_ptrs)
|
625 |
+
# stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
|
626 |
+
obj_ptrs = torch.stack(ptrs_list, dim=0)
|
627 |
+
# a temporal positional embedding based on how far each object pointer is from
|
628 |
+
# the current frame (sine embedding normalized by the max pointer num).
|
629 |
+
if self.add_tpos_enc_to_obj_ptrs:
|
630 |
+
t_diff_max = max_obj_ptrs_in_encoder - 1
|
631 |
+
tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim
|
632 |
+
obj_pos = torch.tensor(pos_list, device=device)
|
633 |
+
obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim)
|
634 |
+
obj_pos = self.obj_ptr_tpos_proj(obj_pos)
|
635 |
+
obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim)
|
636 |
+
else:
|
637 |
+
obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim)
|
638 |
+
if self.mem_dim < C:
|
639 |
+
# split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C
|
640 |
+
obj_ptrs = obj_ptrs.reshape(
|
641 |
+
-1, B, C // self.mem_dim, self.mem_dim
|
642 |
+
)
|
643 |
+
obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1)
|
644 |
+
obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0)
|
645 |
+
to_cat_memory.append(obj_ptrs)
|
646 |
+
to_cat_memory_pos_embed.append(obj_pos)
|
647 |
+
num_obj_ptr_tokens = obj_ptrs.shape[0]
|
648 |
+
else:
|
649 |
+
num_obj_ptr_tokens = 0
|
650 |
+
else:
|
651 |
+
# for initial conditioning frames, encode them without using any previous memory
|
652 |
+
if self.directly_add_no_mem_embed:
|
653 |
+
# directly add no-mem embedding (instead of using the transformer encoder)
|
654 |
+
pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed
|
655 |
+
pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
|
656 |
+
return pix_feat_with_mem
|
657 |
+
|
658 |
+
# Use a dummy token on the first frame (to avoid empty memory input to tranformer encoder)
|
659 |
+
to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)]
|
660 |
+
to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)]
|
661 |
+
|
662 |
+
# Step 2: Concatenate the memories and forward through the transformer encoder
|
663 |
+
memory = torch.cat(to_cat_memory, dim=0)
|
664 |
+
memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0)
|
665 |
+
|
666 |
+
pix_feat_with_mem = self.memory_attention(
|
667 |
+
curr=current_vision_feats,
|
668 |
+
curr_pos=current_vision_pos_embeds,
|
669 |
+
memory=memory,
|
670 |
+
memory_pos=memory_pos_embed,
|
671 |
+
num_obj_ptr_tokens=num_obj_ptr_tokens,
|
672 |
+
)
|
673 |
+
# reshape the output (HW)BC => BCHW
|
674 |
+
pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
|
675 |
+
return pix_feat_with_mem
|
676 |
+
|
677 |
+
def _encode_new_memory(
|
678 |
+
self,
|
679 |
+
current_vision_feats,
|
680 |
+
feat_sizes,
|
681 |
+
pred_masks_high_res,
|
682 |
+
object_score_logits,
|
683 |
+
is_mask_from_pts,
|
684 |
+
):
|
685 |
+
"""Encode the current image and its prediction into a memory feature."""
|
686 |
+
B = current_vision_feats[-1].size(1) # batch size on this frame
|
687 |
+
C = self.hidden_dim
|
688 |
+
H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
|
689 |
+
# top-level feature, (HW)BC => BCHW
|
690 |
+
pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
|
691 |
+
if self.non_overlap_masks_for_mem_enc and not self.training:
|
692 |
+
# optionally, apply non-overlapping constraints to the masks (it's applied
|
693 |
+
# in the batch dimension and should only be used during eval, where all
|
694 |
+
# the objects come from the same video under batch size 1).
|
695 |
+
pred_masks_high_res = self._apply_non_overlapping_constraints(
|
696 |
+
pred_masks_high_res
|
697 |
+
)
|
698 |
+
# scale the raw mask logits with a temperature before applying sigmoid
|
699 |
+
binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
|
700 |
+
if binarize and not self.training:
|
701 |
+
mask_for_mem = (pred_masks_high_res > 0).float()
|
702 |
+
else:
|
703 |
+
# apply sigmoid on the raw mask logits to turn them into range (0, 1)
|
704 |
+
mask_for_mem = torch.sigmoid(pred_masks_high_res)
|
705 |
+
# apply scale and bias terms to the sigmoid probabilities
|
706 |
+
if self.sigmoid_scale_for_mem_enc != 1.0:
|
707 |
+
mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
|
708 |
+
if self.sigmoid_bias_for_mem_enc != 0.0:
|
709 |
+
mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
|
710 |
+
maskmem_out = self.memory_encoder(
|
711 |
+
pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied
|
712 |
+
)
|
713 |
+
maskmem_features = maskmem_out["vision_features"]
|
714 |
+
maskmem_pos_enc = maskmem_out["vision_pos_enc"]
|
715 |
+
# add a no-object embedding to the spatial memory to indicate that the frame
|
716 |
+
# is predicted to be occluded (i.e. no object is appearing in the frame)
|
717 |
+
if self.no_obj_embed_spatial is not None:
|
718 |
+
is_obj_appearing = (object_score_logits > 0).float()
|
719 |
+
maskmem_features += (
|
720 |
+
1 - is_obj_appearing[..., None, None]
|
721 |
+
) * self.no_obj_embed_spatial[..., None, None].expand(
|
722 |
+
*maskmem_features.shape
|
723 |
+
)
|
724 |
+
|
725 |
+
return maskmem_features, maskmem_pos_enc
|
726 |
+
|
727 |
+
def _track_step(
|
728 |
+
self,
|
729 |
+
frame_idx,
|
730 |
+
is_init_cond_frame,
|
731 |
+
current_vision_feats,
|
732 |
+
current_vision_pos_embeds,
|
733 |
+
feat_sizes,
|
734 |
+
point_inputs,
|
735 |
+
mask_inputs,
|
736 |
+
output_dict,
|
737 |
+
num_frames,
|
738 |
+
track_in_reverse,
|
739 |
+
prev_sam_mask_logits,
|
740 |
+
):
|
741 |
+
current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs}
|
742 |
+
# High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
|
743 |
+
if len(current_vision_feats) > 1:
|
744 |
+
high_res_features = [
|
745 |
+
x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
|
746 |
+
for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1])
|
747 |
+
]
|
748 |
+
else:
|
749 |
+
high_res_features = None
|
750 |
+
if mask_inputs is not None and self.use_mask_input_as_output_without_sam:
|
751 |
+
# When use_mask_input_as_output_without_sam=True, we directly output the mask input
|
752 |
+
# (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
|
753 |
+
pix_feat = current_vision_feats[-1].permute(1, 2, 0)
|
754 |
+
pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1])
|
755 |
+
sam_outputs = self._use_mask_as_output(
|
756 |
+
pix_feat, high_res_features, mask_inputs
|
757 |
+
)
|
758 |
+
else:
|
759 |
+
# fused the visual feature with previous memory features in the memory bank
|
760 |
+
pix_feat = self._prepare_memory_conditioned_features(
|
761 |
+
frame_idx=frame_idx,
|
762 |
+
is_init_cond_frame=is_init_cond_frame,
|
763 |
+
current_vision_feats=current_vision_feats[-1:],
|
764 |
+
current_vision_pos_embeds=current_vision_pos_embeds[-1:],
|
765 |
+
feat_sizes=feat_sizes[-1:],
|
766 |
+
output_dict=output_dict,
|
767 |
+
num_frames=num_frames,
|
768 |
+
track_in_reverse=track_in_reverse,
|
769 |
+
)
|
770 |
+
# apply SAM-style segmentation head
|
771 |
+
# here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder,
|
772 |
+
# e.g. in demo where such logits come from earlier interaction instead of correction sampling
|
773 |
+
# (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead)
|
774 |
+
if prev_sam_mask_logits is not None:
|
775 |
+
assert point_inputs is not None and mask_inputs is None
|
776 |
+
mask_inputs = prev_sam_mask_logits
|
777 |
+
multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
|
778 |
+
sam_outputs = self._forward_sam_heads(
|
779 |
+
backbone_features=pix_feat,
|
780 |
+
point_inputs=point_inputs,
|
781 |
+
mask_inputs=mask_inputs,
|
782 |
+
high_res_features=high_res_features,
|
783 |
+
multimask_output=multimask_output,
|
784 |
+
)
|
785 |
+
|
786 |
+
return current_out, sam_outputs, high_res_features, pix_feat
|
787 |
+
|
788 |
+
def _encode_memory_in_output(
|
789 |
+
self,
|
790 |
+
current_vision_feats,
|
791 |
+
feat_sizes,
|
792 |
+
point_inputs,
|
793 |
+
run_mem_encoder,
|
794 |
+
high_res_masks,
|
795 |
+
object_score_logits,
|
796 |
+
current_out,
|
797 |
+
):
|
798 |
+
if run_mem_encoder and self.num_maskmem > 0:
|
799 |
+
high_res_masks_for_mem_enc = high_res_masks
|
800 |
+
maskmem_features, maskmem_pos_enc = self._encode_new_memory(
|
801 |
+
current_vision_feats=current_vision_feats,
|
802 |
+
feat_sizes=feat_sizes,
|
803 |
+
pred_masks_high_res=high_res_masks_for_mem_enc,
|
804 |
+
object_score_logits=object_score_logits,
|
805 |
+
is_mask_from_pts=(point_inputs is not None),
|
806 |
+
)
|
807 |
+
current_out["maskmem_features"] = maskmem_features
|
808 |
+
current_out["maskmem_pos_enc"] = maskmem_pos_enc
|
809 |
+
else:
|
810 |
+
current_out["maskmem_features"] = None
|
811 |
+
current_out["maskmem_pos_enc"] = None
|
812 |
+
|
813 |
+
def track_step(
|
814 |
+
self,
|
815 |
+
frame_idx,
|
816 |
+
is_init_cond_frame,
|
817 |
+
current_vision_feats,
|
818 |
+
current_vision_pos_embeds,
|
819 |
+
feat_sizes,
|
820 |
+
point_inputs,
|
821 |
+
mask_inputs,
|
822 |
+
output_dict,
|
823 |
+
num_frames,
|
824 |
+
track_in_reverse=False, # tracking in reverse time order (for demo usage)
|
825 |
+
# Whether to run the memory encoder on the predicted masks. Sometimes we might want
|
826 |
+
# to skip the memory encoder with `run_mem_encoder=False`. For example,
|
827 |
+
# in demo we might call `track_step` multiple times for each user click,
|
828 |
+
# and only encode the memory when the user finalizes their clicks. And in ablation
|
829 |
+
# settings like SAM training on static images, we don't need the memory encoder.
|
830 |
+
run_mem_encoder=True,
|
831 |
+
# The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
|
832 |
+
prev_sam_mask_logits=None,
|
833 |
+
):
|
834 |
+
current_out, sam_outputs, _, _ = self._track_step(
|
835 |
+
frame_idx,
|
836 |
+
is_init_cond_frame,
|
837 |
+
current_vision_feats,
|
838 |
+
current_vision_pos_embeds,
|
839 |
+
feat_sizes,
|
840 |
+
point_inputs,
|
841 |
+
mask_inputs,
|
842 |
+
output_dict,
|
843 |
+
num_frames,
|
844 |
+
track_in_reverse,
|
845 |
+
prev_sam_mask_logits,
|
846 |
+
)
|
847 |
+
|
848 |
+
(
|
849 |
+
_,
|
850 |
+
_,
|
851 |
+
_,
|
852 |
+
low_res_masks,
|
853 |
+
high_res_masks,
|
854 |
+
obj_ptr,
|
855 |
+
object_score_logits,
|
856 |
+
) = sam_outputs
|
857 |
+
|
858 |
+
current_out["pred_masks"] = low_res_masks
|
859 |
+
current_out["pred_masks_high_res"] = high_res_masks
|
860 |
+
current_out["obj_ptr"] = obj_ptr
|
861 |
+
if not self.training:
|
862 |
+
# Only add this in inference (to avoid unused param in activation checkpointing;
|
863 |
+
# it's mainly used in the demo to encode spatial memories w/ consolidated masks)
|
864 |
+
current_out["object_score_logits"] = object_score_logits
|
865 |
+
|
866 |
+
# Finally run the memory encoder on the predicted mask to encode
|
867 |
+
# it into a new memory feature (that can be used in future frames)
|
868 |
+
self._encode_memory_in_output(
|
869 |
+
current_vision_feats,
|
870 |
+
feat_sizes,
|
871 |
+
point_inputs,
|
872 |
+
run_mem_encoder,
|
873 |
+
high_res_masks,
|
874 |
+
object_score_logits,
|
875 |
+
current_out,
|
876 |
+
)
|
877 |
+
|
878 |
+
return current_out
|
879 |
+
|
880 |
+
def _use_multimask(self, is_init_cond_frame, point_inputs):
|
881 |
+
"""Whether to use multimask output in the SAM head."""
|
882 |
+
num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
|
883 |
+
multimask_output = (
|
884 |
+
self.multimask_output_in_sam
|
885 |
+
and (is_init_cond_frame or self.multimask_output_for_tracking)
|
886 |
+
and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num)
|
887 |
+
)
|
888 |
+
return multimask_output
|
889 |
+
|
890 |
+
def _apply_non_overlapping_constraints(self, pred_masks):
|
891 |
+
"""
|
892 |
+
Apply non-overlapping constraints to the object scores in pred_masks. Here we
|
893 |
+
keep only the highest scoring object at each spatial location in pred_masks.
|
894 |
+
"""
|
895 |
+
batch_size = pred_masks.size(0)
|
896 |
+
if batch_size == 1:
|
897 |
+
return pred_masks
|
898 |
+
|
899 |
+
device = pred_masks.device
|
900 |
+
# "max_obj_inds": object index of the object with the highest score at each location
|
901 |
+
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
|
902 |
+
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
|
903 |
+
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
|
904 |
+
keep = max_obj_inds == batch_obj_inds
|
905 |
+
# suppress overlapping regions' scores below -10.0 so that the foreground regions
|
906 |
+
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
|
907 |
+
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
|
908 |
+
return pred_masks
|
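
For reference, the non-overlapping constraint above can be exercised in isolation. The following is a minimal standalone sketch of the same idea on toy logits (not the repo's class, just the core argmax-and-clamp step):

```python
import torch

def apply_non_overlapping_constraints(pred_masks: torch.Tensor) -> torch.Tensor:
    # pred_masks: [num_objects, 1, H, W] mask logits, one slice per object
    if pred_masks.size(0) == 1:
        return pred_masks
    # index of the highest-scoring object at each spatial location
    max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
    batch_obj_inds = torch.arange(pred_masks.size(0), device=pred_masks.device)[:, None, None, None]
    keep = max_obj_inds == batch_obj_inds
    # clamp losing objects below -10 so sigmoid(logit) is ~0 and foreground regions never overlap
    return torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))

logits = torch.randn(3, 1, 4, 4)  # 3 objects on a toy 4x4 grid
constrained = apply_non_overlapping_constraints(logits)
assert ((constrained > 0).sum(dim=0) <= 1).all()  # at most one positive object per pixel
```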
eval/grounded_sam/sam2/modeling/sam2_utils.py
ADDED
@@ -0,0 +1,323 @@
1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
# This source code is licensed under the license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
|
8 |
+
import copy
|
9 |
+
from typing import Tuple
|
10 |
+
|
11 |
+
import numpy as np
|
12 |
+
import torch
|
13 |
+
import torch.nn as nn
|
14 |
+
import torch.nn.functional as F
|
15 |
+
|
16 |
+
from sam2.utils.misc import mask_to_box
|
17 |
+
|
18 |
+
|
19 |
+
def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num):
|
20 |
+
"""
|
21 |
+
Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs`
|
22 |
+
that are temporally closest to the current frame at `frame_idx`. Here, we take
|
23 |
+
- a) the closest conditioning frame before `frame_idx` (if any);
|
24 |
+
- b) the closest conditioning frame after `frame_idx` (if any);
|
25 |
+
- c) any other temporally closest conditioning frames until reaching a total
|
26 |
+
of `max_cond_frame_num` conditioning frames.
|
27 |
+
|
28 |
+
Outputs:
|
29 |
+
- selected_outputs: selected items (keys & values) from `cond_frame_outputs`.
|
30 |
+
- unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`.
|
31 |
+
"""
|
32 |
+
if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
|
33 |
+
selected_outputs = cond_frame_outputs
|
34 |
+
unselected_outputs = {}
|
35 |
+
else:
|
36 |
+
assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
|
37 |
+
selected_outputs = {}
|
38 |
+
|
39 |
+
# the closest conditioning frame before `frame_idx` (if any)
|
40 |
+
idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
|
41 |
+
if idx_before is not None:
|
42 |
+
selected_outputs[idx_before] = cond_frame_outputs[idx_before]
|
43 |
+
|
44 |
+
# the closest conditioning frame after `frame_idx` (if any)
|
45 |
+
idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
|
46 |
+
if idx_after is not None:
|
47 |
+
selected_outputs[idx_after] = cond_frame_outputs[idx_after]
|
48 |
+
|
49 |
+
# add other temporally closest conditioning frames until reaching a total
|
50 |
+
# of `max_cond_frame_num` conditioning frames.
|
51 |
+
num_remain = max_cond_frame_num - len(selected_outputs)
|
52 |
+
inds_remain = sorted(
|
53 |
+
(t for t in cond_frame_outputs if t not in selected_outputs),
|
54 |
+
key=lambda x: abs(x - frame_idx),
|
55 |
+
)[:num_remain]
|
56 |
+
selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
|
57 |
+
unselected_outputs = {
|
58 |
+
t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs
|
59 |
+
}
|
60 |
+
|
61 |
+
return selected_outputs, unselected_outputs
|
62 |
+
|
63 |
+
|
64 |
+
def get_1d_sine_pe(pos_inds, dim, temperature=10000):
|
65 |
+
"""
|
66 |
+
Get 1D sine positional embedding as in the original Transformer paper.
|
67 |
+
"""
|
68 |
+
pe_dim = dim // 2
|
69 |
+
dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device)
|
70 |
+
dim_t = temperature ** (2 * (dim_t // 2) / pe_dim)
|
71 |
+
|
72 |
+
pos_embed = pos_inds.unsqueeze(-1) / dim_t
|
73 |
+
pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
|
74 |
+
return pos_embed
|
75 |
+
|
76 |
+
|
77 |
+
def get_activation_fn(activation):
|
78 |
+
"""Return an activation function given a string"""
|
79 |
+
if activation == "relu":
|
80 |
+
return F.relu
|
81 |
+
if activation == "gelu":
|
82 |
+
return F.gelu
|
83 |
+
if activation == "glu":
|
84 |
+
return F.glu
|
85 |
+
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
|
86 |
+
|
87 |
+
|
88 |
+
def get_clones(module, N):
|
89 |
+
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
|
90 |
+
|
91 |
+
|
92 |
+
class DropPath(nn.Module):
|
93 |
+
# adapted from https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py
|
94 |
+
def __init__(self, drop_prob=0.0, scale_by_keep=True):
|
95 |
+
super(DropPath, self).__init__()
|
96 |
+
self.drop_prob = drop_prob
|
97 |
+
self.scale_by_keep = scale_by_keep
|
98 |
+
|
99 |
+
def forward(self, x):
|
100 |
+
if self.drop_prob == 0.0 or not self.training:
|
101 |
+
return x
|
102 |
+
keep_prob = 1 - self.drop_prob
|
103 |
+
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
|
104 |
+
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
|
105 |
+
if keep_prob > 0.0 and self.scale_by_keep:
|
106 |
+
random_tensor.div_(keep_prob)
|
107 |
+
return x * random_tensor
|
108 |
+
|
109 |
+
|
110 |
+
# Lightly adapted from
|
111 |
+
# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
|
112 |
+
class MLP(nn.Module):
|
113 |
+
def __init__(
|
114 |
+
self,
|
115 |
+
input_dim: int,
|
116 |
+
hidden_dim: int,
|
117 |
+
output_dim: int,
|
118 |
+
num_layers: int,
|
119 |
+
activation: nn.Module = nn.ReLU,
|
120 |
+
sigmoid_output: bool = False,
|
121 |
+
) -> None:
|
122 |
+
super().__init__()
|
123 |
+
self.num_layers = num_layers
|
124 |
+
h = [hidden_dim] * (num_layers - 1)
|
125 |
+
self.layers = nn.ModuleList(
|
126 |
+
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
|
127 |
+
)
|
128 |
+
self.sigmoid_output = sigmoid_output
|
129 |
+
self.act = activation()
|
130 |
+
|
131 |
+
def forward(self, x):
|
132 |
+
for i, layer in enumerate(self.layers):
|
133 |
+
x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x)
|
134 |
+
if self.sigmoid_output:
|
135 |
+
x = F.sigmoid(x)
|
136 |
+
return x
|
137 |
+
|
138 |
+
|
139 |
+
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
|
140 |
+
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
|
141 |
+
class LayerNorm2d(nn.Module):
|
142 |
+
def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
|
143 |
+
super().__init__()
|
144 |
+
self.weight = nn.Parameter(torch.ones(num_channels))
|
145 |
+
self.bias = nn.Parameter(torch.zeros(num_channels))
|
146 |
+
self.eps = eps
|
147 |
+
|
148 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
149 |
+
u = x.mean(1, keepdim=True)
|
150 |
+
s = (x - u).pow(2).mean(1, keepdim=True)
|
151 |
+
x = (x - u) / torch.sqrt(s + self.eps)
|
152 |
+
x = self.weight[:, None, None] * x + self.bias[:, None, None]
|
153 |
+
return x
|
154 |
+
|
155 |
+
|
156 |
+
def sample_box_points(
|
157 |
+
masks: torch.Tensor,
|
158 |
+
noise: float = 0.1, # SAM default
|
159 |
+
noise_bound: int = 20, # SAM default
|
160 |
+
top_left_label: int = 2,
|
161 |
+
bottom_right_label: int = 3,
|
162 |
+
) -> Tuple[np.array, np.array]:
|
163 |
+
"""
|
164 |
+
Sample a noised version of the top left and bottom right corners of a given `bbox`
|
165 |
+
|
166 |
+
Inputs:
|
167 |
+
- masks: [B, 1, H, W] binary masks, dtype=torch.Tensor
|
168 |
+
- noise: noise as a fraction of box width and height, dtype=float
|
169 |
+
- noise_bound: maximum amount of noise (in pure pixels), dtype=int
|
170 |
+
|
171 |
+
Returns:
|
172 |
+
- box_coords: [B, num_pt, 2], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.float
|
173 |
+
- box_labels: [B, num_pt], label 2 is reserved for top left and 3 for bottom right corners, dtype=torch.int32
|
174 |
+
"""
|
175 |
+
device = masks.device
|
176 |
+
box_coords = mask_to_box(masks)
|
177 |
+
B, _, H, W = masks.shape
|
178 |
+
box_labels = torch.tensor(
|
179 |
+
[top_left_label, bottom_right_label], dtype=torch.int, device=device
|
180 |
+
).repeat(B)
|
181 |
+
if noise > 0.0:
|
182 |
+
if not isinstance(noise_bound, torch.Tensor):
|
183 |
+
noise_bound = torch.tensor(noise_bound, device=device)
|
184 |
+
bbox_w = box_coords[..., 2] - box_coords[..., 0]
|
185 |
+
bbox_h = box_coords[..., 3] - box_coords[..., 1]
|
186 |
+
max_dx = torch.min(bbox_w * noise, noise_bound)
|
187 |
+
max_dy = torch.min(bbox_h * noise, noise_bound)
|
188 |
+
box_noise = 2 * torch.rand(B, 1, 4, device=device) - 1
|
189 |
+
box_noise = box_noise * torch.stack((max_dx, max_dy, max_dx, max_dy), dim=-1)
|
190 |
+
|
191 |
+
box_coords = box_coords + box_noise
|
192 |
+
img_bounds = (
|
193 |
+
torch.tensor([W, H, W, H], device=device) - 1
|
194 |
+
) # uncentered pixel coords
|
195 |
+
box_coords.clamp_(torch.zeros_like(img_bounds), img_bounds) # In place clamping
|
196 |
+
|
197 |
+
box_coords = box_coords.reshape(-1, 2, 2) # always 2 points
|
198 |
+
box_labels = box_labels.reshape(-1, 2)
|
199 |
+
return box_coords, box_labels
|
200 |
+
|
201 |
+
|
202 |
+
def sample_random_points_from_errors(gt_masks, pred_masks, num_pt=1):
|
203 |
+
"""
|
204 |
+
Sample `num_pt` random points (along with their labels) independently from the error regions.
|
205 |
+
|
206 |
+
Inputs:
|
207 |
+
- gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
|
208 |
+
- pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
|
209 |
+
- num_pt: int, number of points to sample independently for each of the B error maps
|
210 |
+
|
211 |
+
Outputs:
|
212 |
+
- points: [B, num_pt, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
|
213 |
+
- labels: [B, num_pt], dtype=torch.int32, where 1 means positive clicks and 0 means
|
214 |
+
negative clicks
|
215 |
+
"""
|
216 |
+
if pred_masks is None: # if pred_masks is not provided, treat it as empty
|
217 |
+
pred_masks = torch.zeros_like(gt_masks)
|
218 |
+
assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
|
219 |
+
assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape
|
220 |
+
assert num_pt >= 0
|
221 |
+
|
222 |
+
B, _, H_im, W_im = gt_masks.shape
|
223 |
+
device = gt_masks.device
|
224 |
+
|
225 |
+
# false positive region, a new point sampled in this region should have
|
226 |
+
# negative label to correct the FP error
|
227 |
+
fp_masks = ~gt_masks & pred_masks
|
228 |
+
# false negative region, a new point sampled in this region should have
|
229 |
+
# positive label to correct the FN error
|
230 |
+
fn_masks = gt_masks & ~pred_masks
|
231 |
+
# whether the prediction completely matches the ground truth on each mask
|
232 |
+
all_correct = torch.all((gt_masks == pred_masks).flatten(2), dim=2)
|
233 |
+
all_correct = all_correct[..., None, None]
|
234 |
+
|
235 |
+
# channel 0 is FP map, while channel 1 is FN map
|
236 |
+
pts_noise = torch.rand(B, num_pt, H_im, W_im, 2, device=device)
|
237 |
+
# sample a negative new click from FP region or a positive new click
|
238 |
+
# from FN region, depending on where the maximum falls,
|
239 |
+
# and in case the predictions are all correct (no FP or FN), we just
|
240 |
+
# sample a negative click from the background region
|
241 |
+
pts_noise[..., 0] *= fp_masks | (all_correct & ~gt_masks)
|
242 |
+
pts_noise[..., 1] *= fn_masks
|
243 |
+
pts_idx = pts_noise.flatten(2).argmax(dim=2)
|
244 |
+
labels = (pts_idx % 2).to(torch.int32)
|
245 |
+
pts_idx = pts_idx // 2
|
246 |
+
pts_x = pts_idx % W_im
|
247 |
+
pts_y = pts_idx // W_im
|
248 |
+
points = torch.stack([pts_x, pts_y], dim=2).to(torch.float)
|
249 |
+
return points, labels
|
250 |
+
|
251 |
+
|
252 |
+
def sample_one_point_from_error_center(gt_masks, pred_masks, padding=True):
|
253 |
+
"""
|
254 |
+
Sample 1 random point (along with its label) from the center of each error region,
|
255 |
+
that is, the point with the largest distance to the boundary of each error region.
|
256 |
+
This is the RITM sampling method from https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py
|
257 |
+
|
258 |
+
Inputs:
|
259 |
+
- gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
|
260 |
+
- pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
|
261 |
+
- padding: if True, pad with boundary of 1 px for distance transform
|
262 |
+
|
263 |
+
Outputs:
|
264 |
+
- points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
|
265 |
+
- labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks
|
266 |
+
"""
|
267 |
+
import cv2
|
268 |
+
|
269 |
+
if pred_masks is None:
|
270 |
+
pred_masks = torch.zeros_like(gt_masks)
|
271 |
+
assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
|
272 |
+
assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape
|
273 |
+
|
274 |
+
B, _, _, W_im = gt_masks.shape
|
275 |
+
device = gt_masks.device
|
276 |
+
|
277 |
+
# false positive region, a new point sampled in this region should have
|
278 |
+
# negative label to correct the FP error
|
279 |
+
fp_masks = ~gt_masks & pred_masks
|
280 |
+
# false negative region, a new point sampled in this region should have
|
281 |
+
# positive label to correct the FN error
|
282 |
+
fn_masks = gt_masks & ~pred_masks
|
283 |
+
|
284 |
+
fp_masks = fp_masks.cpu().numpy()
|
285 |
+
fn_masks = fn_masks.cpu().numpy()
|
286 |
+
points = torch.zeros(B, 1, 2, dtype=torch.float)
|
287 |
+
labels = torch.ones(B, 1, dtype=torch.int32)
|
288 |
+
for b in range(B):
|
289 |
+
fn_mask = fn_masks[b, 0]
|
290 |
+
fp_mask = fp_masks[b, 0]
|
291 |
+
if padding:
|
292 |
+
fn_mask = np.pad(fn_mask, ((1, 1), (1, 1)), "constant")
|
293 |
+
fp_mask = np.pad(fp_mask, ((1, 1), (1, 1)), "constant")
|
294 |
+
# compute the distance of each point in FN/FP region to its boundary
|
295 |
+
fn_mask_dt = cv2.distanceTransform(fn_mask.astype(np.uint8), cv2.DIST_L2, 0)
|
296 |
+
fp_mask_dt = cv2.distanceTransform(fp_mask.astype(np.uint8), cv2.DIST_L2, 0)
|
297 |
+
if padding:
|
298 |
+
fn_mask_dt = fn_mask_dt[1:-1, 1:-1]
|
299 |
+
fp_mask_dt = fp_mask_dt[1:-1, 1:-1]
|
300 |
+
|
301 |
+
# take the point in FN/FP region with the largest distance to its boundary
|
302 |
+
fn_mask_dt_flat = fn_mask_dt.reshape(-1)
|
303 |
+
fp_mask_dt_flat = fp_mask_dt.reshape(-1)
|
304 |
+
fn_argmax = np.argmax(fn_mask_dt_flat)
|
305 |
+
fp_argmax = np.argmax(fp_mask_dt_flat)
|
306 |
+
is_positive = fn_mask_dt_flat[fn_argmax] > fp_mask_dt_flat[fp_argmax]
|
307 |
+
pt_idx = fn_argmax if is_positive else fp_argmax
|
308 |
+
points[b, 0, 0] = pt_idx % W_im # x
|
309 |
+
points[b, 0, 1] = pt_idx // W_im # y
|
310 |
+
labels[b, 0] = int(is_positive)
|
311 |
+
|
312 |
+
points = points.to(device)
|
313 |
+
labels = labels.to(device)
|
314 |
+
return points, labels
|
315 |
+
|
316 |
+
|
317 |
+
def get_next_point(gt_masks, pred_masks, method):
|
318 |
+
if method == "uniform":
|
319 |
+
return sample_random_points_from_errors(gt_masks, pred_masks)
|
320 |
+
elif method == "center":
|
321 |
+
return sample_one_point_from_error_center(gt_masks, pred_masks)
|
322 |
+
else:
|
323 |
+
raise ValueError(f"unknown sampling method {method}")
|
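
As a rough illustration of how the helpers in sam2_utils.py behave, the snippet below calls two of them on toy tensors; shapes follow the docstrings above, and nothing here is taken from the repo's own calling code:

```python
import torch
from sam2.modeling.sam2_utils import get_1d_sine_pe, sample_random_points_from_errors

# 256-dim sine positional embeddings for frame indices 0..6
pe = get_1d_sine_pe(torch.arange(7, dtype=torch.float32), dim=256)
print(pe.shape)  # torch.Size([7, 256])

# sample one corrective click per mask from the FP/FN error regions
gt = torch.zeros(2, 1, 64, 64, dtype=torch.bool)
gt[:, :, 16:48, 16:48] = True        # ground-truth squares
pred = torch.zeros_like(gt)          # empty prediction, so every GT pixel is a FN error
points, labels = sample_random_points_from_errors(gt, pred, num_pt=1)
print(points.shape, labels.shape)    # torch.Size([2, 1, 2]) and torch.Size([2, 1]); labels are 1 (positive clicks)
```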
eval/grounded_sam/sam2/sam2_hiera_b+.yaml
ADDED
@@ -0,0 +1,113 @@
1 |
+
# @package _global_
|
2 |
+
|
3 |
+
# Model
|
4 |
+
model:
|
5 |
+
_target_: sam2.modeling.sam2_base.SAM2Base
|
6 |
+
image_encoder:
|
7 |
+
_target_: sam2.modeling.backbones.image_encoder.ImageEncoder
|
8 |
+
scalp: 1
|
9 |
+
trunk:
|
10 |
+
_target_: sam2.modeling.backbones.hieradet.Hiera
|
11 |
+
embed_dim: 112
|
12 |
+
num_heads: 2
|
13 |
+
neck:
|
14 |
+
_target_: sam2.modeling.backbones.image_encoder.FpnNeck
|
15 |
+
position_encoding:
|
16 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
17 |
+
num_pos_feats: 256
|
18 |
+
normalize: true
|
19 |
+
scale: null
|
20 |
+
temperature: 10000
|
21 |
+
d_model: 256
|
22 |
+
backbone_channel_list: [896, 448, 224, 112]
|
23 |
+
fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
|
24 |
+
fpn_interp_model: nearest
|
25 |
+
|
26 |
+
memory_attention:
|
27 |
+
_target_: sam2.modeling.memory_attention.MemoryAttention
|
28 |
+
d_model: 256
|
29 |
+
pos_enc_at_input: true
|
30 |
+
layer:
|
31 |
+
_target_: sam2.modeling.memory_attention.MemoryAttentionLayer
|
32 |
+
activation: relu
|
33 |
+
dim_feedforward: 2048
|
34 |
+
dropout: 0.1
|
35 |
+
pos_enc_at_attn: false
|
36 |
+
self_attention:
|
37 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
38 |
+
rope_theta: 10000.0
|
39 |
+
feat_sizes: [32, 32]
|
40 |
+
embedding_dim: 256
|
41 |
+
num_heads: 1
|
42 |
+
downsample_rate: 1
|
43 |
+
dropout: 0.1
|
44 |
+
d_model: 256
|
45 |
+
pos_enc_at_cross_attn_keys: true
|
46 |
+
pos_enc_at_cross_attn_queries: false
|
47 |
+
cross_attention:
|
48 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
49 |
+
rope_theta: 10000.0
|
50 |
+
feat_sizes: [32, 32]
|
51 |
+
rope_k_repeat: True
|
52 |
+
embedding_dim: 256
|
53 |
+
num_heads: 1
|
54 |
+
downsample_rate: 1
|
55 |
+
dropout: 0.1
|
56 |
+
kv_in_dim: 64
|
57 |
+
num_layers: 4
|
58 |
+
|
59 |
+
memory_encoder:
|
60 |
+
_target_: sam2.modeling.memory_encoder.MemoryEncoder
|
61 |
+
out_dim: 64
|
62 |
+
position_encoding:
|
63 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
64 |
+
num_pos_feats: 64
|
65 |
+
normalize: true
|
66 |
+
scale: null
|
67 |
+
temperature: 10000
|
68 |
+
mask_downsampler:
|
69 |
+
_target_: sam2.modeling.memory_encoder.MaskDownSampler
|
70 |
+
kernel_size: 3
|
71 |
+
stride: 2
|
72 |
+
padding: 1
|
73 |
+
fuser:
|
74 |
+
_target_: sam2.modeling.memory_encoder.Fuser
|
75 |
+
layer:
|
76 |
+
_target_: sam2.modeling.memory_encoder.CXBlock
|
77 |
+
dim: 256
|
78 |
+
kernel_size: 7
|
79 |
+
padding: 3
|
80 |
+
layer_scale_init_value: 1e-6
|
81 |
+
use_dwconv: True # depth-wise convs
|
82 |
+
num_layers: 2
|
83 |
+
|
84 |
+
num_maskmem: 7
|
85 |
+
image_size: 1024
|
86 |
+
# apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
|
87 |
+
sigmoid_scale_for_mem_enc: 20.0
|
88 |
+
sigmoid_bias_for_mem_enc: -10.0
|
89 |
+
use_mask_input_as_output_without_sam: true
|
90 |
+
# Memory
|
91 |
+
directly_add_no_mem_embed: true
|
92 |
+
# use high-resolution feature map in the SAM mask decoder
|
93 |
+
use_high_res_features_in_sam: true
|
94 |
+
# output 3 masks on the first click on initial conditioning frames
|
95 |
+
multimask_output_in_sam: true
|
96 |
+
# SAM heads
|
97 |
+
iou_prediction_use_sigmoid: True
|
98 |
+
# cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
|
99 |
+
use_obj_ptrs_in_encoder: true
|
100 |
+
add_tpos_enc_to_obj_ptrs: false
|
101 |
+
only_obj_ptrs_in_the_past_for_eval: true
|
102 |
+
# object occlusion prediction
|
103 |
+
pred_obj_scores: true
|
104 |
+
pred_obj_scores_mlp: true
|
105 |
+
fixed_no_obj_ptr: true
|
106 |
+
# multimask tracking settings
|
107 |
+
multimask_output_for_tracking: true
|
108 |
+
use_multimask_token_for_obj_ptr: true
|
109 |
+
multimask_min_pt_num: 0
|
110 |
+
multimask_max_pt_num: 1
|
111 |
+
use_mlp_for_obj_ptr_proj: true
|
112 |
+
# Compilation flag
|
113 |
+
compile_image_encoder: False
|
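
Each _target_ entry in the config above names the class that Hydra instantiates when the model is built. A typical way to load it is through build_sam2 from this package; the checkpoint path below is only a placeholder, not a file shipped in this Space:

```python
import torch
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor

device = "cuda" if torch.cuda.is_available() else "cpu"
# the config name is resolved by Hydra relative to the sam2 package
sam2_model = build_sam2(
    "sam2_hiera_b+.yaml",
    ckpt_path="checkpoints/sam2_hiera_base_plus.pt",  # placeholder checkpoint path
    device=device,
)
predictor = SAM2ImagePredictor(sam2_model)
```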
eval/grounded_sam/sam2/sam2_hiera_l.yaml
ADDED
@@ -0,0 +1,117 @@
1 |
+
# @package _global_
|
2 |
+
|
3 |
+
# Model
|
4 |
+
model:
|
5 |
+
_target_: sam2.modeling.sam2_base.SAM2Base
|
6 |
+
image_encoder:
|
7 |
+
_target_: sam2.modeling.backbones.image_encoder.ImageEncoder
|
8 |
+
scalp: 1
|
9 |
+
trunk:
|
10 |
+
_target_: sam2.modeling.backbones.hieradet.Hiera
|
11 |
+
embed_dim: 144
|
12 |
+
num_heads: 2
|
13 |
+
stages: [2, 6, 36, 4]
|
14 |
+
global_att_blocks: [23, 33, 43]
|
15 |
+
window_pos_embed_bkg_spatial_size: [7, 7]
|
16 |
+
window_spec: [8, 4, 16, 8]
|
17 |
+
neck:
|
18 |
+
_target_: sam2.modeling.backbones.image_encoder.FpnNeck
|
19 |
+
position_encoding:
|
20 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
21 |
+
num_pos_feats: 256
|
22 |
+
normalize: true
|
23 |
+
scale: null
|
24 |
+
temperature: 10000
|
25 |
+
d_model: 256
|
26 |
+
backbone_channel_list: [1152, 576, 288, 144]
|
27 |
+
fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
|
28 |
+
fpn_interp_model: nearest
|
29 |
+
|
30 |
+
memory_attention:
|
31 |
+
_target_: sam2.modeling.memory_attention.MemoryAttention
|
32 |
+
d_model: 256
|
33 |
+
pos_enc_at_input: true
|
34 |
+
layer:
|
35 |
+
_target_: sam2.modeling.memory_attention.MemoryAttentionLayer
|
36 |
+
activation: relu
|
37 |
+
dim_feedforward: 2048
|
38 |
+
dropout: 0.1
|
39 |
+
pos_enc_at_attn: false
|
40 |
+
self_attention:
|
41 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
42 |
+
rope_theta: 10000.0
|
43 |
+
feat_sizes: [32, 32]
|
44 |
+
embedding_dim: 256
|
45 |
+
num_heads: 1
|
46 |
+
downsample_rate: 1
|
47 |
+
dropout: 0.1
|
48 |
+
d_model: 256
|
49 |
+
pos_enc_at_cross_attn_keys: true
|
50 |
+
pos_enc_at_cross_attn_queries: false
|
51 |
+
cross_attention:
|
52 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
53 |
+
rope_theta: 10000.0
|
54 |
+
feat_sizes: [32, 32]
|
55 |
+
rope_k_repeat: True
|
56 |
+
embedding_dim: 256
|
57 |
+
num_heads: 1
|
58 |
+
downsample_rate: 1
|
59 |
+
dropout: 0.1
|
60 |
+
kv_in_dim: 64
|
61 |
+
num_layers: 4
|
62 |
+
|
63 |
+
memory_encoder:
|
64 |
+
_target_: sam2.modeling.memory_encoder.MemoryEncoder
|
65 |
+
out_dim: 64
|
66 |
+
position_encoding:
|
67 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
68 |
+
num_pos_feats: 64
|
69 |
+
normalize: true
|
70 |
+
scale: null
|
71 |
+
temperature: 10000
|
72 |
+
mask_downsampler:
|
73 |
+
_target_: sam2.modeling.memory_encoder.MaskDownSampler
|
74 |
+
kernel_size: 3
|
75 |
+
stride: 2
|
76 |
+
padding: 1
|
77 |
+
fuser:
|
78 |
+
_target_: sam2.modeling.memory_encoder.Fuser
|
79 |
+
layer:
|
80 |
+
_target_: sam2.modeling.memory_encoder.CXBlock
|
81 |
+
dim: 256
|
82 |
+
kernel_size: 7
|
83 |
+
padding: 3
|
84 |
+
layer_scale_init_value: 1e-6
|
85 |
+
use_dwconv: True # depth-wise convs
|
86 |
+
num_layers: 2
|
87 |
+
|
88 |
+
num_maskmem: 7
|
89 |
+
image_size: 1024
|
90 |
+
# apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
|
91 |
+
sigmoid_scale_for_mem_enc: 20.0
|
92 |
+
sigmoid_bias_for_mem_enc: -10.0
|
93 |
+
use_mask_input_as_output_without_sam: true
|
94 |
+
# Memory
|
95 |
+
directly_add_no_mem_embed: true
|
96 |
+
# use high-resolution feature map in the SAM mask decoder
|
97 |
+
use_high_res_features_in_sam: true
|
98 |
+
# output 3 masks on the first click on initial conditioning frames
|
99 |
+
multimask_output_in_sam: true
|
100 |
+
# SAM heads
|
101 |
+
iou_prediction_use_sigmoid: True
|
102 |
+
# cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
|
103 |
+
use_obj_ptrs_in_encoder: true
|
104 |
+
add_tpos_enc_to_obj_ptrs: false
|
105 |
+
only_obj_ptrs_in_the_past_for_eval: true
|
106 |
+
# object occlusion prediction
|
107 |
+
pred_obj_scores: true
|
108 |
+
pred_obj_scores_mlp: true
|
109 |
+
fixed_no_obj_ptr: true
|
110 |
+
# multimask tracking settings
|
111 |
+
multimask_output_for_tracking: true
|
112 |
+
use_multimask_token_for_obj_ptr: true
|
113 |
+
multimask_min_pt_num: 0
|
114 |
+
multimask_max_pt_num: 1
|
115 |
+
use_mlp_for_obj_ptr_proj: true
|
116 |
+
# Compilation flag
|
117 |
+
compile_image_encoder: False
|
eval/grounded_sam/sam2/sam2_hiera_s.yaml
ADDED
@@ -0,0 +1,116 @@
1 |
+
# @package _global_
|
2 |
+
|
3 |
+
# Model
|
4 |
+
model:
|
5 |
+
_target_: sam2.modeling.sam2_base.SAM2Base
|
6 |
+
image_encoder:
|
7 |
+
_target_: sam2.modeling.backbones.image_encoder.ImageEncoder
|
8 |
+
scalp: 1
|
9 |
+
trunk:
|
10 |
+
_target_: sam2.modeling.backbones.hieradet.Hiera
|
11 |
+
embed_dim: 96
|
12 |
+
num_heads: 1
|
13 |
+
stages: [1, 2, 11, 2]
|
14 |
+
global_att_blocks: [7, 10, 13]
|
15 |
+
window_pos_embed_bkg_spatial_size: [7, 7]
|
16 |
+
neck:
|
17 |
+
_target_: sam2.modeling.backbones.image_encoder.FpnNeck
|
18 |
+
position_encoding:
|
19 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
20 |
+
num_pos_feats: 256
|
21 |
+
normalize: true
|
22 |
+
scale: null
|
23 |
+
temperature: 10000
|
24 |
+
d_model: 256
|
25 |
+
backbone_channel_list: [768, 384, 192, 96]
|
26 |
+
fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
|
27 |
+
fpn_interp_model: nearest
|
28 |
+
|
29 |
+
memory_attention:
|
30 |
+
_target_: sam2.modeling.memory_attention.MemoryAttention
|
31 |
+
d_model: 256
|
32 |
+
pos_enc_at_input: true
|
33 |
+
layer:
|
34 |
+
_target_: sam2.modeling.memory_attention.MemoryAttentionLayer
|
35 |
+
activation: relu
|
36 |
+
dim_feedforward: 2048
|
37 |
+
dropout: 0.1
|
38 |
+
pos_enc_at_attn: false
|
39 |
+
self_attention:
|
40 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
41 |
+
rope_theta: 10000.0
|
42 |
+
feat_sizes: [32, 32]
|
43 |
+
embedding_dim: 256
|
44 |
+
num_heads: 1
|
45 |
+
downsample_rate: 1
|
46 |
+
dropout: 0.1
|
47 |
+
d_model: 256
|
48 |
+
pos_enc_at_cross_attn_keys: true
|
49 |
+
pos_enc_at_cross_attn_queries: false
|
50 |
+
cross_attention:
|
51 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
52 |
+
rope_theta: 10000.0
|
53 |
+
feat_sizes: [32, 32]
|
54 |
+
rope_k_repeat: True
|
55 |
+
embedding_dim: 256
|
56 |
+
num_heads: 1
|
57 |
+
downsample_rate: 1
|
58 |
+
dropout: 0.1
|
59 |
+
kv_in_dim: 64
|
60 |
+
num_layers: 4
|
61 |
+
|
62 |
+
memory_encoder:
|
63 |
+
_target_: sam2.modeling.memory_encoder.MemoryEncoder
|
64 |
+
out_dim: 64
|
65 |
+
position_encoding:
|
66 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
67 |
+
num_pos_feats: 64
|
68 |
+
normalize: true
|
69 |
+
scale: null
|
70 |
+
temperature: 10000
|
71 |
+
mask_downsampler:
|
72 |
+
_target_: sam2.modeling.memory_encoder.MaskDownSampler
|
73 |
+
kernel_size: 3
|
74 |
+
stride: 2
|
75 |
+
padding: 1
|
76 |
+
fuser:
|
77 |
+
_target_: sam2.modeling.memory_encoder.Fuser
|
78 |
+
layer:
|
79 |
+
_target_: sam2.modeling.memory_encoder.CXBlock
|
80 |
+
dim: 256
|
81 |
+
kernel_size: 7
|
82 |
+
padding: 3
|
83 |
+
layer_scale_init_value: 1e-6
|
84 |
+
use_dwconv: True # depth-wise convs
|
85 |
+
num_layers: 2
|
86 |
+
|
87 |
+
num_maskmem: 7
|
88 |
+
image_size: 1024
|
89 |
+
# apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
|
90 |
+
sigmoid_scale_for_mem_enc: 20.0
|
91 |
+
sigmoid_bias_for_mem_enc: -10.0
|
92 |
+
use_mask_input_as_output_without_sam: true
|
93 |
+
# Memory
|
94 |
+
directly_add_no_mem_embed: true
|
95 |
+
# use high-resolution feature map in the SAM mask decoder
|
96 |
+
use_high_res_features_in_sam: true
|
97 |
+
# output 3 masks on the first click on initial conditioning frames
|
98 |
+
multimask_output_in_sam: true
|
99 |
+
# SAM heads
|
100 |
+
iou_prediction_use_sigmoid: True
|
101 |
+
# cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
|
102 |
+
use_obj_ptrs_in_encoder: true
|
103 |
+
add_tpos_enc_to_obj_ptrs: false
|
104 |
+
only_obj_ptrs_in_the_past_for_eval: true
|
105 |
+
# object occlusion prediction
|
106 |
+
pred_obj_scores: true
|
107 |
+
pred_obj_scores_mlp: true
|
108 |
+
fixed_no_obj_ptr: true
|
109 |
+
# multimask tracking settings
|
110 |
+
multimask_output_for_tracking: true
|
111 |
+
use_multimask_token_for_obj_ptr: true
|
112 |
+
multimask_min_pt_num: 0
|
113 |
+
multimask_max_pt_num: 1
|
114 |
+
use_mlp_for_obj_ptr_proj: true
|
115 |
+
# Compilation flag
|
116 |
+
compile_image_encoder: False
|
eval/grounded_sam/sam2/sam2_hiera_t.yaml
ADDED
@@ -0,0 +1,118 @@
1 |
+
# @package _global_
|
2 |
+
|
3 |
+
# Model
|
4 |
+
model:
|
5 |
+
_target_: sam2.modeling.sam2_base.SAM2Base
|
6 |
+
image_encoder:
|
7 |
+
_target_: sam2.modeling.backbones.image_encoder.ImageEncoder
|
8 |
+
scalp: 1
|
9 |
+
trunk:
|
10 |
+
_target_: sam2.modeling.backbones.hieradet.Hiera
|
11 |
+
embed_dim: 96
|
12 |
+
num_heads: 1
|
13 |
+
stages: [1, 2, 7, 2]
|
14 |
+
global_att_blocks: [5, 7, 9]
|
15 |
+
window_pos_embed_bkg_spatial_size: [7, 7]
|
16 |
+
neck:
|
17 |
+
_target_: sam2.modeling.backbones.image_encoder.FpnNeck
|
18 |
+
position_encoding:
|
19 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
20 |
+
num_pos_feats: 256
|
21 |
+
normalize: true
|
22 |
+
scale: null
|
23 |
+
temperature: 10000
|
24 |
+
d_model: 256
|
25 |
+
backbone_channel_list: [768, 384, 192, 96]
|
26 |
+
fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
|
27 |
+
fpn_interp_model: nearest
|
28 |
+
|
29 |
+
memory_attention:
|
30 |
+
_target_: sam2.modeling.memory_attention.MemoryAttention
|
31 |
+
d_model: 256
|
32 |
+
pos_enc_at_input: true
|
33 |
+
layer:
|
34 |
+
_target_: sam2.modeling.memory_attention.MemoryAttentionLayer
|
35 |
+
activation: relu
|
36 |
+
dim_feedforward: 2048
|
37 |
+
dropout: 0.1
|
38 |
+
pos_enc_at_attn: false
|
39 |
+
self_attention:
|
40 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
41 |
+
rope_theta: 10000.0
|
42 |
+
feat_sizes: [32, 32]
|
43 |
+
embedding_dim: 256
|
44 |
+
num_heads: 1
|
45 |
+
downsample_rate: 1
|
46 |
+
dropout: 0.1
|
47 |
+
d_model: 256
|
48 |
+
pos_enc_at_cross_attn_keys: true
|
49 |
+
pos_enc_at_cross_attn_queries: false
|
50 |
+
cross_attention:
|
51 |
+
_target_: sam2.modeling.sam.transformer.RoPEAttention
|
52 |
+
rope_theta: 10000.0
|
53 |
+
feat_sizes: [32, 32]
|
54 |
+
rope_k_repeat: True
|
55 |
+
embedding_dim: 256
|
56 |
+
num_heads: 1
|
57 |
+
downsample_rate: 1
|
58 |
+
dropout: 0.1
|
59 |
+
kv_in_dim: 64
|
60 |
+
num_layers: 4
|
61 |
+
|
62 |
+
memory_encoder:
|
63 |
+
_target_: sam2.modeling.memory_encoder.MemoryEncoder
|
64 |
+
out_dim: 64
|
65 |
+
position_encoding:
|
66 |
+
_target_: sam2.modeling.position_encoding.PositionEmbeddingSine
|
67 |
+
num_pos_feats: 64
|
68 |
+
normalize: true
|
69 |
+
scale: null
|
70 |
+
temperature: 10000
|
71 |
+
mask_downsampler:
|
72 |
+
_target_: sam2.modeling.memory_encoder.MaskDownSampler
|
73 |
+
kernel_size: 3
|
74 |
+
stride: 2
|
75 |
+
padding: 1
|
76 |
+
fuser:
|
77 |
+
_target_: sam2.modeling.memory_encoder.Fuser
|
78 |
+
layer:
|
79 |
+
_target_: sam2.modeling.memory_encoder.CXBlock
|
80 |
+
dim: 256
|
81 |
+
kernel_size: 7
|
82 |
+
padding: 3
|
83 |
+
layer_scale_init_value: 1e-6
|
84 |
+
use_dwconv: True # depth-wise convs
|
85 |
+
num_layers: 2
|
86 |
+
|
87 |
+
num_maskmem: 7
|
88 |
+
image_size: 1024
|
89 |
+
# apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
|
90 |
+
# SAM decoder
|
91 |
+
sigmoid_scale_for_mem_enc: 20.0
|
92 |
+
sigmoid_bias_for_mem_enc: -10.0
|
93 |
+
use_mask_input_as_output_without_sam: true
|
94 |
+
# Memory
|
95 |
+
directly_add_no_mem_embed: true
|
96 |
+
# use high-resolution feature map in the SAM mask decoder
|
97 |
+
use_high_res_features_in_sam: true
|
98 |
+
# output 3 masks on the first click on initial conditioning frames
|
99 |
+
multimask_output_in_sam: true
|
100 |
+
# SAM heads
|
101 |
+
iou_prediction_use_sigmoid: True
|
102 |
+
# cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
|
103 |
+
use_obj_ptrs_in_encoder: true
|
104 |
+
add_tpos_enc_to_obj_ptrs: false
|
105 |
+
only_obj_ptrs_in_the_past_for_eval: true
|
106 |
+
# object occlusion prediction
|
107 |
+
pred_obj_scores: true
|
108 |
+
pred_obj_scores_mlp: true
|
109 |
+
fixed_no_obj_ptr: true
|
110 |
+
# multimask tracking settings
|
111 |
+
multimask_output_for_tracking: true
|
112 |
+
use_multimask_token_for_obj_ptr: true
|
113 |
+
multimask_min_pt_num: 0
|
114 |
+
multimask_max_pt_num: 1
|
115 |
+
use_mlp_for_obj_ptr_proj: true
|
116 |
+
# Compilation flag
|
117 |
+
# HieraT does not currently support compilation, should always be set to False
|
118 |
+
compile_image_encoder: False
|
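
The next file implements the SAM2ImagePredictor used above. A minimal single-image prompt sketch follows; the image path, checkpoint path, and click coordinates are illustrative assumptions only:

```python
import numpy as np
import torch
from PIL import Image
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor

device = "cuda" if torch.cuda.is_available() else "cpu"
model = build_sam2("sam2_hiera_t.yaml", "checkpoints/sam2_hiera_tiny.pt", device=device)  # placeholder ckpt
predictor = SAM2ImagePredictor(model)

image = np.array(Image.open("example.jpg").convert("RGB"))  # hypothetical HWC uint8 RGB image
predictor.set_image(image)                                   # compute the image embedding once
masks, scores, low_res_logits = predictor.predict(
    point_coords=np.array([[256, 256]]),  # one (x, y) foreground click
    point_labels=np.array([1]),
    multimask_output=True,                # return 3 candidate masks for an ambiguous click
)
print(masks.shape, scores.shape)          # (3, H, W) binary masks and (3,) quality scores
```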
eval/grounded_sam/sam2/sam2_image_predictor.py
ADDED
@@ -0,0 +1,465 @@
1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
# This source code is licensed under the license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
import logging
|
8 |
+
|
9 |
+
from typing import List, Optional, Tuple, Union
|
10 |
+
|
11 |
+
import numpy as np
|
12 |
+
import torch
|
13 |
+
from PIL.Image import Image
|
14 |
+
|
15 |
+
from sam2.modeling.sam2_base import SAM2Base
|
16 |
+
from sam2.utils.transforms import SAM2Transforms
|
17 |
+
|
18 |
+
|
19 |
+
class SAM2ImagePredictor:
|
20 |
+
def __init__(
|
21 |
+
self,
|
22 |
+
sam_model: SAM2Base,
|
23 |
+
mask_threshold=0.0,
|
24 |
+
max_hole_area=0.0,
|
25 |
+
max_sprinkle_area=0.0,
|
26 |
+
**kwargs,
|
27 |
+
) -> None:
|
28 |
+
"""
|
29 |
+
Uses SAM-2 to calculate the image embedding for an image, and then
|
30 |
+
allow repeated, efficient mask prediction given prompts.
|
31 |
+
|
32 |
+
Arguments:
|
33 |
+
sam_model (Sam-2): The model to use for mask prediction.
|
34 |
+
mask_threshold (float): The threshold to use when converting mask logits
|
35 |
+
to binary masks. Masks are thresholded at 0 by default.
|
36 |
+
max_hole_area (int): If max_hole_area > 0, we fill small holes in up to
|
37 |
+
the maximum area of max_hole_area in low_res_masks.
|
38 |
+
max_sprinkle_area (int): If max_sprinkle_area > 0, we remove small sprinkles up to
|
39 |
+
the maximum area of max_sprinkle_area in low_res_masks.
|
40 |
+
"""
|
41 |
+
super().__init__()
|
42 |
+
self.model = sam_model
|
43 |
+
self._transforms = SAM2Transforms(
|
44 |
+
resolution=self.model.image_size,
|
45 |
+
mask_threshold=mask_threshold,
|
46 |
+
max_hole_area=max_hole_area,
|
47 |
+
max_sprinkle_area=max_sprinkle_area,
|
48 |
+
)
|
49 |
+
|
50 |
+
# Predictor state
|
51 |
+
self._is_image_set = False
|
52 |
+
self._features = None
|
53 |
+
self._orig_hw = None
|
54 |
+
# Whether the predictor is set for single image or a batch of images
|
55 |
+
self._is_batch = False
|
56 |
+
|
57 |
+
# Predictor config
|
58 |
+
self.mask_threshold = mask_threshold
|
59 |
+
|
60 |
+
# Spatial dim for backbone feature maps
|
61 |
+
self._bb_feat_sizes = [
|
62 |
+
(256, 256),
|
63 |
+
(128, 128),
|
64 |
+
(64, 64),
|
65 |
+
]
|
66 |
+
|
67 |
+
@classmethod
|
68 |
+
def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2ImagePredictor":
|
69 |
+
"""
|
70 |
+
Load a pretrained model from the Hugging Face hub.
|
71 |
+
|
72 |
+
Arguments:
|
73 |
+
model_id (str): The Hugging Face repository ID.
|
74 |
+
**kwargs: Additional arguments to pass to the model constructor.
|
75 |
+
|
76 |
+
Returns:
|
77 |
+
(SAM2ImagePredictor): The loaded model.
|
78 |
+
"""
|
79 |
+
from sam2.build_sam import build_sam2_hf
|
80 |
+
|
81 |
+
sam_model = build_sam2_hf(model_id, **kwargs)
|
82 |
+
return cls(sam_model, **kwargs)
|
83 |
+
|
84 |
+
@torch.no_grad()
|
85 |
+
def set_image(
|
86 |
+
self,
|
87 |
+
image: Union[np.ndarray, Image],
|
88 |
+
) -> None:
|
89 |
+
"""
|
90 |
+
Calculates the image embeddings for the provided image, allowing
|
91 |
+
masks to be predicted with the 'predict' method.
|
92 |
+
|
93 |
+
Arguments:
|
94 |
+
image (np.ndarray or PIL Image): The input image to embed in RGB format. The image should be in HWC format if np.ndarray, or WHC format if PIL Image
|
95 |
+
with pixel values in [0, 255].
|
96 |
+
image_format (str): The color format of the image, in ['RGB', 'BGR'].
|
97 |
+
"""
|
98 |
+
self.reset_predictor()
|
99 |
+
# Transform the image to the form expected by the model
|
100 |
+
if isinstance(image, np.ndarray):
|
101 |
+
logging.info("For numpy array image, we assume (HxWxC) format")
|
102 |
+
self._orig_hw = [image.shape[:2]]
|
103 |
+
elif isinstance(image, Image):
|
104 |
+
w, h = image.size
|
105 |
+
self._orig_hw = [(h, w)]
|
106 |
+
else:
|
107 |
+
raise NotImplementedError("Image format not supported")
|
108 |
+
|
109 |
+
input_image = self._transforms(image)
|
110 |
+
input_image = input_image[None, ...].to(self.device)
|
111 |
+
|
112 |
+
assert (
|
113 |
+
len(input_image.shape) == 4 and input_image.shape[1] == 3
|
114 |
+
), f"input_image must be of size 1x3xHxW, got {input_image.shape}"
|
115 |
+
logging.info("Computing image embeddings for the provided image...")
|
116 |
+
backbone_out = self.model.forward_image(input_image)
|
117 |
+
_, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
|
118 |
+
# Add no_mem_embed, which is added to the lowest-res feature map during training on videos
|
119 |
+
if self.model.directly_add_no_mem_embed:
|
120 |
+
vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
|
121 |
+
|
122 |
+
feats = [
|
123 |
+
feat.permute(1, 2, 0).view(1, -1, *feat_size)
|
124 |
+
for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
|
125 |
+
][::-1]
|
126 |
+
self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
|
127 |
+
self._is_image_set = True
|
128 |
+
logging.info("Image embeddings computed.")
|
129 |
+
|
130 |
+
@torch.no_grad()
|
131 |
+
def set_image_batch(
|
132 |
+
self,
|
133 |
+
image_list: List[Union[np.ndarray]],
|
134 |
+
) -> None:
|
135 |
+
"""
|
136 |
+
Calculates the image embeddings for the provided image batch, allowing
|
137 |
+
masks to be predicted with the 'predict_batch' method.
|
138 |
+
|
139 |
+
Arguments:
|
140 |
+
image_list (List[np.ndarray]): The input images to embed in RGB format. The image should be in HWC format if np.ndarray
|
141 |
+
with pixel values in [0, 255].
|
142 |
+
"""
|
143 |
+
self.reset_predictor()
|
144 |
+
assert isinstance(image_list, list)
|
145 |
+
self._orig_hw = []
|
146 |
+
for image in image_list:
|
147 |
+
assert isinstance(
|
148 |
+
image, np.ndarray
|
149 |
+
), "Images are expected to be an np.ndarray in RGB format, and of shape HWC"
|
150 |
+
self._orig_hw.append(image.shape[:2])
|
151 |
+
# Transform the image to the form expected by the model
|
152 |
+
img_batch = self._transforms.forward_batch(image_list)
|
153 |
+
img_batch = img_batch.to(self.device)
|
154 |
+
batch_size = img_batch.shape[0]
|
155 |
+
assert (
|
156 |
+
len(img_batch.shape) == 4 and img_batch.shape[1] == 3
|
157 |
+
), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}"
|
158 |
+
logging.info("Computing image embeddings for the provided images...")
|
159 |
+
backbone_out = self.model.forward_image(img_batch)
|
160 |
+
_, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
|
161 |
+
# Add no_mem_embed, which is added to the lowest-res feature map during training on videos
|
162 |
+
if self.model.directly_add_no_mem_embed:
|
163 |
+
vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
|
164 |
+
|
165 |
+
feats = [
|
166 |
+
feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
|
167 |
+
for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
|
168 |
+
][::-1]
|
169 |
+
self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
|
170 |
+
self._is_image_set = True
|
171 |
+
self._is_batch = True
|
172 |
+
logging.info("Image embeddings computed.")
|
173 |
+
|
174 |
+
def predict_batch(
|
175 |
+
self,
|
176 |
+
point_coords_batch: List[np.ndarray] = None,
|
177 |
+
point_labels_batch: List[np.ndarray] = None,
|
178 |
+
box_batch: List[np.ndarray] = None,
|
179 |
+
mask_input_batch: List[np.ndarray] = None,
|
180 |
+
multimask_output: bool = True,
|
181 |
+
return_logits: bool = False,
|
182 |
+
normalize_coords=True,
|
183 |
+
) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
|
184 |
+
"""This function is very similar to predict(...), however it is used for batched mode, when the model is expected to generate predictions on multiple images.
|
185 |
+
It returns a tuple of lists of masks, ious, and low_res_masks_logits.
|
186 |
+
"""
|
187 |
+
assert self._is_batch, "This function should only be used when in batched mode"
|
188 |
+
if not self._is_image_set:
|
189 |
+
raise RuntimeError(
|
190 |
+
"An image must be set with .set_image_batch(...) before mask prediction."
|
191 |
+
)
|
192 |
+
num_images = len(self._features["image_embed"])
|
193 |
+
all_masks = []
|
194 |
+
all_ious = []
|
195 |
+
all_low_res_masks = []
|
196 |
+
for img_idx in range(num_images):
|
197 |
+
# Transform input prompts
|
198 |
+
point_coords = (
|
199 |
+
point_coords_batch[img_idx] if point_coords_batch is not None else None
|
200 |
+
)
|
201 |
+
point_labels = (
|
202 |
+
point_labels_batch[img_idx] if point_labels_batch is not None else None
|
203 |
+
)
|
204 |
+
box = box_batch[img_idx] if box_batch is not None else None
|
205 |
+
mask_input = (
|
206 |
+
mask_input_batch[img_idx] if mask_input_batch is not None else None
|
207 |
+
)
|
208 |
+
mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
|
209 |
+
point_coords,
|
210 |
+
point_labels,
|
211 |
+
box,
|
212 |
+
mask_input,
|
213 |
+
normalize_coords,
|
214 |
+
img_idx=img_idx,
|
215 |
+
)
|
216 |
+
masks, iou_predictions, low_res_masks = self._predict(
|
217 |
+
unnorm_coords,
|
218 |
+
labels,
|
219 |
+
unnorm_box,
|
220 |
+
mask_input,
|
221 |
+
multimask_output,
|
222 |
+
return_logits=return_logits,
|
223 |
+
img_idx=img_idx,
|
224 |
+
)
|
225 |
+
masks_np = masks.squeeze(0).float().detach().cpu().numpy()
|
226 |
+
iou_predictions_np = (
|
227 |
+
iou_predictions.squeeze(0).float().detach().cpu().numpy()
|
228 |
+
)
|
229 |
+
low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
|
230 |
+
all_masks.append(masks_np)
|
231 |
+
all_ious.append(iou_predictions_np)
|
232 |
+
all_low_res_masks.append(low_res_masks_np)
|
233 |
+
|
234 |
+
return all_masks, all_ious, all_low_res_masks
|
235 |
+
|
236 |
+
def predict(
|
237 |
+
self,
|
238 |
+
point_coords: Optional[np.ndarray] = None,
|
239 |
+
point_labels: Optional[np.ndarray] = None,
|
240 |
+
box: Optional[np.ndarray] = None,
|
241 |
+
mask_input: Optional[np.ndarray] = None,
|
242 |
+
multimask_output: bool = True,
|
243 |
+
return_logits: bool = False,
|
244 |
+
normalize_coords=True,
|
245 |
+
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
|
246 |
+
"""
|
247 |
+
Predict masks for the given input prompts, using the currently set image.
|
248 |
+
|
249 |
+
Arguments:
|
250 |
+
point_coords (np.ndarray or None): A Nx2 array of point prompts to the
|
251 |
+
model. Each point is in (X,Y) in pixels.
|
252 |
+
point_labels (np.ndarray or None): A length N array of labels for the
|
253 |
+
point prompts. 1 indicates a foreground point and 0 indicates a
|
254 |
+
background point.
|
255 |
+
          box (np.ndarray or None): A length 4 array given a box prompt to the
            model, in XYXY format.
          mask_input (np.ndarray): A low resolution mask input to the model, typically
            coming from a previous prediction iteration. Has form 1xHxW, where
            for SAM, H=W=256.
          multimask_output (bool): If true, the model will return three masks.
            For ambiguous input prompts (such as a single click), this will often
            produce better masks than a single prediction. If only a single
            mask is needed, the model's predicted quality score can be used
            to select the best mask. For non-ambiguous prompts, such as multiple
            input prompts, multimask_output=False can give better results.
          return_logits (bool): If true, returns un-thresholded masks logits
            instead of a binary mask.
          normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1] and point_coords is expected to be wrt. image dimensions.

        Returns:
          (np.ndarray): The output masks in CxHxW format, where C is the
            number of masks, and (H, W) is the original image size.
          (np.ndarray): An array of length C containing the model's
            predictions for the quality of each mask.
          (np.ndarray): An array of shape CxHxW, where C is the number
            of masks and H=W=256. These low resolution logits can be passed to
            a subsequent iteration as mask input.
        """
        if not self._is_image_set:
            raise RuntimeError(
                "An image must be set with .set_image(...) before mask prediction."
            )

        # Transform input prompts

        mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
            point_coords, point_labels, box, mask_input, normalize_coords
        )

        masks, iou_predictions, low_res_masks = self._predict(
            unnorm_coords,
            labels,
            unnorm_box,
            mask_input,
            multimask_output,
            return_logits=return_logits,
        )

        masks_np = masks.squeeze(0).float().detach().cpu().numpy()
        iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy()
        low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
        return masks_np, iou_predictions_np, low_res_masks_np

    def _prep_prompts(
        self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1
    ):

        unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None
        if point_coords is not None:
            assert (
                point_labels is not None
            ), "point_labels must be supplied if point_coords is supplied."
            point_coords = torch.as_tensor(
                point_coords, dtype=torch.float, device=self.device
            )
            unnorm_coords = self._transforms.transform_coords(
                point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]
            )
            labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
            if len(unnorm_coords.shape) == 2:
                unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...]
        if box is not None:
            box = torch.as_tensor(box, dtype=torch.float, device=self.device)
            unnorm_box = self._transforms.transform_boxes(
                box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]
            )  # Bx2x2
        if mask_logits is not None:
            mask_input = torch.as_tensor(
                mask_logits, dtype=torch.float, device=self.device
            )
            if len(mask_input.shape) == 3:
                mask_input = mask_input[None, :, :, :]
        return mask_input, unnorm_coords, labels, unnorm_box

    @torch.no_grad()
    def _predict(
        self,
        point_coords: Optional[torch.Tensor],
        point_labels: Optional[torch.Tensor],
        boxes: Optional[torch.Tensor] = None,
        mask_input: Optional[torch.Tensor] = None,
        multimask_output: bool = True,
        return_logits: bool = False,
        img_idx: int = -1,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Predict masks for the given input prompts, using the currently set image.
        Input prompts are batched torch tensors and are expected to already be
        transformed to the input frame using SAM2Transforms.

        Arguments:
          point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
            model. Each point is in (X,Y) in pixels.
          point_labels (torch.Tensor or None): A BxN array of labels for the
            point prompts. 1 indicates a foreground point and 0 indicates a
            background point.
          boxes (np.ndarray or None): A Bx4 array given a box prompt to the
            model, in XYXY format.
          mask_input (np.ndarray): A low resolution mask input to the model, typically
            coming from a previous prediction iteration. Has form Bx1xHxW, where
            for SAM, H=W=256. Masks returned by a previous iteration of the
            predict method do not need further transformation.
          multimask_output (bool): If true, the model will return three masks.
            For ambiguous input prompts (such as a single click), this will often
            produce better masks than a single prediction. If only a single
            mask is needed, the model's predicted quality score can be used
            to select the best mask. For non-ambiguous prompts, such as multiple
            input prompts, multimask_output=False can give better results.
          return_logits (bool): If true, returns un-thresholded masks logits
            instead of a binary mask.

        Returns:
          (torch.Tensor): The output masks in BxCxHxW format, where C is the
            number of masks, and (H, W) is the original image size.
          (torch.Tensor): An array of shape BxC containing the model's
            predictions for the quality of each mask.
          (torch.Tensor): An array of shape BxCxHxW, where C is the number
            of masks and H=W=256. These low res logits can be passed to
            a subsequent iteration as mask input.
        """
        if not self._is_image_set:
            raise RuntimeError(
                "An image must be set with .set_image(...) before mask prediction."
            )

        if point_coords is not None:
            concat_points = (point_coords, point_labels)
        else:
            concat_points = None

        # Embed prompts
        if boxes is not None:
            box_coords = boxes.reshape(-1, 2, 2)
            box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device)
            box_labels = box_labels.repeat(boxes.size(0), 1)
            # we merge "boxes" and "points" into a single "concat_points" input (where
            # boxes are added at the beginning) to sam_prompt_encoder
            if concat_points is not None:
                concat_coords = torch.cat([box_coords, concat_points[0]], dim=1)
                concat_labels = torch.cat([box_labels, concat_points[1]], dim=1)
                concat_points = (concat_coords, concat_labels)
            else:
                concat_points = (box_coords, box_labels)

        sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
            points=concat_points,
            boxes=None,
            masks=mask_input,
        )

        # Predict masks
        batched_mode = (
            concat_points is not None and concat_points[0].shape[0] > 1
        )  # multi object prediction
        high_res_features = [
            feat_level[img_idx].unsqueeze(0)
            for feat_level in self._features["high_res_feats"]
        ]
        low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder(
            image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0),
            image_pe=self.model.sam_prompt_encoder.get_dense_pe(),
            sparse_prompt_embeddings=sparse_embeddings,
            dense_prompt_embeddings=dense_embeddings,
            multimask_output=multimask_output,
            repeat_image=batched_mode,
            high_res_features=high_res_features,
        )

        # Upscale the masks to the original image resolution
        masks = self._transforms.postprocess_masks(
            low_res_masks, self._orig_hw[img_idx]
        )
        low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0)
        if not return_logits:
            masks = masks > self.mask_threshold

        return masks, iou_predictions, low_res_masks

    def get_image_embedding(self) -> torch.Tensor:
        """
        Returns the image embeddings for the currently set image, with
        shape 1xCxHxW, where C is the embedding dimension and (H,W) are
        the embedding spatial dimension of SAM (typically C=256, H=W=64).
        """
        if not self._is_image_set:
            raise RuntimeError(
                "An image must be set with .set_image(...) to generate an embedding."
            )
        assert (
            self._features is not None
        ), "Features must exist if an image has been set."
        return self._features["image_embed"]

    @property
    def device(self) -> torch.device:
        return self.model.device

    def reset_predictor(self) -> None:
        """
        Resets the image embeddings and other state variables.
        """
        self._is_image_set = False
        self._features = None
        self._orig_hw = None
        self._is_batch = False
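# Illustrative usage sketch for the predict() API above (comment only, not part of the
# upstream file; the config/checkpoint names and the example click are assumptions):
#
#   import numpy as np
#   from sam2.build_sam import build_sam2
#   from sam2.sam2_image_predictor import SAM2ImagePredictor
#
#   sam_model = build_sam2("sam2_hiera_l.yaml", "checkpoints/sam2_hiera_large.pt")
#   predictor = SAM2ImagePredictor(sam_model)
#   predictor.set_image(image)  # HWC uint8 RGB array
#   masks, iou_scores, low_res_logits = predictor.predict(
#       point_coords=np.array([[500, 375]]),  # one foreground click, in pixels
#       point_labels=np.array([1]),
#       multimask_output=True,
#   )
#   best_mask = masks[iou_scores.argmax()]  # pick the highest-quality of the three masks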
eval/grounded_sam/sam2/sam2_video_predictor.py
ADDED
@@ -0,0 +1,1172 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import warnings
from collections import OrderedDict

import torch

from tqdm import tqdm

from sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base
from sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames


class SAM2VideoPredictor(SAM2Base):
    """The predictor class to handle user interactions and manage inference states."""

    def __init__(
        self,
        fill_hole_area=0,
        # whether to apply non-overlapping constraints on the output object masks
        non_overlap_masks=False,
        # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks;
        # note that this would only apply to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True
        clear_non_cond_mem_around_input=False,
        # whether to also clear non-conditioning memory of the surrounding frames (only effective when `clear_non_cond_mem_around_input` is True).
        clear_non_cond_mem_for_multi_obj=False,
        # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
        # if `add_all_frames_to_correct_as_cond` is False, the conditioning frame list only keeps the initial conditioning frames
        add_all_frames_to_correct_as_cond=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.fill_hole_area = fill_hole_area
        self.non_overlap_masks = non_overlap_masks
        self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input
        self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj
        self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond

    @torch.inference_mode()
    def init_state(
        self,
        video_path,
        offload_video_to_cpu=False,
        offload_state_to_cpu=False,
        async_loading_frames=False,
    ):
        """Initialize an inference state."""
        compute_device = self.device  # device of the model
        images, video_height, video_width = load_video_frames(
            video_path=video_path,
            image_size=self.image_size,
            offload_video_to_cpu=offload_video_to_cpu,
            async_loading_frames=async_loading_frames,
            compute_device=compute_device,
        )
        inference_state = {}
        inference_state["images"] = images
        inference_state["num_frames"] = len(images)
        # whether to offload the video frames to CPU memory
        # turning on this option saves the GPU memory with only a very small overhead
        inference_state["offload_video_to_cpu"] = offload_video_to_cpu
        # whether to offload the inference state to CPU memory
        # turning on this option saves the GPU memory at the cost of a lower tracking fps
        # (e.g. in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object
        # and from 24 to 21 when tracking two objects)
        inference_state["offload_state_to_cpu"] = offload_state_to_cpu
        # the original video height and width, used for resizing final output scores
        inference_state["video_height"] = video_height
        inference_state["video_width"] = video_width
        inference_state["device"] = compute_device
        if offload_state_to_cpu:
            inference_state["storage_device"] = torch.device("cpu")
        else:
            inference_state["storage_device"] = compute_device
        # inputs on each frame
        inference_state["point_inputs_per_obj"] = {}
        inference_state["mask_inputs_per_obj"] = {}
        # visual features on a small number of recently visited frames for quick interactions
        inference_state["cached_features"] = {}
        # values that don't change across frames (so we only need to hold one copy of them)
        inference_state["constants"] = {}
        # mapping between client-side object id and model-side object index
        inference_state["obj_id_to_idx"] = OrderedDict()
        inference_state["obj_idx_to_id"] = OrderedDict()
        inference_state["obj_ids"] = []
        # A storage to hold the model's tracking results and states on each frame
        inference_state["output_dict"] = {
            "cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
            "non_cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
        }
        # Slice (view) of each object tracking results, sharing the same memory with "output_dict"
        inference_state["output_dict_per_obj"] = {}
        # A temporary storage to hold new outputs when the user interacts with a frame
        # to add clicks or mask (it's merged into "output_dict" before propagation starts)
        inference_state["temp_output_dict_per_obj"] = {}
        # Frames that already hold consolidated outputs from click or mask inputs
        # (we directly use their consolidated outputs during tracking)
        inference_state["consolidated_frame_inds"] = {
            "cond_frame_outputs": set(),  # set containing frame indices
            "non_cond_frame_outputs": set(),  # set containing frame indices
        }
        # metadata for each tracking frame (e.g. which direction it's tracked)
        inference_state["tracking_has_started"] = False
        inference_state["frames_already_tracked"] = {}
        # Warm up the visual backbone and cache the image feature on frame 0
        self._get_image_feature(inference_state, frame_idx=0, batch_size=1)
        return inference_state

    @classmethod
    def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2VideoPredictor":
        """
        Load a pretrained model from the Hugging Face hub.

        Arguments:
          model_id (str): The Hugging Face repository ID.
          **kwargs: Additional arguments to pass to the model constructor.

        Returns:
          (SAM2VideoPredictor): The loaded model.
        """
        from sam2.build_sam import build_sam2_video_predictor_hf

        sam_model = build_sam2_video_predictor_hf(model_id, **kwargs)
        return sam_model
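    # Illustrative usage sketch (comment only; the repo id and the frame directory are
    # assumptions): load a predictor from the Hugging Face hub and open a session.
    #
    #   predictor = SAM2VideoPredictor.from_pretrained("facebook/sam2-hiera-large")
    #   state = predictor.init_state(video_path="notebooks/videos/bedroom")
    #   # `state` now caches the loaded frames plus empty per-object input/output dicts.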
    def _obj_id_to_idx(self, inference_state, obj_id):
        """Map client-side object id to model-side object index."""
        obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None)
        if obj_idx is not None:
            return obj_idx

        # This is a new object id not sent to the server before. We only allow adding
        # new objects *before* the tracking starts.
        allow_new_object = not inference_state["tracking_has_started"]
        if allow_new_object:
            # get the next object slot
            obj_idx = len(inference_state["obj_id_to_idx"])
            inference_state["obj_id_to_idx"][obj_id] = obj_idx
            inference_state["obj_idx_to_id"][obj_idx] = obj_id
            inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"])
            # set up input and output structures for this object
            inference_state["point_inputs_per_obj"][obj_idx] = {}
            inference_state["mask_inputs_per_obj"][obj_idx] = {}
            inference_state["output_dict_per_obj"][obj_idx] = {
                "cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
                "non_cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
            }
            inference_state["temp_output_dict_per_obj"][obj_idx] = {
                "cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
                "non_cond_frame_outputs": {},  # dict containing {frame_idx: <out>}
            }
            return obj_idx
        else:
            raise RuntimeError(
                f"Cannot add new object id {obj_id} after tracking starts. "
                f"All existing object ids: {inference_state['obj_ids']}. "
                f"Please call 'reset_state' to restart from scratch."
            )

    def _obj_idx_to_id(self, inference_state, obj_idx):
        """Map model-side object index to client-side object id."""
        return inference_state["obj_idx_to_id"][obj_idx]

    def _get_obj_num(self, inference_state):
        """Get the total number of unique object ids received so far in this session."""
        return len(inference_state["obj_idx_to_id"])

    @torch.inference_mode()
    def add_new_points_or_box(
        self,
        inference_state,
        frame_idx,
        obj_id,
        points=None,
        labels=None,
        clear_old_points=True,
        normalize_coords=True,
        box=None,
    ):
        """Add new points or a box to a frame."""
        obj_idx = self._obj_id_to_idx(inference_state, obj_id)
        point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
        mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]

        if (points is not None) != (labels is not None):
            raise ValueError("points and labels must be provided together")
        if points is None and box is None:
            raise ValueError("at least one of points or box must be provided as input")

        if points is None:
            points = torch.zeros(0, 2, dtype=torch.float32)
        elif not isinstance(points, torch.Tensor):
            points = torch.tensor(points, dtype=torch.float32)
        if labels is None:
            labels = torch.zeros(0, dtype=torch.int32)
        elif not isinstance(labels, torch.Tensor):
            labels = torch.tensor(labels, dtype=torch.int32)
        if points.dim() == 2:
            points = points.unsqueeze(0)  # add batch dimension
        if labels.dim() == 1:
            labels = labels.unsqueeze(0)  # add batch dimension

        # If `box` is provided, we add it as the first two points with labels 2 and 3
        # along with the user-provided points (consistent with how SAM 2 is trained).
        if box is not None:
            if not clear_old_points:
                raise ValueError(
                    "cannot add box without clearing old points, since "
                    "box prompt must be provided before any point prompt "
                    "(please use clear_old_points=True instead)"
                )
            if inference_state["tracking_has_started"]:
                warnings.warn(
                    "You are adding a box after tracking starts. SAM 2 may not always be "
                    "able to incorporate a box prompt for *refinement*. If you intend to "
                    "use box prompt as an *initial* input before tracking, please call "
                    "'reset_state' on the inference state to restart from scratch.",
                    category=UserWarning,
                    stacklevel=2,
                )
            if not isinstance(box, torch.Tensor):
                box = torch.tensor(box, dtype=torch.float32, device=points.device)
            box_coords = box.reshape(1, 2, 2)
            box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device)
            box_labels = box_labels.reshape(1, 2)
            points = torch.cat([box_coords, points], dim=1)
            labels = torch.cat([box_labels, labels], dim=1)

        if normalize_coords:
            video_H = inference_state["video_height"]
            video_W = inference_state["video_width"]
            points = points / torch.tensor([video_W, video_H]).to(points.device)
        # scale the (normalized) coordinates by the model's internal image size
        points = points * self.image_size
        points = points.to(inference_state["device"])
        labels = labels.to(inference_state["device"])

        if not clear_old_points:
            point_inputs = point_inputs_per_frame.get(frame_idx, None)
        else:
            point_inputs = None
        point_inputs = concat_points(point_inputs, points, labels)

        point_inputs_per_frame[frame_idx] = point_inputs
        mask_inputs_per_frame.pop(frame_idx, None)
        # If this frame hasn't been tracked before, we treat it as an initial conditioning
        # frame, meaning that the input points are used to generate segments on this frame without
        # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
        # the input points will be used to correct the already tracked masks.
        is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
        # whether to track in reverse time order
        if is_init_cond_frame:
            reverse = False
        else:
            reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
        obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
        obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
        # Add a frame to conditioning output if it's an initial conditioning frame or
        # if the model sees all frames receiving clicks/mask as conditioning frames.
        is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
        storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"

        # Get any previously predicted mask logits on this object and feed it along with
        # the new clicks into the SAM mask decoder.
        prev_sam_mask_logits = None
        # lookup temporary output dict first, which contains the most recent output
        # (if not found, then lookup conditioning and non-conditioning frame output)
        prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
        if prev_out is None:
            prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
            if prev_out is None:
                prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)

        if prev_out is not None and prev_out["pred_masks"] is not None:
            device = inference_state["device"]
            prev_sam_mask_logits = prev_out["pred_masks"].to(device, non_blocking=True)
            # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
            prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
        current_out, _ = self._run_single_frame_inference(
            inference_state=inference_state,
            output_dict=obj_output_dict,  # run on the slice of a single object
            frame_idx=frame_idx,
            batch_size=1,  # run on the slice of a single object
            is_init_cond_frame=is_init_cond_frame,
            point_inputs=point_inputs,
            mask_inputs=None,
            reverse=reverse,
            # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
            # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
            # allows us to enforce non-overlapping constraints on all objects before encoding
            # them into memory.
            run_mem_encoder=False,
            prev_sam_mask_logits=prev_sam_mask_logits,
        )
        # Add the output to the output dict (to be used as future memory)
        obj_temp_output_dict[storage_key][frame_idx] = current_out

        # Resize the output mask to the original video resolution
        obj_ids = inference_state["obj_ids"]
        consolidated_out = self._consolidate_temp_output_across_obj(
            inference_state,
            frame_idx,
            is_cond=is_cond,
            run_mem_encoder=False,
            consolidate_at_video_res=True,
        )
        _, video_res_masks = self._get_orig_video_res_output(
            inference_state, consolidated_out["pred_masks_video_res"]
        )
        return frame_idx, obj_ids, video_res_masks
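    # Illustrative usage sketch (comment only; the coordinates and ids are made up):
    # clicks and a box can be combined in a single call. As implemented above, the box
    # is prepended as two corner points with labels 2 (top-left) and 3 (bottom-right),
    # and with normalize_coords=True all coordinates are divided by (video_W, video_H)
    # and then rescaled by self.image_size.
    #
    #   frame_idx, obj_ids, mask_logits = predictor.add_new_points_or_box(
    #       state, frame_idx=0, obj_id=1,
    #       box=[300, 0, 500, 400],            # XYXY, in pixels of the original video
    #       points=[[410, 210]], labels=[1],   # an extra foreground click
    #   )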
    def add_new_points(self, *args, **kwargs):
        """Deprecated method. Please use `add_new_points_or_box` instead."""
        return self.add_new_points_or_box(*args, **kwargs)

    @torch.inference_mode()
    def add_new_mask(
        self,
        inference_state,
        frame_idx,
        obj_id,
        mask,
    ):
        """Add new mask to a frame."""
        obj_idx = self._obj_id_to_idx(inference_state, obj_id)
        point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
        mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]

        if not isinstance(mask, torch.Tensor):
            mask = torch.tensor(mask, dtype=torch.bool)
        assert mask.dim() == 2
        mask_H, mask_W = mask.shape
        mask_inputs_orig = mask[None, None]  # add batch and channel dimension
        mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"])

        # resize the mask if it doesn't match the model's image size
        if mask_H != self.image_size or mask_W != self.image_size:
            mask_inputs = torch.nn.functional.interpolate(
                mask_inputs_orig,
                size=(self.image_size, self.image_size),
                align_corners=False,
                mode="bilinear",
                antialias=True,  # use antialias for downsampling
            )
            mask_inputs = (mask_inputs >= 0.5).float()
        else:
            mask_inputs = mask_inputs_orig

        mask_inputs_per_frame[frame_idx] = mask_inputs
        point_inputs_per_frame.pop(frame_idx, None)
        # If this frame hasn't been tracked before, we treat it as an initial conditioning
        # frame, meaning that the input points are used to generate segments on this frame without
        # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
        # the input points will be used to correct the already tracked masks.
        is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
        # whether to track in reverse time order
        if is_init_cond_frame:
            reverse = False
        else:
            reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
        obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
        obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
        # Add a frame to conditioning output if it's an initial conditioning frame or
        # if the model sees all frames receiving clicks/mask as conditioning frames.
        is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
        storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"

        current_out, _ = self._run_single_frame_inference(
            inference_state=inference_state,
            output_dict=obj_output_dict,  # run on the slice of a single object
            frame_idx=frame_idx,
            batch_size=1,  # run on the slice of a single object
            is_init_cond_frame=is_init_cond_frame,
            point_inputs=None,
            mask_inputs=mask_inputs,
            reverse=reverse,
            # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
            # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
            # allows us to enforce non-overlapping constraints on all objects before encoding
            # them into memory.
            run_mem_encoder=False,
        )
        # Add the output to the output dict (to be used as future memory)
        obj_temp_output_dict[storage_key][frame_idx] = current_out

        # Resize the output mask to the original video resolution
        obj_ids = inference_state["obj_ids"]
        consolidated_out = self._consolidate_temp_output_across_obj(
            inference_state,
            frame_idx,
            is_cond=is_cond,
            run_mem_encoder=False,
            consolidate_at_video_res=True,
        )
        _, video_res_masks = self._get_orig_video_res_output(
            inference_state, consolidated_out["pred_masks_video_res"]
        )
        return frame_idx, obj_ids, video_res_masks
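    # Illustrative usage sketch (comment only; the mask source is an assumption):
    # a full HxW boolean mask can seed an object instead of clicks. As implemented
    # above, a mask whose size differs from self.image_size is resized with
    # antialiased bilinear interpolation and re-binarized at 0.5.
    #
    #   frame_idx, obj_ids, mask_logits = predictor.add_new_mask(
    #       state, frame_idx=0, obj_id=2, mask=first_frame_mask  # HxW bool array
    #   )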
    def _get_orig_video_res_output(self, inference_state, any_res_masks):
        """
        Resize the object scores to the original video resolution (video_res_masks)
        and apply non-overlapping constraints for final output.
        """
        device = inference_state["device"]
        video_H = inference_state["video_height"]
        video_W = inference_state["video_width"]
        any_res_masks = any_res_masks.to(device, non_blocking=True)
        if any_res_masks.shape[-2:] == (video_H, video_W):
            video_res_masks = any_res_masks
        else:
            video_res_masks = torch.nn.functional.interpolate(
                any_res_masks,
                size=(video_H, video_W),
                mode="bilinear",
                align_corners=False,
            )
        if self.non_overlap_masks:
            video_res_masks = self._apply_non_overlapping_constraints(video_res_masks)
        return any_res_masks, video_res_masks

    def _consolidate_temp_output_across_obj(
        self,
        inference_state,
        frame_idx,
        is_cond,
        run_mem_encoder,
        consolidate_at_video_res=False,
    ):
        """
        Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on
        a frame into a single output for all objects, including
        1) fill any missing objects either from `output_dict_per_obj` (if they exist in
           `output_dict_per_obj` for this frame) or leave them as placeholder values
           (if they don't exist in `output_dict_per_obj` for this frame);
        2) if specified, rerun the memory encoder after applying non-overlapping constraints
           on the object scores.
        """
        batch_size = self._get_obj_num(inference_state)
        storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
        # Optionally, we allow consolidating the temporary outputs at the original
        # video resolution (to provide a better editing experience for mask prompts).
        if consolidate_at_video_res:
            assert not run_mem_encoder, "memory encoder cannot run at video resolution"
            consolidated_H = inference_state["video_height"]
            consolidated_W = inference_state["video_width"]
            consolidated_mask_key = "pred_masks_video_res"
        else:
            consolidated_H = consolidated_W = self.image_size // 4
            consolidated_mask_key = "pred_masks"

        # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc"
        # will be added when rerunning the memory encoder after applying non-overlapping
        # constraints to object scores. Its "pred_masks" are prefilled with a large
        # negative value (NO_OBJ_SCORE) to represent missing objects.
        consolidated_out = {
            "maskmem_features": None,
            "maskmem_pos_enc": None,
            consolidated_mask_key: torch.full(
                size=(batch_size, 1, consolidated_H, consolidated_W),
                fill_value=NO_OBJ_SCORE,
                dtype=torch.float32,
                device=inference_state["storage_device"],
            ),
            "obj_ptr": torch.full(
                size=(batch_size, self.hidden_dim),
                fill_value=NO_OBJ_SCORE,
                dtype=torch.float32,
                device=inference_state["device"],
            ),
            "object_score_logits": torch.full(
                size=(batch_size, 1),
                # default to 10.0 for object_score_logits, i.e. assuming the object is
                # present as sigmoid(10)=1, same as in `predict_masks` of `MaskDecoder`
                fill_value=10.0,
                dtype=torch.float32,
                device=inference_state["device"],
            ),
        }
        empty_mask_ptr = None
        for obj_idx in range(batch_size):
            obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
            obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
            out = obj_temp_output_dict[storage_key].get(frame_idx, None)
            # If the object doesn't appear in "temp_output_dict_per_obj" on this frame,
            # we fall back and look up its previous output in "output_dict_per_obj".
            # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in
            # "output_dict_per_obj" to find a previous output for this object.
            if out is None:
                out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None)
            if out is None:
                out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None)
            # If the object doesn't appear in "output_dict_per_obj" either, we skip it
            # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE
            # placeholder above) and set its object pointer to be a dummy pointer.
            if out is None:
                # Fill in dummy object pointers for those objects without any inputs or
                # tracking outcomes on this frame (only do it under `run_mem_encoder=True`,
                # i.e. when we need to build the memory for tracking).
                if run_mem_encoder:
                    if empty_mask_ptr is None:
                        empty_mask_ptr = self._get_empty_mask_ptr(
                            inference_state, frame_idx
                        )
                    # fill object pointer with a dummy pointer (based on an empty mask)
                    consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr
                continue
            # Add the temporary object output mask to consolidated output mask
            obj_mask = out["pred_masks"]
            consolidated_pred_masks = consolidated_out[consolidated_mask_key]
            if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]:
                consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask
            else:
                # Resize first if temporary object mask has a different resolution
                resized_obj_mask = torch.nn.functional.interpolate(
                    obj_mask,
                    size=consolidated_pred_masks.shape[-2:],
                    mode="bilinear",
                    align_corners=False,
                )
                consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask
            consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = out["obj_ptr"]
            consolidated_out["object_score_logits"][obj_idx : obj_idx + 1] = out[
                "object_score_logits"
            ]

        # Optionally, apply non-overlapping constraints on the consolidated scores
        # and rerun the memory encoder
        if run_mem_encoder:
            device = inference_state["device"]
            high_res_masks = torch.nn.functional.interpolate(
                consolidated_out["pred_masks"].to(device, non_blocking=True),
                size=(self.image_size, self.image_size),
                mode="bilinear",
                align_corners=False,
            )
            if self.non_overlap_masks_for_mem_enc:
                high_res_masks = self._apply_non_overlapping_constraints(high_res_masks)
            maskmem_features, maskmem_pos_enc = self._run_memory_encoder(
                inference_state=inference_state,
                frame_idx=frame_idx,
                batch_size=batch_size,
                high_res_masks=high_res_masks,
                object_score_logits=consolidated_out["object_score_logits"],
                is_mask_from_pts=True,  # these frames are what the user interacted with
            )
            consolidated_out["maskmem_features"] = maskmem_features
            consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc

        return consolidated_out
    def _get_empty_mask_ptr(self, inference_state, frame_idx):
        """Get a dummy object pointer based on an empty mask on the current frame."""
        # A dummy (empty) mask with a single object
        batch_size = 1
        mask_inputs = torch.zeros(
            (batch_size, 1, self.image_size, self.image_size),
            dtype=torch.float32,
            device=inference_state["device"],
        )

        # Retrieve correct image features
        (
            _,
            _,
            current_vision_feats,
            current_vision_pos_embeds,
            feat_sizes,
        ) = self._get_image_feature(inference_state, frame_idx, batch_size)

        # Feed the empty mask and image feature above to get a dummy object pointer
        current_out = self.track_step(
            frame_idx=frame_idx,
            is_init_cond_frame=True,
            current_vision_feats=current_vision_feats,
            current_vision_pos_embeds=current_vision_pos_embeds,
            feat_sizes=feat_sizes,
            point_inputs=None,
            mask_inputs=mask_inputs,
            output_dict={},
            num_frames=inference_state["num_frames"],
            track_in_reverse=False,
            run_mem_encoder=False,
            prev_sam_mask_logits=None,
        )
        return current_out["obj_ptr"]

    @torch.inference_mode()
    def propagate_in_video_preflight(self, inference_state):
        """Prepare inference_state and consolidate temporary outputs before tracking."""
        # Tracking has started and we don't allow adding new objects until the session is reset.
        inference_state["tracking_has_started"] = True
        batch_size = self._get_obj_num(inference_state)

        # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and
        # add them into "output_dict".
        temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
        output_dict = inference_state["output_dict"]
        # "consolidated_frame_inds" contains indices of those frames where consolidated
        # temporary outputs have been added (either in this call or any previous calls
        # to `propagate_in_video_preflight`).
        consolidated_frame_inds = inference_state["consolidated_frame_inds"]
        for is_cond in [False, True]:
            # Separately consolidate conditioning and non-conditioning temp outputs
            storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
            # Find all the frames that contain temporary outputs for any objects
            # (these should be the frames that have just received clicks or mask inputs
            # via `add_new_points_or_box` or `add_new_mask`)
            temp_frame_inds = set()
            for obj_temp_output_dict in temp_output_dict_per_obj.values():
                temp_frame_inds.update(obj_temp_output_dict[storage_key].keys())
            consolidated_frame_inds[storage_key].update(temp_frame_inds)
            # consolidate the temporary output across all objects on this frame
            for frame_idx in temp_frame_inds:
                consolidated_out = self._consolidate_temp_output_across_obj(
                    inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True
                )
                # merge them into "output_dict" and also create per-object slices
                output_dict[storage_key][frame_idx] = consolidated_out
                self._add_output_per_object(
                    inference_state, frame_idx, consolidated_out, storage_key
                )
                clear_non_cond_mem = self.clear_non_cond_mem_around_input and (
                    self.clear_non_cond_mem_for_multi_obj or batch_size <= 1
                )
                if clear_non_cond_mem:
                    # clear non-conditioning memory of the surrounding frames
                    self._clear_non_cond_mem_around_input(inference_state, frame_idx)

            # clear temporary outputs in `temp_output_dict_per_obj`
            for obj_temp_output_dict in temp_output_dict_per_obj.values():
                obj_temp_output_dict[storage_key].clear()

        # edge case: if an output is added to "cond_frame_outputs", we remove any prior
        # output on the same frame in "non_cond_frame_outputs"
        for frame_idx in output_dict["cond_frame_outputs"]:
            output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
        for obj_output_dict in inference_state["output_dict_per_obj"].values():
            for frame_idx in obj_output_dict["cond_frame_outputs"]:
                obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
        for frame_idx in consolidated_frame_inds["cond_frame_outputs"]:
            assert frame_idx in output_dict["cond_frame_outputs"]
            consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx)

        # Make sure that the frame indices in "consolidated_frame_inds" are exactly those frames
        # with either points or mask inputs (which should be true under a correct workflow).
        all_consolidated_frame_inds = (
            consolidated_frame_inds["cond_frame_outputs"]
            | consolidated_frame_inds["non_cond_frame_outputs"]
        )
        input_frames_inds = set()
        for point_inputs_per_frame in inference_state["point_inputs_per_obj"].values():
            input_frames_inds.update(point_inputs_per_frame.keys())
        for mask_inputs_per_frame in inference_state["mask_inputs_per_obj"].values():
            input_frames_inds.update(mask_inputs_per_frame.keys())
        assert all_consolidated_frame_inds == input_frames_inds

    @torch.inference_mode()
    def propagate_in_video(
        self,
        inference_state,
        start_frame_idx=None,
        max_frame_num_to_track=None,
        reverse=False,
    ):
        """Propagate the input points across frames to track in the entire video."""
        self.propagate_in_video_preflight(inference_state)

        output_dict = inference_state["output_dict"]
        consolidated_frame_inds = inference_state["consolidated_frame_inds"]
        obj_ids = inference_state["obj_ids"]
        num_frames = inference_state["num_frames"]
        batch_size = self._get_obj_num(inference_state)
        if len(output_dict["cond_frame_outputs"]) == 0:
            raise RuntimeError("No points are provided; please add points first")
        clear_non_cond_mem = self.clear_non_cond_mem_around_input and (
            self.clear_non_cond_mem_for_multi_obj or batch_size <= 1
        )

        # set start index, end index, and processing order
        if start_frame_idx is None:
            # default: start from the earliest frame with input points
            start_frame_idx = min(output_dict["cond_frame_outputs"])
        if max_frame_num_to_track is None:
            # default: track all the frames in the video
            max_frame_num_to_track = num_frames
        if reverse:
            end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
            if start_frame_idx > 0:
                processing_order = range(start_frame_idx, end_frame_idx - 1, -1)
            else:
                processing_order = []  # skip reverse tracking if starting from frame 0
        else:
            end_frame_idx = min(
                start_frame_idx + max_frame_num_to_track, num_frames - 1
            )
            processing_order = range(start_frame_idx, end_frame_idx + 1)

        for frame_idx in tqdm(processing_order, desc="propagate in video", disable=True):
            # We skip those frames already in consolidated outputs (these are frames
            # that received input clicks or mask). Note that we cannot directly run
            # batched forward on them via `_run_single_frame_inference` because the
            # number of clicks on each object might be different.
            if frame_idx in consolidated_frame_inds["cond_frame_outputs"]:
                storage_key = "cond_frame_outputs"
                current_out = output_dict[storage_key][frame_idx]
                pred_masks = current_out["pred_masks"]
                if clear_non_cond_mem:
                    # clear non-conditioning memory of the surrounding frames
                    self._clear_non_cond_mem_around_input(inference_state, frame_idx)
            elif frame_idx in consolidated_frame_inds["non_cond_frame_outputs"]:
                storage_key = "non_cond_frame_outputs"
                current_out = output_dict[storage_key][frame_idx]
                pred_masks = current_out["pred_masks"]
            else:
                storage_key = "non_cond_frame_outputs"
                current_out, pred_masks = self._run_single_frame_inference(
                    inference_state=inference_state,
                    output_dict=output_dict,
                    frame_idx=frame_idx,
                    batch_size=batch_size,
                    is_init_cond_frame=False,
                    point_inputs=None,
                    mask_inputs=None,
                    reverse=reverse,
                    run_mem_encoder=True,
                )
                output_dict[storage_key][frame_idx] = current_out
            # Create slices of per-object outputs for subsequent interaction with each
            # individual object after tracking.
            self._add_output_per_object(
                inference_state, frame_idx, current_out, storage_key
            )
            inference_state["frames_already_tracked"][frame_idx] = {"reverse": reverse}

            # Resize the output mask to the original video resolution (we directly use
            # the mask scores on GPU for output to avoid any CPU conversion in between)
            _, video_res_masks = self._get_orig_video_res_output(
                inference_state, pred_masks
            )
            yield frame_idx, obj_ids, video_res_masks
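    # Illustrative usage sketch (comment only): after all prompts are added,
    # propagate_in_video is a generator yielding (frame_idx, obj_ids, mask logits at
    # the original video resolution); thresholding the logits at 0.0 gives binary masks.
    #
    #   video_segments = {}
    #   for frame_idx, obj_ids, mask_logits in predictor.propagate_in_video(state):
    #       video_segments[frame_idx] = {
    #           obj_id: (mask_logits[i] > 0.0).cpu().numpy()
    #           for i, obj_id in enumerate(obj_ids)
    #       }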
    def _add_output_per_object(
        self, inference_state, frame_idx, current_out, storage_key
    ):
        """
        Split a multi-object output into per-object output slices and add them into
        `output_dict_per_obj`. The resulting slices share the same tensor storage.
        """
        maskmem_features = current_out["maskmem_features"]
        assert maskmem_features is None or isinstance(maskmem_features, torch.Tensor)

        maskmem_pos_enc = current_out["maskmem_pos_enc"]
        assert maskmem_pos_enc is None or isinstance(maskmem_pos_enc, list)

        output_dict_per_obj = inference_state["output_dict_per_obj"]
        for obj_idx, obj_output_dict in output_dict_per_obj.items():
            obj_slice = slice(obj_idx, obj_idx + 1)
            obj_out = {
                "maskmem_features": None,
                "maskmem_pos_enc": None,
                "pred_masks": current_out["pred_masks"][obj_slice],
                "obj_ptr": current_out["obj_ptr"][obj_slice],
                "object_score_logits": current_out["object_score_logits"][obj_slice],
            }
            if maskmem_features is not None:
                obj_out["maskmem_features"] = maskmem_features[obj_slice]
            if maskmem_pos_enc is not None:
                obj_out["maskmem_pos_enc"] = [x[obj_slice] for x in maskmem_pos_enc]
            obj_output_dict[storage_key][frame_idx] = obj_out

    @torch.inference_mode()
    def clear_all_prompts_in_frame(
        self, inference_state, frame_idx, obj_id, need_output=True
    ):
        """Remove all input points or mask in a specific frame for a given object."""
        obj_idx = self._obj_id_to_idx(inference_state, obj_id)

        # Clear the conditioning information on the given frame
        inference_state["point_inputs_per_obj"][obj_idx].pop(frame_idx, None)
        inference_state["mask_inputs_per_obj"][obj_idx].pop(frame_idx, None)

        temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
        temp_output_dict_per_obj[obj_idx]["cond_frame_outputs"].pop(frame_idx, None)
        temp_output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].pop(frame_idx, None)

        # Check and see if there are still any inputs left on this frame
        batch_size = self._get_obj_num(inference_state)
        frame_has_input = False
        for obj_idx2 in range(batch_size):
            if frame_idx in inference_state["point_inputs_per_obj"][obj_idx2]:
                frame_has_input = True
                break
            if frame_idx in inference_state["mask_inputs_per_obj"][obj_idx2]:
                frame_has_input = True
                break

        # If this frame has no remaining inputs for any objects, we further clear its
        # conditioning frame status
        if not frame_has_input:
            output_dict = inference_state["output_dict"]
            consolidated_frame_inds = inference_state["consolidated_frame_inds"]
            consolidated_frame_inds["cond_frame_outputs"].discard(frame_idx)
            consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx)
            # Remove the frame's conditioning output (possibly downgrading it to non-conditioning)
            out = output_dict["cond_frame_outputs"].pop(frame_idx, None)
            if out is not None:
                # The frame is not a conditioning frame anymore since it's not receiving inputs,
                # so we "downgrade" its output (if it exists) to a non-conditioning frame output.
                output_dict["non_cond_frame_outputs"][frame_idx] = out
                inference_state["frames_already_tracked"].pop(frame_idx, None)
            # Similarly, do it for the sliced output on each object.
            for obj_idx2 in range(batch_size):
                obj_output_dict = inference_state["output_dict_per_obj"][obj_idx2]
                obj_out = obj_output_dict["cond_frame_outputs"].pop(frame_idx, None)
                if obj_out is not None:
                    obj_output_dict["non_cond_frame_outputs"][frame_idx] = obj_out

            # If all the conditioning frames have been removed, we also clear the tracking outputs
            if len(output_dict["cond_frame_outputs"]) == 0:
                self._reset_tracking_results(inference_state)

        if not need_output:
            return
        # Finally, output updated masks per object (after removing the inputs above)
        obj_ids = inference_state["obj_ids"]
        is_cond = any(
            frame_idx in obj_temp_output_dict["cond_frame_outputs"]
            for obj_temp_output_dict in temp_output_dict_per_obj.values()
        )
        consolidated_out = self._consolidate_temp_output_across_obj(
            inference_state,
            frame_idx,
            is_cond=is_cond,
            run_mem_encoder=False,
            consolidate_at_video_res=True,
        )
        _, video_res_masks = self._get_orig_video_res_output(
            inference_state, consolidated_out["pred_masks_video_res"]
        )
        return frame_idx, obj_ids, video_res_masks

    @torch.inference_mode()
    def reset_state(self, inference_state):
        """Remove all input points or mask in all frames throughout the video."""
        self._reset_tracking_results(inference_state)
        # Remove all object ids
        inference_state["obj_id_to_idx"].clear()
        inference_state["obj_idx_to_id"].clear()
        inference_state["obj_ids"].clear()
        inference_state["point_inputs_per_obj"].clear()
        inference_state["mask_inputs_per_obj"].clear()
        inference_state["output_dict_per_obj"].clear()
        inference_state["temp_output_dict_per_obj"].clear()

    def _reset_tracking_results(self, inference_state):
        """Reset all tracking inputs and results across the video."""
        for v in inference_state["point_inputs_per_obj"].values():
            v.clear()
        for v in inference_state["mask_inputs_per_obj"].values():
            v.clear()
        for v in inference_state["output_dict_per_obj"].values():
            v["cond_frame_outputs"].clear()
            v["non_cond_frame_outputs"].clear()
        for v in inference_state["temp_output_dict_per_obj"].values():
            v["cond_frame_outputs"].clear()
            v["non_cond_frame_outputs"].clear()
        inference_state["output_dict"]["cond_frame_outputs"].clear()
        inference_state["output_dict"]["non_cond_frame_outputs"].clear()
        inference_state["consolidated_frame_inds"]["cond_frame_outputs"].clear()
        inference_state["consolidated_frame_inds"]["non_cond_frame_outputs"].clear()
        inference_state["tracking_has_started"] = False
        inference_state["frames_already_tracked"].clear()

    def _get_image_feature(self, inference_state, frame_idx, batch_size):
        """Compute the image features on a given frame."""
        # Look up in the cache first
        image, backbone_out = inference_state["cached_features"].get(
            frame_idx, (None, None)
        )
        if backbone_out is None:
            # Cache miss -- we will run inference on a single image
            device = inference_state["device"]
            image = inference_state["images"][frame_idx].to(device).float().unsqueeze(0)
            backbone_out = self.forward_image(image)
            # Cache the most recent frame's feature (for repeated interactions with
            # a frame; we can use an LRU cache for more frames in the future).
            inference_state["cached_features"] = {frame_idx: (image, backbone_out)}

        # expand the features to have the same dimension as the number of objects
        expanded_image = image.expand(batch_size, -1, -1, -1)
        expanded_backbone_out = {
            "backbone_fpn": backbone_out["backbone_fpn"].copy(),
            "vision_pos_enc": backbone_out["vision_pos_enc"].copy(),
        }
        for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]):
            expanded_backbone_out["backbone_fpn"][i] = feat.expand(
                batch_size, -1, -1, -1
            )
        for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]):
            pos = pos.expand(batch_size, -1, -1, -1)
            expanded_backbone_out["vision_pos_enc"][i] = pos

        features = self._prepare_backbone_features(expanded_backbone_out)
        features = (expanded_image,) + features
        return features

    def _run_single_frame_inference(
        self,
        inference_state,
        output_dict,
        frame_idx,
        batch_size,
        is_init_cond_frame,
        point_inputs,
        mask_inputs,
        reverse,
        run_mem_encoder,
        prev_sam_mask_logits=None,
    ):
        """Run tracking on a single frame based on current inputs and previous memory."""
        # Retrieve correct image features
        (
            _,
            _,
            current_vision_feats,
            current_vision_pos_embeds,
            feat_sizes,
        ) = self._get_image_feature(inference_state, frame_idx, batch_size)

        # point and mask should not appear as input simultaneously on the same frame
        assert point_inputs is None or mask_inputs is None
        current_out = self.track_step(
            frame_idx=frame_idx,
            is_init_cond_frame=is_init_cond_frame,
            current_vision_feats=current_vision_feats,
            current_vision_pos_embeds=current_vision_pos_embeds,
            feat_sizes=feat_sizes,
            point_inputs=point_inputs,
            mask_inputs=mask_inputs,
            output_dict=output_dict,
            num_frames=inference_state["num_frames"],
            track_in_reverse=reverse,
            run_mem_encoder=run_mem_encoder,
            prev_sam_mask_logits=prev_sam_mask_logits,
        )

        # optionally offload the output to CPU memory to save GPU space
        storage_device = inference_state["storage_device"]
        maskmem_features = current_out["maskmem_features"]
        if maskmem_features is not None:
            maskmem_features = maskmem_features.to(torch.bfloat16)
            maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
        pred_masks_gpu = current_out["pred_masks"]
        # potentially fill holes in the predicted masks
        if self.fill_hole_area > 0:
            pred_masks_gpu = fill_holes_in_mask_scores(
                pred_masks_gpu, self.fill_hole_area
            )
        pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True)
        # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
        maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out)
        # object pointer is a small tensor, so we always keep it on GPU memory for fast access
        obj_ptr = current_out["obj_ptr"]
        object_score_logits = current_out["object_score_logits"]
        # make a compact version of this frame's output to reduce the state size
        compact_current_out = {
            "maskmem_features": maskmem_features,
            "maskmem_pos_enc": maskmem_pos_enc,
            "pred_masks": pred_masks,
            "obj_ptr": obj_ptr,
            "object_score_logits": object_score_logits,
        }
        return compact_current_out, pred_masks_gpu

    def _run_memory_encoder(
        self,
        inference_state,
        frame_idx,
        batch_size,
        high_res_masks,
        object_score_logits,
        is_mask_from_pts,
    ):
        """
        Run the memory encoder on `high_res_masks`. This is usually after applying
        non-overlapping constraints to object scores. Since their scores changed, their
|
992 |
+
memory also need to be computed again with the memory encoder.
|
993 |
+
"""
|
994 |
+
# Retrieve correct image features
|
995 |
+
_, _, current_vision_feats, _, feat_sizes = self._get_image_feature(
|
996 |
+
inference_state, frame_idx, batch_size
|
997 |
+
)
|
998 |
+
maskmem_features, maskmem_pos_enc = self._encode_new_memory(
|
999 |
+
current_vision_feats=current_vision_feats,
|
1000 |
+
feat_sizes=feat_sizes,
|
1001 |
+
pred_masks_high_res=high_res_masks,
|
1002 |
+
object_score_logits=object_score_logits,
|
1003 |
+
is_mask_from_pts=is_mask_from_pts,
|
1004 |
+
)
|
1005 |
+
|
1006 |
+
# optionally offload the output to CPU memory to save GPU space
|
1007 |
+
storage_device = inference_state["storage_device"]
|
1008 |
+
maskmem_features = maskmem_features.to(torch.bfloat16)
|
1009 |
+
maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
|
1010 |
+
# "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
|
1011 |
+
maskmem_pos_enc = self._get_maskmem_pos_enc(
|
1012 |
+
inference_state, {"maskmem_pos_enc": maskmem_pos_enc}
|
1013 |
+
)
|
1014 |
+
return maskmem_features, maskmem_pos_enc
|
1015 |
+
|
1016 |
+
def _get_maskmem_pos_enc(self, inference_state, current_out):
|
1017 |
+
"""
|
1018 |
+
`maskmem_pos_enc` is the same across frames and objects, so we cache it as
|
1019 |
+
a constant in the inference session to reduce session storage size.
|
1020 |
+
"""
|
1021 |
+
model_constants = inference_state["constants"]
|
1022 |
+
# "out_maskmem_pos_enc" should be either a list of tensors or None
|
1023 |
+
out_maskmem_pos_enc = current_out["maskmem_pos_enc"]
|
1024 |
+
if out_maskmem_pos_enc is not None:
|
1025 |
+
if "maskmem_pos_enc" not in model_constants:
|
1026 |
+
assert isinstance(out_maskmem_pos_enc, list)
|
1027 |
+
# only take the slice for one object, since it's same across objects
|
1028 |
+
maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc]
|
1029 |
+
model_constants["maskmem_pos_enc"] = maskmem_pos_enc
|
1030 |
+
else:
|
1031 |
+
maskmem_pos_enc = model_constants["maskmem_pos_enc"]
|
1032 |
+
# expand the cached maskmem_pos_enc to the actual batch size
|
1033 |
+
batch_size = out_maskmem_pos_enc[0].size(0)
|
1034 |
+
expanded_maskmem_pos_enc = [
|
1035 |
+
x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc
|
1036 |
+
]
|
1037 |
+
else:
|
1038 |
+
expanded_maskmem_pos_enc = None
|
1039 |
+
return expanded_maskmem_pos_enc
|
1040 |
+
|
1041 |
+
@torch.inference_mode()
|
1042 |
+
def remove_object(self, inference_state, obj_id, strict=False, need_output=True):
|
1043 |
+
"""
|
1044 |
+
Remove an object id from the tracking state. If strict is True, we check whether
|
1045 |
+
the object id actually exists and raise an error if it doesn't exist.
|
1046 |
+
"""
|
1047 |
+
old_obj_idx_to_rm = inference_state["obj_id_to_idx"].get(obj_id, None)
|
1048 |
+
updated_frames = []
|
1049 |
+
# Check whether this object_id to remove actually exists and possibly raise an error.
|
1050 |
+
if old_obj_idx_to_rm is None:
|
1051 |
+
if not strict:
|
1052 |
+
return inference_state["obj_ids"], updated_frames
|
1053 |
+
raise RuntimeError(
|
1054 |
+
f"Cannot remove object id {obj_id} as it doesn't exist. "
|
1055 |
+
f"All existing object ids: {inference_state['obj_ids']}."
|
1056 |
+
)
|
1057 |
+
|
1058 |
+
# If this is the only remaining object id, we simply reset the state.
|
1059 |
+
if len(inference_state["obj_id_to_idx"]) == 1:
|
1060 |
+
self.reset_state(inference_state)
|
1061 |
+
return inference_state["obj_ids"], updated_frames
|
1062 |
+
|
1063 |
+
# There are still remaining objects after removing this object id. In this case,
|
1064 |
+
# we need to delete the object storage from inference state tensors.
|
1065 |
+
# Step 0: clear the input on those frames where this object id has point or mask input
|
1066 |
+
# (note that this step is required as it might downgrade conditioning frames to
|
1067 |
+
# non-conditioning ones)
|
1068 |
+
obj_input_frames_inds = set()
|
1069 |
+
obj_input_frames_inds.update(
|
1070 |
+
inference_state["point_inputs_per_obj"][old_obj_idx_to_rm]
|
1071 |
+
)
|
1072 |
+
obj_input_frames_inds.update(
|
1073 |
+
inference_state["mask_inputs_per_obj"][old_obj_idx_to_rm]
|
1074 |
+
)
|
1075 |
+
for frame_idx in obj_input_frames_inds:
|
1076 |
+
self.clear_all_prompts_in_frame(
|
1077 |
+
inference_state, frame_idx, obj_id, need_output=False
|
1078 |
+
)
|
1079 |
+
|
1080 |
+
# Step 1: Update the object id mapping (note that it must be done after Step 0,
|
1081 |
+
# since Step 0 still requires the old object id mappings in inference_state)
|
1082 |
+
old_obj_ids = inference_state["obj_ids"]
|
1083 |
+
old_obj_inds = list(range(len(old_obj_ids)))
|
1084 |
+
remain_old_obj_inds = old_obj_inds.copy()
|
1085 |
+
remain_old_obj_inds.remove(old_obj_idx_to_rm)
|
1086 |
+
new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds]
|
1087 |
+
new_obj_inds = list(range(len(new_obj_ids)))
|
1088 |
+
# build new mappings
|
1089 |
+
old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds))
|
1090 |
+
inference_state["obj_id_to_idx"] = dict(zip(new_obj_ids, new_obj_inds))
|
1091 |
+
inference_state["obj_idx_to_id"] = dict(zip(new_obj_inds, new_obj_ids))
|
1092 |
+
inference_state["obj_ids"] = new_obj_ids
|
1093 |
+
|
1094 |
+
# Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys.
|
1095 |
+
# (note that "consolidated_frame_inds" doesn't need to be updated in this step as
|
1096 |
+
# it's already handled in Step 0)
|
1097 |
+
def _map_keys(container):
|
1098 |
+
new_kvs = []
|
1099 |
+
for k in old_obj_inds:
|
1100 |
+
v = container.pop(k)
|
1101 |
+
if k in old_idx_to_new_idx:
|
1102 |
+
new_kvs.append((old_idx_to_new_idx[k], v))
|
1103 |
+
container.update(new_kvs)
|
1104 |
+
|
1105 |
+
_map_keys(inference_state["point_inputs_per_obj"])
|
1106 |
+
_map_keys(inference_state["mask_inputs_per_obj"])
|
1107 |
+
_map_keys(inference_state["output_dict_per_obj"])
|
1108 |
+
_map_keys(inference_state["temp_output_dict_per_obj"])
|
1109 |
+
|
1110 |
+
# Step 3: For packed tensor storage, we index the remaining ids and rebuild the per-object slices.
|
1111 |
+
def _slice_state(output_dict, storage_key):
|
1112 |
+
for frame_idx, out in output_dict[storage_key].items():
|
1113 |
+
out["maskmem_features"] = out["maskmem_features"][remain_old_obj_inds]
|
1114 |
+
out["maskmem_pos_enc"] = [
|
1115 |
+
x[remain_old_obj_inds] for x in out["maskmem_pos_enc"]
|
1116 |
+
]
|
1117 |
+
# "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
|
1118 |
+
out["maskmem_pos_enc"] = self._get_maskmem_pos_enc(inference_state, out)
|
1119 |
+
out["pred_masks"] = out["pred_masks"][remain_old_obj_inds]
|
1120 |
+
out["obj_ptr"] = out["obj_ptr"][remain_old_obj_inds]
|
1121 |
+
out["object_score_logits"] = out["object_score_logits"][
|
1122 |
+
remain_old_obj_inds
|
1123 |
+
]
|
1124 |
+
# also update the per-object slices
|
1125 |
+
self._add_output_per_object(
|
1126 |
+
inference_state, frame_idx, out, storage_key
|
1127 |
+
)
|
1128 |
+
|
1129 |
+
_slice_state(inference_state["output_dict"], "cond_frame_outputs")
|
1130 |
+
_slice_state(inference_state["output_dict"], "non_cond_frame_outputs")
|
1131 |
+
|
1132 |
+
# Step 4: Further collect the outputs on those frames in `obj_input_frames_inds`, which
|
1133 |
+
# could show an updated mask for objects previously occluded by the object being removed
|
1134 |
+
if need_output:
|
1135 |
+
temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
|
1136 |
+
for frame_idx in obj_input_frames_inds:
|
1137 |
+
is_cond = any(
|
1138 |
+
frame_idx in obj_temp_output_dict["cond_frame_outputs"]
|
1139 |
+
for obj_temp_output_dict in temp_output_dict_per_obj.values()
|
1140 |
+
)
|
1141 |
+
consolidated_out = self._consolidate_temp_output_across_obj(
|
1142 |
+
inference_state,
|
1143 |
+
frame_idx,
|
1144 |
+
is_cond=is_cond,
|
1145 |
+
run_mem_encoder=False,
|
1146 |
+
consolidate_at_video_res=True,
|
1147 |
+
)
|
1148 |
+
_, video_res_masks = self._get_orig_video_res_output(
|
1149 |
+
inference_state, consolidated_out["pred_masks_video_res"]
|
1150 |
+
)
|
1151 |
+
updated_frames.append((frame_idx, video_res_masks))
|
1152 |
+
|
1153 |
+
return inference_state["obj_ids"], updated_frames
|
1154 |
+
|
1155 |
+
def _clear_non_cond_mem_around_input(self, inference_state, frame_idx):
|
1156 |
+
"""
|
1157 |
+
Remove the non-conditioning memory around the input frame. When users provide
|
1158 |
+
correction clicks, the surrounding frames' non-conditioning memories can still
|
1159 |
+
contain outdated object appearance information and could confuse the model.
|
1160 |
+
|
1161 |
+
This method clears those non-conditioning memories surrounding the interacted
|
1162 |
+
frame to avoid giving the model both old and new information about the object.
|
1163 |
+
"""
|
1164 |
+
r = self.memory_temporal_stride_for_eval
|
1165 |
+
frame_idx_begin = frame_idx - r * self.num_maskmem
|
1166 |
+
frame_idx_end = frame_idx + r * self.num_maskmem
|
1167 |
+
output_dict = inference_state["output_dict"]
|
1168 |
+
non_cond_frame_outputs = output_dict["non_cond_frame_outputs"]
|
1169 |
+
for t in range(frame_idx_begin, frame_idx_end + 1):
|
1170 |
+
non_cond_frame_outputs.pop(t, None)
|
1171 |
+
for obj_output_dict in inference_state["output_dict_per_obj"].values():
|
1172 |
+
obj_output_dict["non_cond_frame_outputs"].pop(t, None)
|
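
For orientation, here is a minimal usage sketch (not part of the diff) of the session-level API above (`reset_state`, `remove_object`), assuming a predictor built with `build_sam2_video_predictor` and the `init_state`, `add_new_points_or_box`, and `propagate_in_video` methods defined earlier in this file; the config, checkpoint, and frame-folder paths are placeholders.

```python
import numpy as np
import torch
from sam2.build_sam import build_sam2_video_predictor

# Placeholder config/checkpoint paths; substitute the ones shipped with this Space.
predictor = build_sam2_video_predictor("sam2_hiera_l.yaml", "./checkpoints/sam2_hiera_large.pt")

with torch.inference_mode():
    state = predictor.init_state(video_path="./frames_dir")  # folder of 00000.jpg, 00001.jpg, ...

    # Prompt object 1 with a single positive click on frame 0.
    points = np.array([[210.0, 350.0]], dtype=np.float32)
    labels = np.array([1], dtype=np.int32)
    predictor.add_new_points_or_box(state, frame_idx=0, obj_id=1, points=points, labels=labels)

    # Track through the video, then drop the object and clear the whole session.
    for frame_idx, obj_ids, video_res_masks in predictor.propagate_in_video(state):
        pass  # consume per-frame video-resolution mask logits here

    predictor.remove_object(state, obj_id=1)  # rebuilds per-object storage as in the method above
    predictor.reset_state(state)              # clears all prompts and tracking results
```
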
eval/grounded_sam/sam2/utils/__init__.py
ADDED
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
eval/grounded_sam/sam2/utils/amg.py
ADDED
@@ -0,0 +1,348 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple

import numpy as np
import torch

# Very lightly adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/utils/amg.py


class MaskData:
    """
    A structure for storing masks and their related data in batched format.
    Implements basic filtering and concatenation.
    """

    def __init__(self, **kwargs) -> None:
        for v in kwargs.values():
            assert isinstance(
                v, (list, np.ndarray, torch.Tensor)
            ), "MaskData only supports list, numpy arrays, and torch tensors."
        self._stats = dict(**kwargs)

    def __setitem__(self, key: str, item: Any) -> None:
        assert isinstance(
            item, (list, np.ndarray, torch.Tensor)
        ), "MaskData only supports list, numpy arrays, and torch tensors."
        self._stats[key] = item

    def __delitem__(self, key: str) -> None:
        del self._stats[key]

    def __getitem__(self, key: str) -> Any:
        return self._stats[key]

    def items(self) -> ItemsView[str, Any]:
        return self._stats.items()

    def filter(self, keep: torch.Tensor) -> None:
        for k, v in self._stats.items():
            if v is None:
                self._stats[k] = None
            elif isinstance(v, torch.Tensor):
                self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
            elif isinstance(v, np.ndarray):
                self._stats[k] = v[keep.detach().cpu().numpy()]
            elif isinstance(v, list) and keep.dtype == torch.bool:
                self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
            elif isinstance(v, list):
                self._stats[k] = [v[i] for i in keep]
            else:
                raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")

    def cat(self, new_stats: "MaskData") -> None:
        for k, v in new_stats.items():
            if k not in self._stats or self._stats[k] is None:
                self._stats[k] = deepcopy(v)
            elif isinstance(v, torch.Tensor):
                self._stats[k] = torch.cat([self._stats[k], v], dim=0)
            elif isinstance(v, np.ndarray):
                self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
            elif isinstance(v, list):
                self._stats[k] = self._stats[k] + deepcopy(v)
            else:
                raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")

    def to_numpy(self) -> None:
        for k, v in self._stats.items():
            if isinstance(v, torch.Tensor):
                self._stats[k] = v.float().detach().cpu().numpy()


def is_box_near_crop_edge(
    boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
) -> torch.Tensor:
    """Filter masks at the edge of a crop, but not at the edge of the original image."""
    crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
    orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
    boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
    near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
    near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
    near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
    return torch.any(near_crop_edge, dim=1)


def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
    box_xywh = deepcopy(box_xyxy)
    box_xywh[2] = box_xywh[2] - box_xywh[0]
    box_xywh[3] = box_xywh[3] - box_xywh[1]
    return box_xywh


def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
    assert len(args) > 0 and all(
        len(a) == len(args[0]) for a in args
    ), "Batched iteration must have inputs of all the same size."
    n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
    for b in range(n_batches):
        yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]


def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
    """
    Encodes masks to an uncompressed RLE, in the format expected by
    pycoco tools.
    """
    # Put in fortran order and flatten h,w
    b, h, w = tensor.shape
    tensor = tensor.permute(0, 2, 1).flatten(1)

    # Compute change indices
    diff = tensor[:, 1:] ^ tensor[:, :-1]
    change_indices = diff.nonzero()

    # Encode run length
    out = []
    for i in range(b):
        cur_idxs = change_indices[change_indices[:, 0] == i, 1]
        cur_idxs = torch.cat(
            [
                torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
                cur_idxs + 1,
                torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
            ]
        )
        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
        counts = [] if tensor[i, 0] == 0 else [0]
        counts.extend(btw_idxs.detach().cpu().tolist())
        out.append({"size": [h, w], "counts": counts})
    return out


def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
    """Compute a binary mask from an uncompressed RLE."""
    h, w = rle["size"]
    mask = np.empty(h * w, dtype=bool)
    idx = 0
    parity = False
    for count in rle["counts"]:
        mask[idx : idx + count] = parity
        idx += count
        parity ^= True
    mask = mask.reshape(w, h)
    return mask.transpose()  # Put in C order


def area_from_rle(rle: Dict[str, Any]) -> int:
    return sum(rle["counts"][1::2])


def calculate_stability_score(
    masks: torch.Tensor, mask_threshold: float, threshold_offset: float
) -> torch.Tensor:
    """
    Computes the stability score for a batch of masks. The stability
    score is the IoU between the binary masks obtained by thresholding
    the predicted mask logits at high and low values.
    """
    # One mask is always contained inside the other.
    # Save memory by preventing unnecessary cast to torch.int64
    intersections = (
        (masks > (mask_threshold + threshold_offset))
        .sum(-1, dtype=torch.int16)
        .sum(-1, dtype=torch.int32)
    )
    unions = (
        (masks > (mask_threshold - threshold_offset))
        .sum(-1, dtype=torch.int16)
        .sum(-1, dtype=torch.int32)
    )
    return intersections / unions


def build_point_grid(n_per_side: int) -> np.ndarray:
    """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
    offset = 1 / (2 * n_per_side)
    points_one_side = np.linspace(offset, 1 - offset, n_per_side)
    points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
    points_y = np.tile(points_one_side[:, None], (1, n_per_side))
    points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
    return points


def build_all_layer_point_grids(
    n_per_side: int, n_layers: int, scale_per_layer: int
) -> List[np.ndarray]:
    """Generates point grids for all crop layers."""
    points_by_layer = []
    for i in range(n_layers + 1):
        n_points = int(n_per_side / (scale_per_layer**i))
        points_by_layer.append(build_point_grid(n_points))
    return points_by_layer


def generate_crop_boxes(
    im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
) -> Tuple[List[List[int]], List[int]]:
    """
    Generates a list of crop boxes of different sizes. Each layer
    has (2**i)**2 boxes for the ith layer.
    """
    crop_boxes, layer_idxs = [], []
    im_h, im_w = im_size
    short_side = min(im_h, im_w)

    # Original image
    crop_boxes.append([0, 0, im_w, im_h])
    layer_idxs.append(0)

    def crop_len(orig_len, n_crops, overlap):
        return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))

    for i_layer in range(n_layers):
        n_crops_per_side = 2 ** (i_layer + 1)
        overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))

        crop_w = crop_len(im_w, n_crops_per_side, overlap)
        crop_h = crop_len(im_h, n_crops_per_side, overlap)

        crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
        crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]

        # Crops in XYWH format
        for x0, y0 in product(crop_box_x0, crop_box_y0):
            box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
            crop_boxes.append(box)
            layer_idxs.append(i_layer + 1)

    return crop_boxes, layer_idxs


def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
    x0, y0, _, _ = crop_box
    offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
    # Check if boxes has a channel dimension
    if len(boxes.shape) == 3:
        offset = offset.unsqueeze(1)
    return boxes + offset


def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
    x0, y0, _, _ = crop_box
    offset = torch.tensor([[x0, y0]], device=points.device)
    # Check if points has a channel dimension
    if len(points.shape) == 3:
        offset = offset.unsqueeze(1)
    return points + offset


def uncrop_masks(
    masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
) -> torch.Tensor:
    x0, y0, x1, y1 = crop_box
    if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
        return masks
    # Coordinate transform masks
    pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
    pad = (x0, pad_x - x0, y0, pad_y - y0)
    return torch.nn.functional.pad(masks, pad, value=0)


def remove_small_regions(
    mask: np.ndarray, area_thresh: float, mode: str
) -> Tuple[np.ndarray, bool]:
    """
    Removes small disconnected regions and holes in a mask. Returns the
    mask and an indicator of if the mask has been modified.
    """
    import cv2  # type: ignore

    assert mode in ["holes", "islands"]
    correct_holes = mode == "holes"
    working_mask = (correct_holes ^ mask).astype(np.uint8)
    n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
    sizes = stats[:, -1][1:]  # Row 0 is background label
    small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
    if len(small_regions) == 0:
        return mask, False
    fill_labels = [0] + small_regions
    if not correct_holes:
        fill_labels = [i for i in range(n_labels) if i not in fill_labels]
        # If every region is below threshold, keep largest
        if len(fill_labels) == 0:
            fill_labels = [int(np.argmax(sizes)) + 1]
    mask = np.isin(regions, fill_labels)
    return mask, True


def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
    from pycocotools import mask as mask_utils  # type: ignore

    h, w = uncompressed_rle["size"]
    rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
    rle["counts"] = rle["counts"].decode("utf-8")  # Necessary to serialize with json
    return rle


def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
    """
    Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
    an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
    """
    # torch.max below raises an error on empty inputs, just skip in this case
    if torch.numel(masks) == 0:
        return torch.zeros(*masks.shape[:-2], 4, device=masks.device)

    # Normalize shape to CxHxW
    shape = masks.shape
    h, w = shape[-2:]
    if len(shape) > 2:
        masks = masks.flatten(0, -3)
    else:
        masks = masks.unsqueeze(0)

    # Get top and bottom edges
    in_height, _ = torch.max(masks, dim=-1)
    in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
    bottom_edges, _ = torch.max(in_height_coords, dim=-1)
    in_height_coords = in_height_coords + h * (~in_height)
    top_edges, _ = torch.min(in_height_coords, dim=-1)

    # Get left and right edges
    in_width, _ = torch.max(masks, dim=-2)
    in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
    right_edges, _ = torch.max(in_width_coords, dim=-1)
    in_width_coords = in_width_coords + w * (~in_width)
    left_edges, _ = torch.min(in_width_coords, dim=-1)

    # If the mask is empty the right edge will be to the left of the left edge.
    # Replace these boxes with [0, 0, 0, 0]
    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
    out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
    out = out * (~empty_filter).unsqueeze(-1)

    # Return to original shape
    if len(shape) > 2:
        out = out.reshape(*shape[:-2], 4)
    else:
        out = out[0]

    return out
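
Not part of the diff, but a small self-contained sanity check of the RLE helpers above (`mask_to_rle_pytorch`, `rle_to_mask`, `area_from_rle`, `batched_mask_to_box`); the `sam2.utils.amg` import path assumes this folder is importable as the `sam2` package, as the other files in this diff assume.

```python
import numpy as np
import torch
from sam2.utils.amg import (
    area_from_rle,
    batched_mask_to_box,
    mask_to_rle_pytorch,
    rle_to_mask,
)

# A toy batch of one 6x8 boolean mask with a 3x4 foreground rectangle.
mask = torch.zeros(1, 6, 8, dtype=torch.bool)
mask[0, 2:5, 3:7] = True

rles = mask_to_rle_pytorch(mask)              # uncompressed RLE, one dict per mask
assert area_from_rle(rles[0]) == 12           # foreground area is preserved
assert np.array_equal(rle_to_mask(rles[0]), mask[0].numpy())  # round-trip recovers the mask

boxes = batched_mask_to_box(mask)             # tight XYXY box, here tensor([[3, 2, 6, 4]])
```
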
eval/grounded_sam/sam2/utils/misc.py
ADDED
@@ -0,0 +1,349 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import os
import warnings
from threading import Thread

import numpy as np
import torch
from PIL import Image
from tqdm import tqdm


def get_sdpa_settings():
    if torch.cuda.is_available():
        old_gpu = torch.cuda.get_device_properties(0).major < 7
        # only use Flash Attention on Ampere (8.0) or newer GPUs
        use_flash_attn = torch.cuda.get_device_properties(0).major >= 8
        if not use_flash_attn:
            warnings.warn(
                "Flash Attention is disabled as it requires a GPU with Ampere (8.0) CUDA capability.",
                category=UserWarning,
                stacklevel=2,
            )
        # keep math kernel for PyTorch versions before 2.2 (Flash Attention v2 is only
        # available on PyTorch 2.2+, while Flash Attention v1 cannot handle all cases)
        pytorch_version = tuple(int(v) for v in torch.__version__.split(".")[:2])
        if pytorch_version < (2, 2):
            warnings.warn(
                f"You are using PyTorch {torch.__version__} without Flash Attention v2 support. "
                "Consider upgrading to PyTorch 2.2+ for Flash Attention v2 (which could be faster).",
                category=UserWarning,
                stacklevel=2,
            )
        math_kernel_on = pytorch_version < (2, 2) or not use_flash_attn
    else:
        old_gpu = True
        use_flash_attn = False
        math_kernel_on = True

    return old_gpu, use_flash_attn, math_kernel_on


def get_connected_components(mask):
    """
    Get the connected components (8-connectivity) of binary masks of shape (N, 1, H, W).

    Inputs:
    - mask: A binary mask tensor of shape (N, 1, H, W), where 1 is foreground and 0 is
            background.

    Outputs:
    - labels: A tensor of shape (N, 1, H, W) containing the connected component labels
      for foreground pixels and 0 for background pixels.
    - counts: A tensor of shape (N, 1, H, W) containing the area of the connected
      components for foreground pixels and 0 for background pixels.
    """
    from sam2 import _C

    return _C.get_connected_componnets(mask.to(torch.uint8).contiguous())


def mask_to_box(masks: torch.Tensor):
    """
    compute bounding box given an input mask

    Inputs:
    - masks: [B, 1, H, W] masks, dtype=torch.Tensor

    Returns:
    - box_coords: [B, 1, 4], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.Tensor
    """
    B, _, h, w = masks.shape
    device = masks.device
    xs = torch.arange(w, device=device, dtype=torch.int32)
    ys = torch.arange(h, device=device, dtype=torch.int32)
    grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy")
    grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w)
    grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w)
    min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1)
    max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1)
    min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1)
    max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1)
    bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1)

    return bbox_coords


def _load_img_as_tensor(img_path, image_size):
    img_pil = Image.open(img_path)
    img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size)))
    if img_np.dtype == np.uint8:  # np.uint8 is expected for JPEG images
        img_np = img_np / 255.0
    else:
        raise RuntimeError(f"Unknown image dtype: {img_np.dtype} on {img_path}")
    img = torch.from_numpy(img_np).permute(2, 0, 1)
    video_width, video_height = img_pil.size  # the original video size
    return img, video_height, video_width


class AsyncVideoFrameLoader:
    """
    A list of video frames to be load asynchronously without blocking session start.
    """

    def __init__(
        self,
        img_paths,
        image_size,
        offload_video_to_cpu,
        img_mean,
        img_std,
        compute_device,
    ):
        self.img_paths = img_paths
        self.image_size = image_size
        self.offload_video_to_cpu = offload_video_to_cpu
        self.img_mean = img_mean
        self.img_std = img_std
        # items in `self.images` will be loaded asynchronously
        self.images = [None] * len(img_paths)
        # catch and raise any exceptions in the async loading thread
        self.exception = None
        # video_height and video_width be filled when loading the first image
        self.video_height = None
        self.video_width = None
        self.compute_device = compute_device

        # load the first frame to fill video_height and video_width and also
        # to cache it (since it's most likely where the user will click)
        self.__getitem__(0)

        # load the rest of frames asynchronously without blocking the session start
        def _load_frames():
            try:
                for n in tqdm(range(len(self.images)), desc="frame loading (JPEG)", disable=True):
                    self.__getitem__(n)
            except Exception as e:
                self.exception = e

        self.thread = Thread(target=_load_frames, daemon=True)
        self.thread.start()

    def __getitem__(self, index):
        if self.exception is not None:
            raise RuntimeError("Failure in frame loading thread") from self.exception

        img = self.images[index]
        if img is not None:
            return img

        img, video_height, video_width = _load_img_as_tensor(
            self.img_paths[index], self.image_size
        )
        self.video_height = video_height
        self.video_width = video_width
        # normalize by mean and std
        img -= self.img_mean
        img /= self.img_std
        if not self.offload_video_to_cpu:
            img = img.to(self.compute_device, non_blocking=True)
        self.images[index] = img
        return img

    def __len__(self):
        return len(self.images)


def load_video_frames(
    video_path,
    image_size,
    offload_video_to_cpu,
    img_mean=(0.485, 0.456, 0.406),
    img_std=(0.229, 0.224, 0.225),
    async_loading_frames=False,
    compute_device=torch.device("cuda"),
):
    """
    Load the video frames from video_path. The frames are resized to image_size as in
    the model and are loaded to GPU if offload_video_to_cpu=False. This is used by the demo.
    """
    is_bytes = isinstance(video_path, bytes)
    is_str = isinstance(video_path, str)
    is_mp4_path = is_str and os.path.splitext(video_path)[-1] in [".mp4", ".MP4"]
    if is_bytes or is_mp4_path:
        return load_video_frames_from_video_file(
            video_path=video_path,
            image_size=image_size,
            offload_video_to_cpu=offload_video_to_cpu,
            img_mean=img_mean,
            img_std=img_std,
            compute_device=compute_device,
        )
    elif is_str and os.path.isdir(video_path):
        return load_video_frames_from_jpg_images(
            video_path=video_path,
            image_size=image_size,
            offload_video_to_cpu=offload_video_to_cpu,
            img_mean=img_mean,
            img_std=img_std,
            async_loading_frames=async_loading_frames,
            compute_device=compute_device,
        )
    else:
        raise NotImplementedError(
            "Only MP4 video and JPEG folder are supported at this moment"
        )


def load_video_frames_from_jpg_images(
    video_path,
    image_size,
    offload_video_to_cpu,
    img_mean=(0.485, 0.456, 0.406),
    img_std=(0.229, 0.224, 0.225),
    async_loading_frames=False,
    compute_device=torch.device("cuda"),
):
    """
    Load the video frames from a directory of JPEG files ("<frame_index>.jpg" format).

    The frames are resized to image_size x image_size and are loaded to GPU if
    `offload_video_to_cpu` is `False` and to CPU if `offload_video_to_cpu` is `True`.

    You can load a frame asynchronously by setting `async_loading_frames` to `True`.
    """
    if isinstance(video_path, str) and os.path.isdir(video_path):
        jpg_folder = video_path
    else:
        raise NotImplementedError(
            "Only JPEG frames are supported at this moment. For video files, you may use "
            "ffmpeg (https://ffmpeg.org/) to extract frames into a folder of JPEG files, such as \n"
            "```\n"
            "ffmpeg -i <your_video>.mp4 -q:v 2 -start_number 0 <output_dir>/'%05d.jpg'\n"
            "```\n"
            "where `-q:v` generates high-quality JPEG frames and `-start_number 0` asks "
            "ffmpeg to start the JPEG file from 00000.jpg."
        )

    frame_names = [
        p
        for p in os.listdir(jpg_folder)
        if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"]
    ]
    frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
    num_frames = len(frame_names)
    if num_frames == 0:
        raise RuntimeError(f"no images found in {jpg_folder}")
    img_paths = [os.path.join(jpg_folder, frame_name) for frame_name in frame_names]
    img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None]
    img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None]

    if async_loading_frames:
        lazy_images = AsyncVideoFrameLoader(
            img_paths,
            image_size,
            offload_video_to_cpu,
            img_mean,
            img_std,
            compute_device,
        )
        return lazy_images, lazy_images.video_height, lazy_images.video_width

    images = torch.zeros(num_frames, 3, image_size, image_size, dtype=torch.float32)
    for n, img_path in enumerate(tqdm(img_paths, desc="frame loading (JPEG)", disable=True)):
        images[n], video_height, video_width = _load_img_as_tensor(img_path, image_size)
    if not offload_video_to_cpu:
        images = images.to(compute_device)
        img_mean = img_mean.to(compute_device)
        img_std = img_std.to(compute_device)
    # normalize by mean and std
    images -= img_mean
    images /= img_std
    return images, video_height, video_width


def load_video_frames_from_video_file(
    video_path,
    image_size,
    offload_video_to_cpu,
    img_mean=(0.485, 0.456, 0.406),
    img_std=(0.229, 0.224, 0.225),
    compute_device=torch.device("cuda"),
):
    """Load the video frames from a video file."""
    import decord

    img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None]
    img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None]
    # Get the original video height and width
    decord.bridge.set_bridge("torch")
    video_height, video_width, _ = decord.VideoReader(video_path).next().shape
    # Iterate over all frames in the video
    images = []
    for frame in decord.VideoReader(video_path, width=image_size, height=image_size):
        images.append(frame.permute(2, 0, 1))

    images = torch.stack(images, dim=0).float() / 255.0
    if not offload_video_to_cpu:
        images = images.to(compute_device)
        img_mean = img_mean.to(compute_device)
        img_std = img_std.to(compute_device)
    # normalize by mean and std
    images -= img_mean
    images /= img_std
    return images, video_height, video_width


def fill_holes_in_mask_scores(mask, max_area):
    """
    A post processor to fill small holes in mask scores with area under `max_area`.
    """
    # Holes are those connected components in background with area <= self.max_area
    # (background regions are those with mask scores <= 0)
    assert max_area > 0, "max_area must be positive"

    input_mask = mask
    try:
        labels, areas = get_connected_components(mask <= 0)
        is_hole = (labels > 0) & (areas <= max_area)
        # We fill holes with a small positive mask score (0.1) to change them to foreground.
        mask = torch.where(is_hole, 0.1, mask)
    except Exception as e:
        # Skip the post-processing step on removing small holes if the CUDA kernel fails
        warnings.warn(
            f"{e}\n\nSkipping the post-processing step due to the error above. You can "
            "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
            "functionality may be limited (which doesn't affect the results in most cases; see "
            "https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).",
            category=UserWarning,
            stacklevel=2,
        )
        mask = input_mask

    return mask


def concat_points(old_point_inputs, new_points, new_labels):
    """Add new points and labels to previous point inputs (add at the end)."""
    if old_point_inputs is None:
        points, labels = new_points, new_labels
    else:
        points = torch.cat([old_point_inputs["point_coords"], new_points], dim=1)
        labels = torch.cat([old_point_inputs["point_labels"], new_labels], dim=1)

    return {"point_coords": points, "point_labels": labels}
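
Again not part of the diff, a short sketch of how the frame-loading helper above is typically driven; the frame folder path is a placeholder and `image_size=1024` is the usual SAM 2 input resolution.

```python
import torch
from sam2.utils.misc import load_video_frames

# Placeholder folder containing frames named 00000.jpg, 00001.jpg, ...
images, video_height, video_width = load_video_frames(
    video_path="./frames_dir",
    image_size=1024,               # frames are resized to a square 1024x1024 input
    offload_video_to_cpu=True,     # keep the (N, 3, 1024, 1024) tensor on CPU
    async_loading_frames=False,
    compute_device=torch.device("cpu"),
)
print(images.shape, video_height, video_width)
```
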
eval/grounded_sam/sam2/utils/transforms.py
ADDED
@@ -0,0 +1,119 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import Normalize, Resize, ToTensor


class SAM2Transforms(nn.Module):
    def __init__(
        self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0
    ):
        """
        Transforms for SAM2.
        """
        super().__init__()
        self.resolution = resolution
        self.mask_threshold = mask_threshold
        self.max_hole_area = max_hole_area
        self.max_sprinkle_area = max_sprinkle_area
        self.mean = [0.485, 0.456, 0.406]
        self.std = [0.229, 0.224, 0.225]
        self.to_tensor = ToTensor()
        self.transforms = torch.jit.script(
            nn.Sequential(
                Resize((self.resolution, self.resolution)),
                Normalize(self.mean, self.std),
            )
        )

    def __call__(self, x):
        x = self.to_tensor(x)
        return self.transforms(x)

    def forward_batch(self, img_list):
        img_batch = [self.transforms(self.to_tensor(img)) for img in img_list]
        img_batch = torch.stack(img_batch, dim=0)
        return img_batch

    def transform_coords(
        self, coords: torch.Tensor, normalize=False, orig_hw=None
    ) -> torch.Tensor:
        """
        Expects a torch tensor with length 2 in the last dimension. The coordinates can be in absolute image or normalized coordinates,
        If the coords are in absolute image coordinates, normalize should be set to True and original image size is required.

        Returns
            Un-normalized coordinates in the range of [0, 1] which is expected by the SAM2 model.
        """
        if normalize:
            assert orig_hw is not None
            h, w = orig_hw
            coords = coords.clone()
            coords[..., 0] = coords[..., 0] / w
            coords[..., 1] = coords[..., 1] / h

        coords = coords * self.resolution  # unnormalize coords
        return coords

    def transform_boxes(
        self, boxes: torch.Tensor, normalize=False, orig_hw=None
    ) -> torch.Tensor:
        """
        Expects a tensor of shape Bx4. The coordinates can be in absolute image or normalized coordinates,
        if the coords are in absolute image coordinates, normalize should be set to True and original image size is required.
        """
        boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw)
        return boxes

    def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor:
        """
        Perform PostProcessing on output masks.
        """
        from sam2.utils.misc import get_connected_components

        masks = masks.float()
        input_masks = masks
        mask_flat = masks.flatten(0, 1).unsqueeze(1)  # flatten as 1-channel image
        try:
            if self.max_hole_area > 0:
                # Holes are those connected components in background with area <= self.fill_hole_area
                # (background regions are those with mask scores <= self.mask_threshold)
                labels, areas = get_connected_components(
                    mask_flat <= self.mask_threshold
                )
                is_hole = (labels > 0) & (areas <= self.max_hole_area)
                is_hole = is_hole.reshape_as(masks)
                # We fill holes with a small positive mask score (10.0) to change them to foreground.
                masks = torch.where(is_hole, self.mask_threshold + 10.0, masks)

            if self.max_sprinkle_area > 0:
                labels, areas = get_connected_components(
                    mask_flat > self.mask_threshold
                )
                is_hole = (labels > 0) & (areas <= self.max_sprinkle_area)
                is_hole = is_hole.reshape_as(masks)
                # We fill holes with negative mask score (-10.0) to change them to background.
                masks = torch.where(is_hole, self.mask_threshold - 10.0, masks)
        except Exception as e:
            # Skip the post-processing step if the CUDA kernel fails
            warnings.warn(
                f"{e}\n\nSkipping the post-processing step due to the error above. You can "
                "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
                "functionality may be limited (which doesn't affect the results in most cases; see "
                "https://github.com/facebookresearch/sam2/blob/main/INSTALL.md).",
                category=UserWarning,
                stacklevel=2,
            )
            masks = input_masks
        orig_dtype = masks.dtype
        masks = F.interpolate(masks.float(), orig_hw, mode="bilinear", align_corners=False)
        masks = masks.to(orig_dtype)
        return masks
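
To close, a short sketch (not from the diff) of how `SAM2Transforms` is used by the image predictor: preprocessing an image and mapping a pixel-space click into the model's square input frame. The image and click values are illustrative only.

```python
import numpy as np
import torch
from sam2.utils.transforms import SAM2Transforms

transforms = SAM2Transforms(resolution=1024, mask_threshold=0.0)

# HxWx3 uint8 image (e.g. decoded with PIL); zeros here just keep the sketch self-contained.
image = np.zeros((480, 640, 3), dtype=np.uint8)
model_input = transforms(image)  # (3, 1024, 1024), resized and ImageNet-normalized

# A click at pixel (x=320, y=240), shaped (B, N, 2), mapped into 1024x1024 model coordinates.
click = torch.tensor([[[320.0, 240.0]]])
click_model = transforms.transform_coords(click, normalize=True, orig_hw=(480, 640))
# click_model == tensor([[[512., 512.]]])
```
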