Update app.py
app.py CHANGED
@@ -17,12 +17,12 @@ import gradio as gr
 import open_clip
 from sam2.build_sam import build_sam2
 from mask_adapter.modeling.meta_arch.mask_adapter_head import build_mask_adapter
+import spaces
 
 
 
 
 def setup_cfg(config_file):
-    # load config from file and command-line arguments
     cfg = get_cfg()
     add_deeplab_config(cfg)
     add_maskformer2_config(cfg)
@@ -32,7 +32,9 @@ def setup_cfg(config_file):
     cfg.freeze()
     return cfg
 
-
+@spaces.GPU
+@torch.no_grad()
+@torch.autocast(device_type="cuda", dtype=torch.float16)
 def inference_automatic(input_img, class_names):
     mp.set_start_method("spawn", force=True)
     config_file = './configs/ground-truth-warmup/mask-adapter/mask_adapter_convnext_large_cocopan_eval_ade20k.yaml'
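The three decorators added above are the usual pattern for a Space running on ZeroGPU: @spaces.GPU attaches a GPU to the process only for the duration of the call (which is why import spaces is added in the first hunk), while @torch.no_grad() and fp16 autocast keep inference fast and memory-light. A minimal, self-contained sketch of the same pattern; net and segment are hypothetical stand-ins for the app's SAM2/CLIP/Mask Adapter pipeline:

import spaces
import torch
from torch import nn

# Hypothetical stand-in model for the real SAM2 + CLIP + Mask Adapter stack.
net = nn.Linear(8, 8).eval()

@spaces.GPU                                               # ZeroGPU: a GPU is attached only while this call runs
@torch.no_grad()                                          # inference only, no autograd bookkeeping
@torch.autocast(device_type="cuda", dtype=torch.float16)  # half-precision compute on CUDA
def segment(x: torch.Tensor) -> torch.Tensor:
    # CUDA work belongs inside the @spaces.GPU-decorated function.
    return net.to("cuda")(x.to("cuda"))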
@@ -46,10 +48,11 @@ def inference_automatic(input_img, class_names):
 
     return Image.fromarray(np.uint8(visualized_output.get_image())).convert('RGB')
 
-
+@spaces.GPU
+@torch.no_grad()
+@torch.autocast(device_type="cuda", dtype=torch.float16)
 def inference_point(input_img, evt: gr.SelectData,):
-
-    # You can adjust your segmentation logic based on clicked points.
+
     x, y = evt.index[0], evt.index[1]
     points = [[x, y]]
     print(f"Selected point: {points}")
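inference_point relies on Gradio's select event: clicking the image fires it, and Gradio passes a gr.SelectData whose .index field carries the clicked (x, y) pixel. A minimal sketch of that wiring, separate from this app (component names are illustrative):

import gradio as gr

def on_click(img, evt: gr.SelectData):
    # evt.index is the (x, y) pixel coordinate of the click
    x, y = evt.index[0], evt.index[1]
    return f"Selected point: ({x}, {y})"

with gr.Blocks() as demo:
    image = gr.Image()
    label = gr.Textbox()
    # .select fires when the user clicks the image; Gradio injects SelectData automatically
    image.select(on_click, inputs=image, outputs=label)

demo.launch()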
@@ -78,37 +81,32 @@ sam2_model = None
 clip_model = None
 mask_adapter = None
 
-# Model loading and initialization function
 def initialize_models(sam_path, adapter_pth, model_cfg, cfg):
     cfg = setup_cfg(cfg)
     global sam2_model, clip_model, mask_adapter
 
-    # SAM2 initialization
     if sam2_model is None:
         sam2_model = build_sam2(model_cfg, sam_path, device="cuda", apply_postprocessing=False)
         print("SAM2 model initialized.")
 
-    # CLIP model initialization
     if clip_model is None:
         clip_model, _, _ = open_clip.create_model_and_transforms("convnext_large_d_320", pretrained="laion2b_s29b_b131k_ft_soup")
+        clip_model = clip_model.eval()
         clip_model = clip_model.to("cuda")
         print("CLIP model initialized.")
 
-    # Mask Adapter model initialization
     if mask_adapter is None:
-        mask_adapter = build_mask_adapter(cfg, "MASKAdapterHead").cuda
-
+        mask_adapter = build_mask_adapter(cfg, "MASKAdapterHead").to("cuda")
+        mask_adapter = mask_adapter.eval()
         adapter_state_dict = torch.load(adapter_pth)
         mask_adapter.load_state_dict(adapter_state_dict)
         print("Mask Adapter model initialized.")
 
-# Initialize configuration and models
 model_cfg = "configs/sam2.1/sam2.1_hiera_l.yaml"
 sam_path = './sam2.1_hiera_large.pt'
 adapter_pth = './model_0279999_with_sem_new.pth'
 cfg = './configs/ground-truth-warmup/mask-adapter/mask_adapter_convnext_large_cocopan_eval_ade20k.yaml'
 
-# Call the initialization function
 initialize_models(sam_path, adapter_pth, model_cfg, cfg)
 
 # Examples for testing
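The substantive fix in the last hunk is replacing .cuda with .to("cuda"): without parentheses, .cuda only references the bound method, so mask_adapter ended up as a method object instead of a module on the GPU, and the subsequent load_state_dict call would raise AttributeError. The added .eval() calls also matter for inference, since they disable dropout and freeze batch-norm statistics. A standalone sketch of the failure mode (assumes a CUDA device; nn.Linear stands in for the mask adapter):

import torch
from torch import nn

net = nn.Linear(2, 2)

broken = net.cuda                        # the old bug: a bound method, not a module
print(type(broken))                      # <class 'method'>; broken.load_state_dict raises AttributeError

fixed = net.to("cuda").eval()            # actually moves parameters to the GPU, inference mode
print(next(fixed.parameters()).device)   # cuda:0
fixed.load_state_dict(fixed.state_dict())  # state-dict loading now works, as in the diff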