update
models/vsa_model.py  CHANGED  (+4, -3)
@@ -126,7 +126,7 @@ class VLM:
         load_8bit: bool = False,
         load_4bit: bool = True,
         temperature: float = 0.2,
-        max_new_tokens: int =
+        max_new_tokens: int = 2000,
     ):
         disable_torch_init()
         model_name = get_model_name_from_path(model_path)
@@ -307,7 +307,7 @@ class VisionSearchAssistant:
     def __init__(
         self,
         search_model: str = "internlm/internlm2_5-1_8b-chat",
-        ground_model: str = "IDEA-Research/grounding-dino-
+        ground_model: str = "IDEA-Research/grounding-dino-tiny",
         ground_device: str = "cuda:1",
         vlm_model: str = "liuhaotian/llava-v1.6-vicuna-7b",
         vlm_device: str = "cuda:2",
@@ -324,7 +324,8 @@ class VisionSearchAssistant:
         self.use_correlate = True

         self.searcher = WebSearcher(
-            model_path = self.search_model
+            model_path = self.search_model,
+            lang = 'en'
         )
         self.grounder = VisualGrounder(
             model_path = self.ground_model,