Added feature to switch between CPU and GPU based on availability
Files changed:
- internvl_utils.py (+3, -1)
- models/InternVL3/intervl3.py (+1, -1)
--- a/internvl_utils.py
+++ b/internvl_utils.py
@@ -82,7 +82,9 @@ def load_image(image, input_size=448, max_num=12):
     images = dynamic_preprocess(pil_image, image_size=input_size, use_thumbnail=True, max_num=max_num)
     pixel_values = [transform(image) for image in images]
     pixel_values = torch.stack(pixel_values)
-    pixel_values = pixel_values.to(torch.bfloat16)
+    pixel_values = pixel_values.to(torch.bfloat16)
+    if torch.cuda.is_available():
+        pixel_values = pixel_values.to("cuda")
     return pixel_values
 
 def split_model(model_name):
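The hunk above only moves the preprocessed tensor onto the GPU when one is actually present, so the Space still runs on CPU-only hardware. A minimal, self-contained sketch of that fallback pattern, assuming an arbitrary input tensor (the helper name to_best_device is hypothetical and not part of this repository):

import torch

def to_best_device(tensor: torch.Tensor) -> torch.Tensor:
    # Cast to bfloat16 first (matching load_image above), then move to the GPU
    # only when CUDA is available; on CPU-only machines the tensor stays put.
    tensor = tensor.to(torch.bfloat16)
    if torch.cuda.is_available():
        tensor = tensor.to("cuda")
    return tensor

# Example: a dummy batch of 448x448 RGB crops, shaped like load_image's output.
pixel_values = to_best_device(torch.rand(3, 3, 448, 448))
print(pixel_values.device, pixel_values.dtype)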
--- a/models/InternVL3/intervl3.py
+++ b/models/InternVL3/intervl3.py
@@ -25,7 +25,7 @@ class InternVL3(BaseModel):
             low_cpu_mem_usage=True,
             use_flash_attn=True,
             trust_remote_code=True,
-            device_map="
+            device_map="cuda" if torch.cuda.is_available() else "cpu",
         ).eval()
         self.tokenizer = AutoTokenizer.from_pretrained(
             self.model_name,
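The same availability check drives the device_map passed to from_pretrained, so the model lands on the GPU when one exists and falls back to CPU otherwise. A hedged sketch of that load path, not the Space's exact code: the checkpoint id "OpenGVLab/InternVL3-8B" is assumed for illustration, and Space-specific flags such as use_flash_attn are omitted here.

import torch
from transformers import AutoModel, AutoTokenizer

model_name = "OpenGVLab/InternVL3-8B"  # assumed checkpoint, for illustration only
device = "cuda" if torch.cuda.is_available() else "cpu"

# device_map accepts a plain device string (requires the accelerate package),
# placing the whole model on that device.
model = AutoModel.from_pretrained(
    model_name,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
    device_map=device,
).eval()
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)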