Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -16,6 +16,7 @@ import cv2
 from transformers import (
     Qwen2VLForConditionalGeneration,
     Qwen2_5_VLForConditionalGeneration,
+    Gemma3nForConditionalGeneration,
     AutoModelForImageTextToText,
     AutoProcessor,
     TextIteratorStreamer,
@@ -57,9 +58,9 @@ model_t = Qwen2_5_VLForConditionalGeneration.from_pretrained(
 ).to(device).eval()
 
 # Load olmOCR-7B-0225-preview
-MODEL_ID_O = "
+MODEL_ID_O = "google/gemma-3n-E4B-it"
 processor_o = AutoProcessor.from_pretrained(MODEL_ID_O, trust_remote_code=True)
-model_o =
+model_o = Gemma3nForConditionalGeneration.from_pretrained(
     MODEL_ID_O,
     trust_remote_code=True,
     torch_dtype=torch.float16
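
For context, below is a minimal usage sketch, not part of this commit, showing how the swapped-in Gemma 3n model (model_o) and its processor (processor_o) could be called for image-to-text generation once loaded as in the diff above. The image path, prompt text, and generation settings are illustrative assumptions.

# Usage sketch (assumption, not in the commit): run the Gemma 3n model
# loaded above on a single image. "page.png" and the prompt are placeholders.
import torch
from PIL import Image

image = Image.open("page.png")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": "Extract all text from this page."},
        ],
    }
]

# The processor's chat template prepares both the image and the text turn.
inputs = processor_o.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model_o.device, dtype=torch.float16)

with torch.inference_mode():
    output_ids = model_o.generate(**inputs, max_new_tokens=512)

# Decode only the newly generated tokens, not the prompt.
answer = processor_o.batch_decode(
    output_ids[:, inputs["input_ids"].shape[-1]:], skip_special_tokens=True
)[0]
print(answer)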