Update app.py
app.py CHANGED
@@ -47,6 +47,15 @@ model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()
 
+# Load typhoon-ocr-3b
+MODEL_ID_T = "scb10x/typhoon-ocr-3b"
+processor_t = AutoProcessor.from_pretrained(MODEL_ID_T, trust_remote_code=True)
+model_t = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+    MODEL_ID_T,
+    trust_remote_code=True,
+    torch_dtype=torch.float16
+).to(device).eval()
+
 # Load olmOCR-7B-0225-preview
 MODEL_ID_O = "allenai/olmOCR-7B-0225-preview"
 processor_o = AutoProcessor.from_pretrained(MODEL_ID_O, trust_remote_code=True)
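The hunk above registers typhoon-ocr-3b with the same AutoProcessor / Qwen2_5_VLForConditionalGeneration pattern already used for the other checkpoints. As a sanity check, here is a minimal standalone sketch of pushing one image through that pair, using only transformers APIs the app already depends on. The image path, prompt text, and generation settings are illustrative assumptions, not code from this commit, and the typhoon model card says the model is meant to be driven by its own specific prompt, so treat this as a smoke test rather than the intended usage.

```python
# Hypothetical standalone smoke test -- not part of this commit.
# Assumes a CUDA device, mirroring the app's own .to(device) setup
# (float16 generation on CPU is generally not supported).
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"

MODEL_ID_T = "scb10x/typhoon-ocr-3b"
processor_t = AutoProcessor.from_pretrained(MODEL_ID_T, trust_remote_code=True)
model_t = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID_T,
    trust_remote_code=True,
    torch_dtype=torch.float16,
).to(device).eval()

image = Image.open("sample_page.png").convert("RGB")  # placeholder document image
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        # Placeholder instruction; the model card's prescribed prompt
        # should be used in real runs.
        {"type": "text", "text": "Extract all text from this page."},
    ],
}]
prompt = processor_t.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
inputs = processor_t(text=[prompt], images=[image], return_tensors="pt").to(device)

with torch.inference_mode():
    output_ids = model_t.generate(**inputs, max_new_tokens=1024)

# Decode only the newly generated tokens, not the echoed prompt.
new_tokens = output_ids[:, inputs["input_ids"].shape[1]:]
print(processor_t.batch_decode(new_tokens, skip_special_tokens=True)[0])
```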
@@ -96,6 +105,9 @@ def generate_image(model_name: str, text: str, image: Image.Image,
     elif model_name == "olmOCR-7B-0225":
         processor = processor_o
         model = model_o
+    elif model_name == "typhoon-ocr-3b":
+        processor = processor_t
+        model = model_t
     else:
         yield "Invalid model selected.", "Invalid model selected."
         return
@@ -149,6 +161,9 @@ def generate_video(model_name: str, text: str, video_path: str,
     elif model_name == "olmOCR-7B-0225":
         processor = processor_o
         model = model_o
+    elif model_name == "typhoon-ocr-3b":
+        processor = processor_t
+        model = model_t
     else:
         yield "Invalid model selected.", "Invalid model selected."
         return
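The same three-line branch is added to both generate_image and generate_video, so the two dispatch chains have to be kept in sync by hand whenever a model is added. A dict-based registry is one way to collapse them into a single lookup; the sketch below is a hypothetical refactor, not code from this commit, and every variable name in it other than processor_t/model_t, processor_o/model_o, and model_x is an assumption based on the file's naming scheme.

```python
# Hypothetical refactor sketch -- not part of this commit.
# One registry replaces the duplicated elif chains in generate_image
# and generate_video; the DREX/VIREX variable names are assumed.
MODEL_REGISTRY = {
    "DREX-062225-exp": (processor_x, model_x),   # processor_x assumed
    "VIREX-062225-exp": (processor_v, model_v),  # both names assumed
    "typhoon-ocr-3b": (processor_t, model_t),
    "olmOCR-7B-0225": (processor_o, model_o),
}

def resolve_model(model_name: str):
    """Return (processor, model) for a known selection, else None."""
    return MODEL_REGISTRY.get(model_name)
```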
@@ -260,16 +275,18 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         markdown_output = gr.Markdown(label="Formatted Result (Result.Md)")
 
         model_choice = gr.Radio(
-            choices=["DREX-062225-exp", "VIREX-062225-exp", "olmOCR-7B-0225"],
+            choices=["DREX-062225-exp", "VIREX-062225-exp", "typhoon-ocr-3b", "olmOCR-7B-0225"],
             label="Select Model",
             value="DREX-062225-exp"
         )
 
-        gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Doc-VLMs/discussions)")
+        gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Doc-VLMs/discussions)")
         gr.Markdown("> [DREX-062225-exp](https://huggingface.co/prithivMLmods/DREX-062225-exp): the drex-062225-exp (document retrieval and extraction expert) model is a specialized fine-tuned version of docscopeocr-7b-050425-exp, optimized for document retrieval, content extraction, and analysis recognition. built on top of the qwen2.5-vl architecture.")
         gr.Markdown("> [VIREX-062225-exp](https://huggingface.co/prithivMLmods/VIREX-062225-exp): the virex-062225-exp (video information retrieval and extraction expert - experimental) model is a fine-tuned version of qwen2.5-vl-7b-instruct, specifically optimized for advanced video understanding, image comprehension, sense of reasoning, and natural language decision-making through cot reasoning.")
+        gr.Markdown("> [typhoon-ocr-3b](https://huggingface.co/scb10x/typhoon-ocr-3b): a bilingual document parsing model built specifically for real-world documents in thai and english, inspired by models like olmocr, based on qwen2.5-vl-instruction. this model is intended to be used with a specific prompt only.")
         gr.Markdown("> [olmOCR-7B-0225](https://huggingface.co/allenai/olmOCR-7B-0225-preview): the olmocr-7b-0225-preview model is based on qwen2-vl-7b, optimized for document-level optical character recognition (ocr), long-context vision-language understanding, and accurate image-to-text conversion with mathematical latex formatting. designed with a focus on high-fidelity visual-textual comprehension.")
-
+        gr.Markdown(">[!note] all the models in space are not guaranteed to perform well in video inference use cases.")
+
     image_submit.click(
         fn=generate_image,
         inputs=[model_choice, image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
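One wiring detail to keep in mind: the new Radio choice string "typhoon-ocr-3b" must match the model_name comparisons in both generate paths character for character, or the selection falls through to the "Invalid model selected." branch. The added note markdown makes the video caveat explicit, which matters here because typhoon-ocr-3b is a document parser; routing it through generate_video is allowed by this UI but is exactly the case the note warns about.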