# Doc-VLMs-OCR / app.py
import os
import random
import uuid
import json
import time
import asyncio
from threading import Thread
import gradio as gr
import spaces
import torch
import numpy as np
from PIL import Image
import cv2
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    Gemma3ForConditionalGeneration,
    AutoModelForImageTextToText,
    AutoProcessor,
    TextIteratorStreamer,
)
from transformers.image_utils import load_image
# Force synchronous CUDA kernel launches so errors surface at the failing call (debug aid):
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
# Constants for text generation
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
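# The input-token budget above can be overridden from the environment instead of editing
# this file, e.g. launching with `MAX_INPUT_TOKEN_LENGTH=8192 python app.py` (example value).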
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# -------------------------------------------------------------------
# Load models and processors
# -------------------------------------------------------------------
# VIREX (Video Information Retrieval & Extraction)
MODEL_ID_VIREX = "prithivMLmods/VIREX-062225-exp"
processor_virex = AutoProcessor.from_pretrained(MODEL_ID_VIREX, trust_remote_code=True)
model_virex = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID_VIREX,
    trust_remote_code=True,
    torch_dtype=torch.float16
).to(device).eval()
# DREX (Document Retrieval & Extraction Expert)
MODEL_ID_DREX = "prithivMLmods/DREX-062225-exp"
processor_drex = AutoProcessor.from_pretrained(MODEL_ID_DREX, trust_remote_code=True)
model_drex = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID_DREX,
    trust_remote_code=True,
    torch_dtype=torch.float16
).to(device).eval()
# Typhoon-OCR-3B slot (Thai/English OCR parser in the UI; note that this checkpoint ID
# currently points at sarvamai/sarvam-translate, a Gemma3-based model, not scb10x/typhoon-ocr-3b)
MODEL_ID_TYPHOON = "sarvamai/sarvam-translate"
processor_typhoon = AutoProcessor.from_pretrained(MODEL_ID_TYPHOON, trust_remote_code=True)
model_typhoon = Gemma3ForConditionalGeneration.from_pretrained(
    MODEL_ID_TYPHOON,
    trust_remote_code=True,
    torch_dtype=torch.float16
).to(device).eval()
# olmOCR-7B-0225-preview (document OCR + LaTeX)
MODEL_ID_OLM = "allenai/olmOCR-7B-0225-preview"
processor_olm = AutoProcessor.from_pretrained(MODEL_ID_OLM, trust_remote_code=True)
model_olm = Qwen2VLForConditionalGeneration.from_pretrained(
    MODEL_ID_OLM,
    trust_remote_code=True,
    torch_dtype=torch.float16
).to(device).eval()
# -------------------------------------------------------------------
# Video downsampling helper
# -------------------------------------------------------------------
def downsample_video(video_path):
    """
    Downsamples the video to 10 evenly spaced frames.
    Returns a list of (PIL.Image, timestamp) tuples.
    """
    vidcap = cv2.VideoCapture(video_path)
    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = vidcap.get(cv2.CAP_PROP_FPS) or 30.0
    frames = []
    frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)
    for idx in frame_indices:
        vidcap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        success, img = vidcap.read()
        if not success:
            continue
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        frames.append((Image.fromarray(img), round(idx / fps, 2)))
    vidcap.release()
    return frames
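# Minimal usage sketch (assuming a local clip, e.g. the bundled "videos/1.mp4"):
#   for frame, ts in downsample_video("videos/1.mp4"):
#       print(ts, frame.size)
# This yields up to 10 (PIL.Image, timestamp-in-seconds) pairs.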
# -------------------------------------------------------------------
# Generation loops
# -------------------------------------------------------------------
def _make_generation_kwargs(processor, inputs, streamer, max_new_tokens,
                            do_sample=False, temperature=1.0, top_p=1.0,
                            top_k=0, repetition_penalty=1.0):
    # ensure pad/eos tokens are defined
    tok = processor.tokenizer
    return {
        **inputs,
        "streamer": streamer,
        "max_new_tokens": max_new_tokens,
        "do_sample": do_sample,
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
        "repetition_penalty": repetition_penalty,
        "pad_token_id": tok.eos_token_id,
        "eos_token_id": tok.eos_token_id,
    }
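# Illustrative call pattern (a sketch; `enc` is assumed to be the tensor dict produced by the
# matching processor, as in the functions below):
#   streamer = TextIteratorStreamer(processor_drex, skip_prompt=True, skip_special_tokens=True)
#   gen_kwargs = _make_generation_kwargs(processor_drex, enc, streamer, max_new_tokens=512)
#   Thread(target=model_drex.generate, kwargs=gen_kwargs).start()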
@spaces.GPU
def generate_image(model_name: str, text: str, image: Image.Image,
                   max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
                   temperature: float = 0.6,
                   top_p: float = 0.9,
                   top_k: int = 50,
                   repetition_penalty: float = 1.2):
    # select the model/processor pair
    if model_name.startswith("VIREX"):
        processor, model = processor_virex, model_virex
    elif model_name.startswith("DREX"):
        processor, model = processor_drex, model_drex
    elif model_name.startswith("olmOCR"):
        processor, model = processor_olm, model_olm
    elif model_name.startswith("Typhoon"):
        processor, model = processor_typhoon, model_typhoon
    else:
        yield "Invalid model selected.", "Invalid model selected."
        return
    if image is None:
        yield "Please upload an image.", ""
        return
    # build the chat-style prompt
    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": text},
        ]
    }]
    prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(
        text=[prompt],
        images=[image],
        return_tensors="pt",
        padding=True,
        truncation=False,
        max_length=MAX_INPUT_TOKEN_LENGTH
    ).to(device)
    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
    gen_kwargs = _make_generation_kwargs(
        processor, inputs, streamer, max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty
    )
    # launch generation in a background thread and stream partial text back
    Thread(target=model.generate, kwargs=gen_kwargs).start()
    buffer = ""
    for chunk in streamer:
        buffer += chunk
        yield buffer, buffer
@spaces.GPU
def generate_video(model_name: str, text: str, video_path: str,
                   max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
                   temperature: float = 0.6,
                   top_p: float = 0.9,
                   top_k: int = 50,
                   repetition_penalty: float = 1.2):
    # select the model/processor pair
    if model_name.startswith("VIREX"):
        processor, model = processor_virex, model_virex
    elif model_name.startswith("DREX"):
        processor, model = processor_drex, model_drex
    elif model_name.startswith("olmOCR"):
        processor, model = processor_olm, model_olm
    elif model_name.startswith("Typhoon"):
        processor, model = processor_typhoon, model_typhoon
    else:
        yield "Invalid model selected.", "Invalid model selected."
        return
    if video_path is None:
        yield "Please upload a video.", ""
        return
    # downsample the video to 10 evenly spaced frames
    frames = downsample_video(video_path)
    # system prompt + user query; timestamped frames are appended below
    messages = [
        {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
        {"role": "user", "content": [{"type": "text", "text": text}]}
    ]
    for img, ts in frames:
        messages[1]["content"].append({"type": "text", "text": f"Frame {ts}s:"})
        messages[1]["content"].append({"type": "image", "image": img})
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
        truncation=False,
        max_length=MAX_INPUT_TOKEN_LENGTH
    ).to(device)
    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
    gen_kwargs = _make_generation_kwargs(
        processor, inputs, streamer, max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty
    )
    # launch generation in a background thread and stream partial text back
    Thread(target=model.generate, kwargs=gen_kwargs).start()
    buffer = ""
    for chunk in streamer:
        buffer += chunk.replace("<|im_end|>", "")
        yield buffer, buffer
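# Both generators can also be consumed outside Gradio; a minimal sketch, assuming the
# bundled example image "images/1.png" exists:
#   for raw, md in generate_image("DREX-062225-7B-exp",
#                                 "Convert this page to doc [text] precisely.",
#                                 Image.open("images/1.png")):
#       print(raw)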
# -------------------------------------------------------------------
# Examples, CSS, and launch
# -------------------------------------------------------------------
image_examples = [
    ["Convert this page to doc [text] precisely.", "images/3.png"],
    ["Convert this page to doc [text] precisely.", "images/4.png"],
    ["Convert this page to doc [text] precisely.", "images/1.png"],
    ["Convert chart to OTSL.", "images/2.png"]
]
video_examples = [
    ["Explain the video in detail.", "videos/2.mp4"],
    ["Explain the ad in detail.", "videos/1.mp4"]
]
css = """
.submit-btn {
background-color: #2980b9 !important;
color: white !important;
}
.submit-btn:hover {
background-color: #3498db !important;
}
.canvas-output {
border: 2px solid #4682B4;
border-radius: 10px;
padding: 20px;
}
"""
with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
    gr.Markdown("# **[Doc VLMs OCR](https://huggingface.co/collections/prithivMLmods/multimodal-implementations-67c9982ea04b39f0608badb0)**")
    with gr.Row():
        with gr.Column():
            with gr.Tabs():
                with gr.TabItem("Image Inference"):
                    image_query = gr.Textbox(label="Query Input", placeholder="Enter your query here...")
                    image_upload = gr.Image(type="pil", label="Image")
                    image_submit = gr.Button("Submit", elem_classes="submit-btn")
                    gr.Examples(examples=image_examples, inputs=[image_query, image_upload])
                with gr.TabItem("Video Inference"):
                    video_query = gr.Textbox(label="Query Input", placeholder="Enter your query here...")
                    video_upload = gr.Video(label="Video")
                    video_submit = gr.Button("Submit", elem_classes="submit-btn")
                    gr.Examples(examples=video_examples, inputs=[video_query, video_upload])
            with gr.Accordion("Advanced options", open=False):
                max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
                temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
                top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
                top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
                repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
        with gr.Column(elem_classes="canvas-output"):
            gr.Markdown("## Result Canvas")
            output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=2)
            markdown_output = gr.Markdown(label="Formatted Result (Result.Md)")
            model_choice = gr.Radio(
                choices=["DREX-062225-7B-exp", "olmOCR-7B-0225-preview", "VIREX-062225-7B-exp", "Typhoon-OCR-3B"],
                label="Select Model",
                value="DREX-062225-7B-exp"
            )
            gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Doc-VLMs/discussions)")
            gr.Markdown("> [DREX-062225-7B-exp](https://huggingface.co/prithivMLmods/DREX-062225-exp): ...")
            gr.Markdown("> [VIREX-062225-7B-exp](https://huggingface.co/prithivMLmods/VIREX-062225-exp): ...")
            gr.Markdown("> [Typhoon-OCR-3B](https://huggingface.co/scb10x/typhoon-ocr-3b): ...")
            gr.Markdown("> [olmOCR-7B-0225](https://huggingface.co/allenai/olmOCR-7B-0225-preview): ...")
            gr.Markdown("> ⚠️ note: video inference may be less reliable.")
    image_submit.click(
        fn=generate_image,
        inputs=[model_choice, image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
        outputs=[output, markdown_output]
    )
    video_submit.click(
        fn=generate_video,
        inputs=[model_choice, video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
        outputs=[output, markdown_output]
    )
if __name__ == "__main__":
    demo.queue(max_size=30).launch(share=True, mcp_server=True, ssr_mode=False, show_error=True)