prithivMLmods committed (verified)
Commit 0356fa0 · 1 Parent(s): ec56362

Update app.py

Files changed (1): app.py +6 -6
app.py CHANGED
@@ -82,8 +82,8 @@ model_a = AutoModelForImageTextToText.from_pretrained(
 ).to(device).eval()
 
 #-----------------------------subfolder-----------------------------#
-# Load MonkeyOCR-1.2B-0709
-MODEL_ID_W = "echo840/MonkeyOCR-1.2B-0709"
+# Load MonkeyOCR-3B-0709
+MODEL_ID_W = "echo840/MonkeyOCR-3B-0709"
 SUBFOLDER = "Recognition"
 processor_w = AutoProcessor.from_pretrained(MODEL_ID_W, trust_remote_code=True, subfolder=SUBFOLDER)
 model_w = Qwen2_5_VLForConditionalGeneration.from_pretrained(
@@ -137,7 +137,7 @@ def generate_image(model_name: str, text: str, image: Image.Image,
     elif model_name == "Aya-Vision-8B":
         processor = processor_a
         model = model_a
-    elif model_name == "MonkeyOCR-1.2B-0709":
+    elif model_name == "MonkeyOCR-3B-0709":
         processor = processor_w
         model = model_w
     else:
@@ -198,7 +198,7 @@ def generate_video(model_name: str, text: str, video_path: str,
     elif model_name == "Aya-Vision-8B":
         processor = processor_a
         model = model_a
-    elif model_name == "MonkeyOCR-1.2B-0709":
+    elif model_name == "MonkeyOCR-3B-0709":
         processor = processor_w
         model = model_w
     else:
@@ -311,13 +311,13 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
     with gr.Accordion("Formatted Result (Result.md)", open=False):
         markdown_output = gr.Markdown(label="Formatted Result (Result.Md)")
     model_choice = gr.Radio(
-        choices=["Nanonets-OCR-s", "MonkeyOCR-1.2B-0709", "Qwen2-VL-OCR", "RolmOCR-7B", "Aya-Vision-8B"],
+        choices=["Nanonets-OCR-s", "MonkeyOCR-3B-0709", "Qwen2-VL-OCR", "RolmOCR-7B", "Aya-Vision-8B"],
         label="Select Model",
         value="Nanonets-OCR-s"
     )
     gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-OCR/discussions)")
     gr.Markdown("> [Nanonets-OCR-s](https://huggingface.co/nanonets/Nanonets-OCR-s): nanonets-ocr-s is a powerful, state-of-the-art image-to-markdown ocr model that goes far beyond traditional text extraction. it transforms documents into structured markdown with intelligent content recognition and semantic tagging.")
-    gr.Markdown("> [MonkeyOCR-1.2B-0709](https://huggingface.co/echo840/MonkeyOCR-1.2B-0709): MonkeyOCR adopts a structure-recognition-relation (SRR) triplet paradigm, which simplifies the multi-tool pipeline of modular approaches while avoiding the inefficiency of using large multimodal models for full-page document processing.")
+    gr.Markdown("> [MonkeyOCR-3B-0709](https://huggingface.co/echo840/MonkeyOCR-3B-0709): MonkeyOCR adopts a structure-recognition-relation (SRR) triplet paradigm, which simplifies the multi-tool pipeline of modular approaches while avoiding the inefficiency of using large multimodal models for full-page document processing.")
     gr.Markdown("> [Qwen2-VL-OCR-2B-Instruct](https://huggingface.co/prithivMLmods/Qwen2-VL-OCR-2B-Instruct): qwen2-vl-ocr-2b-instruct model is a fine-tuned version of qwen2-vl-2b-instruct, tailored for tasks that involve [messy] optical character recognition (ocr), image-to-text conversion, and math problem solving with latex formatting.")
     gr.Markdown("> [RolmOCR](https://huggingface.co/reducto/RolmOCR): rolmocr, high-quality, openly available approach to parsing pdfs and other complex documents optical character recognition. it is designed to handle a wide range of document types, including scanned documents, handwritten text, and complex layouts.")
     gr.Markdown("> [Aya-Vision](https://huggingface.co/CohereLabs/aya-vision-8b): cohere labs aya vision 8b is an open weights research release of an 8-billion parameter model with advanced capabilities optimized for a variety of vision-language use cases, including ocr, captioning, visual reasoning, summarization, question answering, code, and more.")
 