prithivMLmods committed on
Commit 3063dc5 · verified · 1 Parent(s): b9c2fd6

Update app.py

Files changed (1): app.py (+6 -6)
app.py CHANGED
@@ -72,8 +72,8 @@ model_a = AutoModelForImageTextToText.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()
 
-# Load AIGVE-MACS
-MODEL_ID_W = "xiaoliux/AIGVE-MACS"
+# Load Lh41-1042-Magellanic-7B-0711
+MODEL_ID_W = "prithivMLmods/Lh41-1042-Magellanic-7B-0711"
 processor_w = AutoProcessor.from_pretrained(MODEL_ID_W, trust_remote_code=True)
 model_w = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID_W,
@@ -134,7 +134,7 @@ def generate_image(model_name: str, text: str, image: Image.Image,
     elif model_name == "Aya-Vision-8B":
         processor = processor_a
         model = model_a
-    elif model_name == "AIGVE-MACS-7B":
+    elif model_name == "Lh41-1042-Magellanic-7B-0711":
         processor = processor_w
         model = model_w
     else:
@@ -195,7 +195,7 @@ def generate_video(model_name: str, text: str, video_path: str,
     elif model_name == "Aya-Vision-8B":
         processor = processor_a
         model = model_a
-    elif model_name == "AIGVE-MACS-7B":
+    elif model_name == "Lh41-1042-Magellanic-7B-0711":
         processor = processor_w
         model = model_w
     else:
@@ -309,13 +309,13 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         with gr.Accordion("(Result.md)", open=False):
             markdown_output = gr.Markdown(label="Formatted Result (Result.Md)")
         model_choice = gr.Radio(
-            choices=["Nanonets-OCR-s", "Qwen2-VL-OCR-2B", "RolmOCR-7B", "AIGVE-MACS-7B", "Aya-Vision-8B"],
+            choices=["Nanonets-OCR-s", "Qwen2-VL-OCR-2B", "RolmOCR-7B", "Lh41-1042-Magellanic-7B-0711", "Aya-Vision-8B"],
             label="Select Model",
             value="Nanonets-OCR-s"
         )
         gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-OCR/discussions)")
         gr.Markdown("> [Nanonets-OCR-s](https://huggingface.co/nanonets/Nanonets-OCR-s): nanonets-ocr-s is a powerful, state-of-the-art image-to-markdown ocr model that goes far beyond traditional text extraction. it transforms documents into structured markdown with intelligent content recognition and semantic tagging.")
-        gr.Markdown("> [AIGVE-MACS-7B](https://huggingface.co/xiaoliux/AIGVE-MACS): AIGVE-MACS is a unified Vision-Language Model (VLM) designed to evaluate AI-generated videos. It provides numerical scores (0–5) and natural language justifications across 9 human-aligned video quality aspects.")
+        gr.Markdown("> [Lh41-1042-Magellanic-7B-0711](https://huggingface.co/prithivMLmods/Lh41-1042-Magellanic-7B-0711): lh41-1042-magellanic-7b-0711 model is a fine-tuned version of qwen2.5-vl-7b-instruct, optimized for image captioning, visual analysis, and image reasoning. built on top of the qwen2.5-vl, this experimental model enhances visual comprehension, focused training on 3,000k image pairs for superior image understanding")
         gr.Markdown("> [Qwen2-VL-OCR-2B](https://huggingface.co/prithivMLmods/Qwen2-VL-OCR-2B-Instruct): qwen2-vl-ocr-2b-instruct model is a fine-tuned version of qwen2-vl-2b-instruct, tailored for tasks that involve [messy] optical character recognition (ocr), image-to-text conversion, and math problem solving with latex formatting.")
         gr.Markdown("> [RolmOCR](https://huggingface.co/reducto/RolmOCR): rolmocr, high-quality, openly available approach to parsing pdfs and other complex documents optical character recognition. it is designed to handle a wide range of document types, including scanned documents, handwritten text, and complex layouts.")
         gr.Markdown("> [Aya-Vision](https://huggingface.co/CohereLabs/aya-vision-8b): cohere labs aya vision 8b is an open weights research release of an 8-billion parameter model with advanced capabilities optimized for a variety of vision-language use cases, including ocr, captioning, visual reasoning, summarization, question answering, code, and more.")