# OCR web application: upload an image, extract its text with the
# GOT-OCR2_0 model, and display the result in a Gradio interface.
import gradio as gr
from transformers import AutoModel, AutoTokenizer
from PIL import Image
import numpy as np
# Load the GOT-OCR2_0 tokenizer and model weights. trust_remote_code is
# required because this architecture ships its own modeling code on the Hub.
MODEL_ID = 'ucaslcl/GOT-OCR2_0'
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModel.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    pad_token_id=tokenizer.eos_token_id,
)
# Inference only: switch off dropout and other training-time behavior.
model = model.eval()
def perform_ocr(image):
    """Run OCR on an uploaded image and return the extracted text.

    Args:
        image: A ``PIL.Image.Image`` from the Gradio upload widget, or
            ``None`` when the user submits without uploading a file.

    Returns:
        str: The text recognized by the GOT-OCR2_0 model, or a
        human-readable error message (the Gradio textbox shows either).
    """
    # Gradio passes None when no file was uploaded; without this guard the
    # attribute access below raises and the user sees a cryptic
    # "'NoneType' object has no attribute 'mode'" message.
    if image is None:
        return "No image uploaded. Please upload an image to extract text."
    try:
        # Normalize non-RGB modes (RGBA, L, P, ...) to 3-channel RGB
        # before handing the pixels to the model.
        if image.mode != "RGB":
            image = image.convert("RGB")
        image_array = np.array(image)
        # NOTE(review): the GOT-OCR2_0 model card documents chat() as
        # taking an image *file path*; passing an ndarray relies on the
        # remote code accepting arrays — confirm against the model card.
        res = model.chat(tokenizer, image_array, ocr_type='ocr')
        return res
    except Exception as e:
        # UI boundary: surface the failure in the textbox rather than
        # crashing the request with a server-side traceback.
        return f"An error occurred: {str(e)}"
# Wire the OCR function into a single-input / single-output web UI.
image_input = gr.Image(type="pil", label="Upload Image")
text_output = gr.Textbox(label="Extracted Text")
interface = gr.Interface(
    fn=perform_ocr,
    inputs=image_input,
    outputs=text_output,
    title="OCR and Document Search Web Application",
    description="Upload an image to extract text using the GOT-OCR2_0 model.",
)
# Start the local Gradio server (blocks until the process is stopped).
interface.launch()