# app.py — Gradio OCR demo (Hugging Face Space by Vinay15, commit 802d2bc, 1.6 kB)
import gradio as gr
import torch
from transformers import AutoModel, AutoTokenizer
from PIL import Image
# Report the runtime's GPU situation in the Space logs at startup.
if not torch.cuda.is_available():
    print("CUDA is not available. Running on CPU.")
else:
    print("CUDA is available! GPU is present.")
    print(f"Number of GPUs: {torch.cuda.device_count()}")
    print(f"GPU Name: {torch.cuda.get_device_name(0)}")
# Load the tokenizer and model.
tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)

# Shared from_pretrained options. The original duplicated these per branch and
# inconsistently dropped use_safetensors on the CPU path; both paths now load
# the same safetensors weights.
_load_kwargs = dict(
    trust_remote_code=True,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    pad_token_id=tokenizer.eos_token_id,
)

# Initialize the model on GPU when one is visible, otherwise on CPU.
if torch.cuda.is_available():
    model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', device_map='cuda', **_load_kwargs)
    model = model.eval().cuda()
else:
    model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', **_load_kwargs)
    model = model.eval()  # inference mode, kept on CPU
def perform_ocr(image):
    """Run the GOT-OCR2_0 model on an uploaded image and return the extracted text."""
    # Normalize non-RGB uploads (e.g. RGBA PNGs, grayscale) before inference.
    if image.mode != "RGB":
        image = image.convert("RGB")
    # NOTE(review): model.chat is handed a PIL image here — confirm the
    # remote-code implementation accepts PIL objects rather than file paths.
    return model.chat(tokenizer, image, ocr_type='ocr')
# Assemble the Gradio UI: one image input, one text output.
_image_input = gr.Image(type="pil", label="Upload Image")
_text_output = gr.Textbox(label="Extracted Text")

interface = gr.Interface(
    fn=perform_ocr,
    inputs=_image_input,
    outputs=_text_output,
    title="OCR and Document Search Web Application",
    description="Upload an image to extract text using the GOT-OCR2_0 model.",
)

# Launch the Gradio app.
interface.launch()