import os
import tempfile

import gradio as gr
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

# Pick the device once and use it everywhere below. The original hard-coded
# device_map='cuda', which crashes from_pretrained() on CPU-only machines
# even though the rest of the script tried to handle the CPU case.
device = "cuda" if torch.cuda.is_available() else "cpu"
if device == "cuda":
    print("CUDA is available! GPU is present.")
else:
    print("CUDA is not available. Running on CPU.")

# Load the tokenizer and model. GOT-OCR2_0 ships custom modeling code, hence
# trust_remote_code=True; device_map handles placement, so no extra .cuda().
tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
model = AutoModel.from_pretrained(
    'ucaslcl/GOT-OCR2_0',
    trust_remote_code=True,
    low_cpu_mem_usage=True,
    device_map=device,
    use_safetensors=True,
    pad_token_id=tokenizer.eos_token_id,
)
model = model.eval()


def perform_ocr(image):
    """Run GOT-OCR2_0 OCR on a PIL image and return the extracted text.

    The model's chat() API takes a file path, so the image is written to a
    uniquely named temporary file that is always removed afterwards. (The
    original used the fixed name "temp_image.png", which leaked the file and
    raced when Gradio served concurrent requests.)

    Args:
        image: PIL.Image.Image uploaded via the Gradio UI.

    Returns:
        str: text recognized by the model.
    """
    # The model expects RGB input; Gradio may hand us RGBA/L/P images.
    if image.mode != "RGB":
        image = image.convert("RGB")

    # delete=False so the model can reopen the file by path (a second open of
    # an already-open NamedTemporaryFile fails on Windows); cleanup is ours.
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        temp_image_path = tmp.name
    try:
        image.save(temp_image_path)
        return model.chat(tokenizer, temp_image_path, ocr_type='ocr')
    finally:
        os.remove(temp_image_path)


# Gradio UI: upload an image, get the recognized text back.
interface = gr.Interface(
    fn=perform_ocr,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=gr.Textbox(label="Extracted Text"),
    title="OCR and Document Search Web Application",
    description="Upload an image to extract text using the GOT-OCR2_0 model.",
)

# Launch the Gradio app
interface.launch()