import os
import tempfile

import gradio as gr
import torch
from transformers import AutoModel, AutoTokenizer
from PIL import Image

# Check GPU availability
if torch.cuda.is_available():
    print("CUDA is available! GPU is present.")
    print(f"Number of GPUs: {torch.cuda.device_count()}")
    print(f"GPU Name: {torch.cuda.get_device_name(0)}")
else:
    print("CUDA is not available. Running on CPU.")

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)

# Initialize the model on the GPU when one is available, otherwise on the CPU
if torch.cuda.is_available():
    model = AutoModel.from_pretrained(
        'ucaslcl/GOT-OCR2_0',
        trust_remote_code=True,
        low_cpu_mem_usage=True,
        device_map='cuda',
        use_safetensors=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    model = model.eval().cuda()
else:
    model = AutoModel.from_pretrained(
        'ucaslcl/GOT-OCR2_0',
        trust_remote_code=True,
        low_cpu_mem_usage=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    model = model.eval()  # Keep the model on the CPU

# Define the OCR function
def perform_ocr(image):
    # Ensure the image is in RGB format
    if image.mode != "RGB":
        image = image.convert("RGB")

    # The GOT-OCR2_0 usage examples pass model.chat() an image *file path*, so
    # write the uploaded PIL image to a temporary file and hand over its path
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        image.save(tmp, format="PNG")
        temp_path = tmp.name

    try:
        # Run plain-text OCR on the image
        res = model.chat(tokenizer, temp_path, ocr_type='ocr')
    finally:
        os.remove(temp_path)

    return res
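
# Note: per the GOT-OCR2_0 model card, model.chat() supports modes beyond
# plain OCR, e.g. ocr_type='format' for structure-preserving (formatted)
# output, and fine-grained OCR via the ocr_box/ocr_color arguments.
# Swapping the ocr_type above is enough to try them, e.g.:
#   res = model.chat(tokenizer, temp_path, ocr_type='format')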

# Define the Gradio interface
interface = gr.Interface(
    fn=perform_ocr,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=gr.Textbox(label="Extracted Text"),
    title="OCR and Document Search Web Application",
    description="Upload an image to extract text using the GOT-OCR2_0 model."
)

# Launch the Gradio app
interface.launch()
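
# Note: launch() serves the app locally by default. For a temporary public
# link (useful on a remote machine or in a notebook), Gradio accepts
# share=True:
#   interface.launch(share=True)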