import os
os.system('pip install paddlepaddle==2.4.2')
# os.system('pip install paddlepaddle==0.0.0 -f https://www.paddlepaddle.org.cn/whl/linux/cpu-mkl/develop.html')
os.system('pip install paddleocr')
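# paddlepaddle is pinned to 2.4.2 for reproducibility; the commented-out line above
# would instead pull the nightly CPU (MKL) build from the PaddlePaddle wheel index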
from paddleocr import PaddleOCR, draw_ocr
from PIL import Image
import gradio as gr
import torch

# Download example image
torch.hub.download_url_to_file('https://i.imgur.com/aqMBT0i.jpg', 'example.jpg')
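# Note: example.jpg is only a downloaded sample asset; the Interface examples below
# assume english.png and spanish.png already sit alongside this script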

# Cache one PaddleOCR pipeline per language so the models are initialized
# only once instead of on every request
ocr_models = {}

def inference(img, lang):
    # Initialize (or reuse) the OCR pipeline for the selected language
    if lang not in ocr_models:
        ocr_models[lang] = PaddleOCR(use_angle_cls=True, lang=lang, use_gpu=False)
    ocr = ocr_models[lang]
    img_path = img
    result = ocr.ocr(img_path, cls=True)[0]

    # Extract boxes, recognized text, and confidence scores
    boxes = [line[0] for line in result]
    txts = [line[1][0] for line in result]
    scores = [line[1][1] for line in result]

    # Load the image and draw the OCR results (simfang.ttf must be present for text rendering)
    image = Image.open(img_path).convert('RGB')
    im_show = draw_ocr(image, boxes, txts, scores, font_path='simfang.ttf')
    im_show = Image.fromarray(im_show)
    im_show.save('result.jpg')
    
    return 'result.jpg', '\n'.join(txts)
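
# Quick local sanity check (assumes english.png exists next to this script):
#   out_img, out_txt = inference('english.png', 'en')
#   print(out_txt)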

def update_example(lang):
    # Automatically load the example based on selected language
    if lang == 'es':
        return 'spanish.png'
    elif lang == 'en':
        return 'english.png'
    return None

# Title and description for the Gradio app
title = 'Gradio demo for PaddleOCR'
article = ""
description = "This PaddleOCR demo supports Spanish and English. To use it, upload an image and choose a language from the dropdown menu, or click one of the examples to load it."

# Gradio Interface
app = gr.Interface(
    inference,
    inputs=[
        gr.Image(type='filepath', label='Input'), 
        gr.Dropdown(choices=['es', 'en'], value='en', label='Language', interactive=True)
    ],
    outputs=["image", "text"],
    title=title,
    description=description,
    article=article,
    examples=[['english.png','en'], ['spanish.png','es']],
    css=".output_image, .input_image {height: 40rem !important; width: 100% !important;}",
)

# gr.Interface has no set_input_callback method; instead, re-enter the underlying
# Blocks context and wire the dropdown's change event to update the image input
with app:
    image_input, lang_dropdown = app.input_components
    lang_dropdown.change(fn=update_example, inputs=lang_dropdown, outputs=image_input)

# Launch the app
app.launch(debug=True)
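# When running locally, a temporary public URL can also be requested via share=True:
#   app.launch(debug=True, share=True)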