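"""Gradio demo for PaddleOCR.

Upload an image, pick a recognition language and a confidence threshold, and
get back the image with the detected text boxes drawn on it plus a table of
(bbox, score, text) rows.

Requires: paddleocr (with its paddlepaddle backend), gradio, opencv-python,
Pillow and numpy.
"""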
from paddleocr import PaddleOCR
import json
from PIL import Image
import gradio as gr
import numpy as np
import cv2

# Pick a random RGB color for drawing one bounding box
def get_random_color():
    c = tuple(np.random.randint(0, 256, 3).tolist())
    return c

# Draw the detected text boxes onto the image as colored polygons
def draw_ocr_bbox(image, boxes, colors):
    image = np.array(image)
    for box, color in zip(boxes, colors):
        # cv2.polylines expects int32 point arrays of shape (N, 1, 2)
        pts = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int32)
        image = cv2.polylines(image, [pts], True, color, 2)
    return image

# torch.hub.download_url_to_file('https://i.imgur.com/aqMBT0i.jpg', 'example.jpg')

def inference(img: Image.Image, lang, confidence):
    # A fresh PaddleOCR instance is built per request so the model matches the selected language
    ocr = PaddleOCR(use_angle_cls=True, lang=lang, use_gpu=False)
    img2np = np.array(img)
    # ocr.ocr returns one result list per page; each line is [box_points, (text, score)]
    result = ocr.ocr(img2np, cls=True)[0]
    result = result or []  # some PaddleOCR versions return None when no text is detected
    image = img.convert('RGB')
    boxes = [line[0] for line in result]
    txts = [line[1][0] for line in result]
    scores = [line[1][1] for line in result]

    # Pair each detection with a random color for drawing
    final_result = [dict(boxes=box, txt=txt, score=score, _c=get_random_color())
                    for box, txt, score in zip(boxes, txts, scores)]
    # Drop detections at or below the selected confidence threshold
    final_result = [item for item in final_result if item['score'] > confidence]

    im_show = draw_ocr_bbox(image, [item['boxes'] for item in final_result], [item['_c'] for item in final_result])
    im_show = Image.fromarray(im_show)
    data = [[json.dumps(item['boxes']), round(item['score'], 3), item['txt']] for item in final_result]
    return im_show, data
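
# Optional: constructing PaddleOCR() loads model weights, so building a fresh
# instance on every request is the slowest part of inference. A minimal caching
# sketch (reusing the same constructor arguments as above; not wired into the
# demo) could look like:
#
#     _ocr_cache = {}
#
#     def get_ocr(lang):
#         if lang not in _ocr_cache:
#             _ocr_cache[lang] = PaddleOCR(use_angle_cls=True, lang=lang, use_gpu=False)
#         return _ocr_cache[lang]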

title = 'PaddleOCR'
description = 'Gradio demo for PaddleOCR.'

# Example inputs; the image files are expected to exist under example_imgs/ next to this script
examples = [
    ['example_imgs/example.jpg', 'en', 0.5],
    ['example_imgs/ch.jpg', 'ch', 0.7],
    ['example_imgs/img_12.jpg', 'en', 0.7],
]

css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"

demo = gr.Interface(
    inference,
    # Inputs: image, recognition language, confidence threshold
    [gr.Image(type='pil', label='Input'),
     gr.Dropdown(choices=['ch', 'en', 'fr', 'german', 'korean', 'japan'], value='ch', label='language'),
     gr.Slider(0.1, 1, 0.5, step=0.1, label='confidence_threshold')
    ],
    # Outputs: annotated image and the recognized text table
    [gr.Image(type='pil', label='Output'), gr.Dataframe(headers=['bbox', 'score', 'text'], label='Result')],
    title=title,
    description=description,
    examples=examples,
    css=css,
)
demo.queue(max_size=10)
demo.launch(debug=True, share=True)
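
# To run locally: `python app.py` (assuming this file is saved as app.py).
# Gradio prints a local URL; with share=True it also creates a temporary public link.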