# -*- encoding: utf-8 -*-
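"""Gradio demo for RapidOCR: runs ONNX text detection and recognition on an
uploaded image and shows the detected boxes next to the recognized text."""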
import math
import random
import time
from pathlib import Path

import cv2
import gradio as gr
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from rapidocr_onnxruntime import RapidOCR

def draw_ocr_box_txt(image, boxes, txts, font_path,
                     scores=None, text_score=0.5):
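    """Blend the detected `boxes` over `image` and draw each `txt` at the
    matching position on a white canvas of the same size; results scoring
    below `text_score` are skipped. Returns both halves side by side as an
    RGB ndarray (original + boxes on the left, plain text on the right).
    """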
    h, w = image.height, image.width
    img_left = image.copy()
    img_right = Image.new('RGB', (w, h), (255, 255, 255))

    random.seed(0)
    draw_left = ImageDraw.Draw(img_left)
    draw_right = ImageDraw.Draw(img_right)
    for idx, (box, txt) in enumerate(zip(boxes, txts)):
        if scores is not None and float(scores[idx]) < text_score:
            continue

        color = (random.randint(0, 255),
                 random.randint(0, 255),
                 random.randint(0, 255))
        box = [tuple(v) for v in box]
        draw_left.polygon(box, fill=color)
        draw_right.polygon([box[0][0], box[0][1],
                            box[1][0], box[1][1],
                            box[2][0], box[2][1],
                            box[3][0], box[3][1]],
                           outline=color)

        box_height = math.sqrt((box[0][0] - box[3][0])**2
                               + (box[0][1] - box[3][1])**2)
        box_width = math.sqrt((box[0][0] - box[1][0])**2
                              + (box[0][1] - box[1][1])**2)

        if box_height > 2 * box_width:
            # Near-vertical box: render one character per line, top to bottom.
            # Note: font.getsize was removed in Pillow 10; this assumes Pillow < 10.
            font_size = max(int(box_width * 0.9), 10)
            font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
            cur_y = box[0][1]
            for c in txt:
                char_size = font.getsize(c)
                draw_right.text((box[0][0] + 3, cur_y), c,
                                fill=(0, 0, 0), font=font)
                cur_y += char_size[1]
        else:
            font_size = max(int(box_height * 0.8), 10)
            font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
            draw_right.text([box[0][0], box[0][1]], txt,
                            fill=(0, 0, 0), font=font)

    img_left = Image.blend(image, img_left, 0.5)
    img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))
    img_show.paste(img_left, (0, 0, w, h))
    img_show.paste(img_right, (w, 0, w * 2, h))
    return np.array(img_show)

def visualize(image_path, boxes, txts, scores,
              font_path="./FZYTK.TTF"):
    image = Image.open(image_path)
    draw_img = draw_ocr_box_txt(image, boxes,
                                txts, font_path,
                                scores,
                                text_score=0.5)

    draw_img_save = Path("./inference_results/")
    if not draw_img_save.exists():
        draw_img_save.mkdir(parents=True, exist_ok=True)

    time_stamp = time.strftime('%Y-%m-%d-%H-%M-%S',
                               time.localtime(time.time()))
    image_save = str(draw_img_save / f'{time_stamp}_{Path(image_path).name}')
    # draw_img is RGB; OpenCV writes BGR, hence the channel reversal.
    cv2.imwrite(image_save, draw_img[:, :, ::-1])
    return image_save

def inference(img_path, box_thresh=0.5, unclip_ratio=1.6, text_score=0.5,
              text_det=None, text_rec=None):
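    """Run detection and recognition with the selected ONNX models and return
    (visualization_image_path, list of 'text score' strings).
    """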
    det_model_path = str(Path('models') / 'text_det' / text_det)
    rec_model_path = str(Path('models') / 'text_rec' / text_rec)

    # v2-series recognition models expect 32px-high inputs; v3 models expect 48px.
    if 'v2' in rec_model_path:
        rec_image_shape = [3, 32, 320]
    else:
        rec_image_shape = [3, 48, 320]

    print('Init Class')
    s = time.time()
    rapid_ocr = RapidOCR(det_model_path=det_model_path,
                         rec_model_path=rec_model_path,
                         rec_img_shape=rec_image_shape)
    print(det_model_path, rec_model_path, rec_image_shape)
    elapse = time.time() - s
    print(elapse)

    img = cv2.imread(img_path)
    ocr_result, _ = rapid_ocr(img, box_thresh=box_thresh,
                              unclip_ratio=unclip_ratio,
                              text_score=text_score)
    if not ocr_result:
        return img_path, 'No valid text detected.'

    dt_boxes, rec_res, scores = list(zip(*ocr_result))
    img_save_path = visualize(img_path, dt_boxes, rec_res, scores)
    output_text = [f'{one_rec} {float(score):.4f}'
                   for one_rec, score in zip(rec_res, scores)]
    return img_save_path, output_text

examples = [['images/1.jpg'], ['images/ch_en_num.jpg']]

with gr.Blocks(title='RapidOCR') as demo:
    gr.Markdown("""
    <h1><center><a href="https://github.com/RapidAI/RapidOCR" target="_blank">Rapid⚡OCR</a></center></h1>

    ### Docs: [Docs](https://rapidocr.rtfd.io/)
    ### Parameter docs: [link](https://github.com/RapidAI/RapidOCR/tree/main/python#configyaml%E4%B8%AD%E5%B8%B8%E7%94%A8%E5%8F%82%E6%95%B0%E4%BB%8B%E7%BB%8D)
    - **box_thresh**: probability that a detected box contains text; the larger the value, the more likely the box is text. Lower it when text regions are missed. Range: [0, 1.0]
    - **unclip_ratio**: controls the size of the detected text box; the larger the value, the larger the box. Raise it when boxes truncate characters. Range: [1.5, 2.0]
    - **text_score**: confidence that a recognition result is correct; the larger the value, the more accurate the displayed results. Lower it when results are missing. Range: [0, 1.0]
    ### Runtime environment:
    Python: 3.8 | onnxruntime: 1.14.1 | rapidocr_onnxruntime: 1.2.5""")
    gr.Markdown('**Hyperparameter tuning**')
    with gr.Row():
        box_thresh = gr.Slider(minimum=0, maximum=1.0, value=0.5,
                               label='box_thresh', step=0.1,
                               interactive=True,
                               info='[0, 1.0]')
        unclip_ratio = gr.Slider(minimum=1.5, maximum=2.0, value=1.6,
                                 label='unclip_ratio', step=0.1,
                                 interactive=True,
                                 info='[1.5, 2.0]')
        text_score = gr.Slider(minimum=0, maximum=1.0, value=0.5,
                               label='text_score', step=0.1,
                               interactive=True,
                               info='[0, 1.0]')

    gr.Markdown('**Model selection**')
    with gr.Row():
        text_det = gr.Dropdown(['ch_PP-OCRv3_det_infer.onnx',
                                'ch_PP-OCRv2_det_infer.onnx',
                                'ch_ppocr_server_v2.0_det_infer.onnx'],
                               label='Text detection model',
                               value='ch_PP-OCRv3_det_infer.onnx',
                               interactive=True)
        text_rec = gr.Dropdown(['ch_PP-OCRv3_rec_infer.onnx',
                                'ch_PP-OCRv2_rec_infer.onnx',
                                'ch_ppocr_server_v2.0_rec_infer.onnx'],
                               label='Text recognition model',
                               value='ch_PP-OCRv3_rec_infer.onnx',
                               interactive=True)

    with gr.Row():
        input_img = gr.Image(type='filepath', label='Input')
        out_img = gr.Image(type='filepath', label='Output')

    out_txt = gr.Textbox(label='RecText')
    button = gr.Button('Submit')
    button.click(fn=inference,
                 inputs=[input_img, box_thresh, unclip_ratio, text_score,
                         text_det, text_rec],
                 outputs=[out_img, out_txt])
    gr.Examples(examples=examples,
                inputs=[input_img, box_thresh, unclip_ratio, text_score,
                        text_det, text_rec],
                outputs=[out_img, out_txt], fn=inference)

demo.launch(debug=True, enable_queue=True)
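
# A minimal sketch of calling the OCR engine directly with the same thresholds
# the sliders expose, for use outside the Gradio UI. Paths follow the model and
# example layout above; kept as comments so the Space still just launches:
#
#   engine = RapidOCR(det_model_path='models/text_det/ch_PP-OCRv3_det_infer.onnx',
#                     rec_model_path='models/text_rec/ch_PP-OCRv3_rec_infer.onnx',
#                     rec_img_shape=[3, 48, 320])
#   result, _ = engine(cv2.imread('images/1.jpg'),
#                      box_thresh=0.5, unclip_ratio=1.6, text_score=0.5)
#   for box, text, score in result or []:  # each entry is (box, text, score)
#       print(text, f'{float(score):.4f}')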