SWHL committed
Commit 0fd5b51 · 1 Parent(s): 611af5d

Update app.py

Files changed (1)
  1. app.py +40 -43
app.py CHANGED
@@ -1,8 +1,4 @@
 # -*- encoding: utf-8 -*-
-import os
-
-os.system('pip install -r requirements.txt')
-
 import math
 import random
 import time
@@ -93,6 +89,9 @@ def inference(img_path, box_thresh=0.5, unclip_ratio=1.6, text_score=0.5):
     ocr_result, _ = rapid_ocr(img, box_thresh=box_thresh,
                               unclip_ratio=unclip_ratio,
                               text_score=text_score)
+    if not ocr_result:
+        return img_path, '未识别到有效文本'
+
     dt_boxes, rec_res, scores = list(zip(*ocr_result))
     img_save_path = visualize(img_path, dt_boxes, rec_res, scores)
     output_text = [f'{one_rec} {float(score):.4f}'
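The guard added in this hunk matters because RapidOCR returns an empty result (None in some versions) when it finds no text, and list(zip(*ocr_result)) on an empty result leaves nothing to unpack into dt_boxes, rec_res, scores; the early return instead hands back the unmodified image path plus the message '未识别到有效文本' ("no valid text recognized"). A minimal standalone sketch of the failure mode, using a hypothetical empty_result stand-in rather than a real OCR call:

# Standalone sketch (not part of app.py): why unpacking an empty OCR result fails.
empty_result = []   # stand-in for rapid_ocr(...) finding no text; some versions return None instead
try:
    dt_boxes, rec_res, scores = list(zip(*empty_result))
except ValueError as exc:
    print(exc)      # not enough values to unpack (expected 3, got 0)

if not empty_result:          # the guard: covers both [] and None
    print('未识别到有效文本')   # i.e. "no valid text recognized"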
@@ -100,44 +99,42 @@ def inference(img_path, box_thresh=0.5, unclip_ratio=1.6, text_score=0.5):
     return img_save_path, output_text
 
 
-title = 'RapidOCR Demo (捷智OCR)'
-description = """
-- Docs: [Docs](https://rapidocr.rtfd.io/)
-- Parameters docs: [link](https://github.com/RapidAI/RapidOCR/tree/main/python#configyaml%E4%B8%AD%E5%B8%B8%E7%94%A8%E5%8F%82%E6%95%B0%E4%BB%8B%E7%BB%8D)
-- **box_thresh**: 检测到的框是文本的概率,值越大,框中是文本的概率就越大。存在漏检时,调低该值。取值范围:[0, 1.0]
-- **unclip_ratio**: 控制文本检测框的大小,值越大,检测框整体越大。在出现框截断文字的情况,调大该值。取值范围:[1.5, 2.0]
-- **text_score**: 文本识别结果是正确的置信度,值越大,显示出的识别结果更准确。存在漏检时,调低该值。取值范围:[0, 1.0]
-"""
-
-article = """<p style='text-align: center'> Completely open source, free and support offline deployment of multi-platform and multi-language OCR SDK <a href='https://github.com/RapidAI/RapidOCR'>Github Repo</a></p>
-"""
-css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
-examples = [['images/1.jpg']]
-
 rapid_ocr = RapidOCR()
+examples = [['images/1.jpg']]
 
-gr.Interface(
-    inference,
-    inputs=[
-        gr.inputs.Image(type='filepath', label='Input'),
-        gr.Slider(minimum=0, maximum=1.0, value=0.5,
-                  label='box_thresh', step=0.1,
-                  info='检测到的框是文本的概率,值越大,框中是文本的概率就越大。存在漏检时,调低该值。取值范围:[0, 1.0]'),
-        gr.Slider(minimum=1.5, maximum=2.0, value=1.6,
-                  label='unclip_ratio', step=0.1,
-                  info='控制文本检测框的大小,值越大,检测框整体越大。在出现框截断文字的情况,调大该值。取值范围:[1.5, 2.0]'),
-        gr.Slider(minimum=0, maximum=1.0, value=0.5,
-                  label='text_score', step=0.1,
-                  info='文本识别结果是正确的置信度,值越大,显示出的识别结果更准确。存在漏检时,调低该值。取值范围:[0, 1.0]'),
-    ],
-    outputs=[
-        gr.outputs.Image(type='filepath', label='Output_image'),
-        gr.outputs.Textbox(type='text', label='Output_text')
-    ],
-    title=title,
-    description=description,
-    examples=examples,
-    article=article,
-    css=css,
-    allow_flagging='never',
-).launch(debug=True, enable_queue=True)
+with gr.Blocks(title='RapidOCR') as demo:
+    gr.Markdown("""
+<center><a href="https://github.com/RapidAI/RapidOCR" target="_blank"><img src="https://raw.githubusercontent.com/RapidAI/RapidOCR/main/assets/RapidOCR_LOGO.png" width="30%"></a></center>
+
+## Docs: [Docs](https://rapidocr.rtfd.io/)
+## Parameters docs: [link](https://github.com/RapidAI/RapidOCR/tree/main/python#configyaml%E4%B8%AD%E5%B8%B8%E7%94%A8%E5%8F%82%E6%95%B0%E4%BB%8B%E7%BB%8D)
+#### `box_thresh`: 检测到的框是文本的概率,值越大,框中是文本的概率就越大。存在漏检时,调低该值。
+#### `unclip_ratio`: 控制文本检测框的大小,值越大,检测框整体越大。在出现框截断文字的情况,调大该值。
+#### `text_score`: 文本识别结果是正确的置信度,值越大,显示出的识别结果更准确。存在漏检时,调低该值。""")
+    with gr.Box():
+        with gr.Row():
+            box_thresh = gr.Slider(minimum=0, maximum=1.0, value=0.5,
+                                   label='box_thresh', step=0.1,
+                                   interactive=True,
+                                   info='[0, 1.0]')
+            unclip_ratio = gr.Slider(minimum=1.5, maximum=2.0, value=1.6,
+                                     label='unclip_ratio', step=0.1,
+                                     interactive=True,
+                                     info='[1.5, 2.0]')
+            text_score = gr.Slider(minimum=0, maximum=1.0, value=0.5,
+                                   label='text_score', step=0.1,
+                                   interactive=True,
+                                   info='[0, 1.0]')
+
+    input_img = gr.Image(type='filepath', label='Input')
+    out_img = gr.Image(type='filepath', label='Output_image')
+    out_txt = gr.outputs.Textbox(type='text', label='Output_text')
+    button = gr.Button('Submit')
+    button.click(fn=inference,
+                 inputs=[input_img, box_thresh, unclip_ratio, text_score],
+                 outputs=[out_img, out_txt])
+
+    gr.Examples(examples=examples,
+                inputs=[input_img, box_thresh, unclip_ratio, text_score],
+                outputs=[out_img, out_txt], fn=inference)
+demo.launch(debug=True, enable_queue=True)
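In this last hunk, the single gr.Interface(...) call becomes an explicit gr.Blocks layout: the sliders and image/text components are declared inside the with context, the Submit button is wired to inference through button.click(fn=..., inputs=..., outputs=...), and gr.Examples reuses the same input/output lists so clicking an example runs the same callback. A stripped-down sketch of that wiring pattern, with a hypothetical echo callback and labels that are placeholders rather than code from app.py:

# Stripped-down sketch of the Blocks event-wiring pattern (hypothetical names, not from app.py).
import gradio as gr

def echo(text):
    # placeholder callback standing in for inference()
    return text

with gr.Blocks(title='wiring sketch') as demo:
    inp = gr.Textbox(label='Input')
    out = gr.Textbox(label='Output')
    btn = gr.Button('Submit')
    btn.click(fn=echo, inputs=[inp], outputs=[out])   # button click -> callback -> output component

demo.launch()

Note that gr.Box, gr.outputs.Textbox, and launch(enable_queue=True) are Gradio 3.x-era APIs, so the app presumably targets Gradio 3.x; if the Space were later pinned to Gradio 4.x, those calls would likely need to become gr.Group, gr.Textbox, and demo.queue() respectively.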