kothariyashhh committed (verified)
Commit d7ab7d7 · 1 Parent(s): f2c872a

Update app.py

Files changed (1)
  1. app.py +20 -35
app.py CHANGED
@@ -7,58 +7,43 @@ from PIL import Image
 import gradio as gr
 import torch

-# Download example image
 torch.hub.download_url_to_file('https://i.imgur.com/aqMBT0i.jpg', 'example.jpg')

 def inference(img, lang):
-    # Initialize OCR with the selected language
-    ocr = PaddleOCR(use_angle_cls=True, lang=lang, use_gpu=False)
+    ocr = PaddleOCR(use_angle_cls=True, lang=lang,use_gpu=False)
     img_path = img
     result = ocr.ocr(img_path, cls=True)[0]

-    # Extract boxes and text
     boxes = [line[0] for line in result]
     txts = [line[1][0] for line in result]
-    scores = [line[1][1] for line in result]
+    #scores = [line[1][1] for line in result]

-    # Load image and draw OCR results
     image = Image.open(img_path).convert('RGB')
-    im_show = draw_ocr(image, boxes, txts=None, scores=None, font_path='simfang.ttf')
+    im_show = draw_ocr(image, boxes, txts=None, #scores=None, # https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.7/tools/infer/utility.py#L365
+                       font_path='simfang.ttf')
     im_show = Image.fromarray(im_show)
     im_show.save('result.jpg')

-    return 'result.jpg', '\n'.join(txts)
-
-def update_example(lang):
-    # Automatically load the example based on selected language
-    if lang == 'es':
-        return 'spanish.png'
-    elif lang == 'en':
-        return 'english.png'
-    return None
+    return 'result.jpg', result, '\n'.join(txts)
+
+    # return 'result.jpg'

-# Title and description for the Gradio app
-title = 'Gradio demo for PaddleOCR'
+title = 'PaddleOCR'
+description = 'PaddleOCR demo supports Spanish and English'
 article = ""
-description = "PaddleOCR demo supports Spanish and English. To use it, simply upload your image and choose a language from the dropdown menu, or click one of the examples to load them."
-
-# Gradio Interface
+examples = [['english.png','en'],['spanish.png','es']]
+css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
 app = gr.Interface(
     inference,
-    inputs=[
-        gr.Image(type='filepath', label='Input'),
-        gr.Dropdown(choices=['es', 'en'], value='en', label='Language', interactive=True)
-    ],
-    outputs=["image", "text"],
+    [gr.Image(type='filepath', label='Input'),gr.Dropdown(choices=['es', 'en'], type="value", value='ch', label='language')],
+    # gr.outputs.Image(type='file', label='Output'),
+    outputs=["image", "text", "text"],
     title=title,
     description=description,
     article=article,
-    examples=[['english.png','en'], ['spanish.png','es']],
-    css=".output_image, .input_image {height: 40rem !important; width: 100% !important;}",
-)
-
-# Add function to update input based on language selection
-app.set_input_callback(update_example, ['language'])
-
-# Launch the app
-app.launch(debug=True)
+    examples=examples,
+    css=css,
+    # enable_queue=True
+    )
+app.queue(max_size=10)
+app.launch(debug=True)
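
For reference, the OCR path used by the updated inference() can be exercised outside Gradio with a minimal sketch (not part of this commit; it assumes the paddleocr package is installed and that english.png and simfang.ttf are present next to the script):

# Standalone check of the same PaddleOCR flow used in app.py (sketch only;
# 'english.png' and 'simfang.ttf' are assumed to exist locally).
from paddleocr import PaddleOCR, draw_ocr
from PIL import Image

ocr = PaddleOCR(use_angle_cls=True, lang='en', use_gpu=False)
result = ocr.ocr('english.png', cls=True)[0]

boxes = [line[0] for line in result]    # detected text boxes
txts = [line[1][0] for line in result]  # recognized strings

print('\n'.join(txts))                  # text output, as returned by the app

vis = draw_ocr(Image.open('english.png').convert('RGB'), boxes,
               txts=None, font_path='simfang.ttf')
Image.fromarray(vis).save('check.jpg')  # visualization, as saved by the app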