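"""Gradio demo for reading draught marks with pre-trained MMOCR models.

Downloads two example images, then serves an interface that runs text
detection and recognition on an uploaded photo and returns the visualised
OCR result.
"""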
import os
import urllib.request

import gradio as gr
from PIL import Image


# Build the OCR pipeline: TextSnake for text detection, ABINet (vision model) for recognition.
from mmocr.apis import MMOCRInferencer

ocr = MMOCRInferencer(det='TextSnake', rec='ABINet_Vision')

# Download two example images of draught marks for the Gradio examples gallery.
url = (
    "https://upload.wikimedia.org/wikipedia/commons/thumb/5/5b/Draft_Marks_on_the_Bow_of_Kruzenshtern_Port_of_Tallinn_16_July_2011.jpg/1600px-Draft_Marks_on_the_Bow_of_Kruzenshtern_Port_of_Tallinn_16_July_2011.jpg"
)
path_input = "./example1.jpg"
urllib.request.urlretrieve(url, filename=path_input)

url = "https://upload.wikimedia.org/wikipedia/commons/3/3e/733_how-deep.jpg"
path_input = "./example2.jpg"
urllib.request.urlretrieve(url, filename=path_input)


# Working folders: uploads go to demo-input, MMOCR writes its visualisations to demo-out/vis.
path_img_output_folder = "./demo-out"
os.makedirs(os.path.join(path_img_output_folder, "vis"), exist_ok=True)

path_img_input_folder = "./demo-input"
os.makedirs(path_img_input_folder, exist_ok=True)

def do_process(img):
    # Save the uploaded image, run OCR on it, and return MMOCR's visualised result.
    img_name = 'tmp.jpg'
    path_input = os.path.join(path_img_input_folder, img_name)
    path_output = os.path.join(path_img_output_folder, 'vis', img_name)
    img.save(path_input)
    ocr(path_input, out_dir=path_img_output_folder, save_vis=True)
    img_res = Image.open(path_output)
    return img_res

# Gradio I/O components (legacy gr.inputs/gr.outputs API).
input_im = gr.inputs.Image(
    shape=None, image_mode="RGB", invert_colors=False, source="upload", type="pil"
)

output_img = gr.outputs.Image(label="OCR visualisation", type="pil")

title = "Reading draught marks"
description = "Playground: reading draught marks on a ship's hull using pre-trained MMOCR models (TextSnake + ABINet). Built with Gradio."
examples = [["./example1.jpg"], ["./example2.jpg"]]
article = "<p style='text-align: center'><a href='https://github.com/mawady' target='_blank'>By Dr. Mohamed Elawady</a></p>"
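# Assemble the Gradio interface; it is launched at the end of the script.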
iface = gr.Interface(
    fn=do_process,
    inputs=[input_im],
    outputs=[output_img],
    live=False,
    interpretation=None,
    title=title,
    description=description,
    article=article,
    examples=examples,
)

iface.launch(debug=True)