File size: 3,408 Bytes
4697797
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
import gradio as gr
from utils import *

# Remote archive with the demo assets (sample images, InkML outputs, SVGs).
file_url = "https://storage.googleapis.com/derendering_model/derendering_supp.zip"
filename = "derendering_supp.zip"

# Fetch and extract the supplementary data once at startup.
# NOTE(review): download_file/unzip_file come from the `utils` star-import;
# presumably unzip_file extracts into ./derendering_supp — confirm in utils.
download_file(file_url, filename)
unzip_file(filename)
print("Downloaded and unzipped the file.")

# Raw SVG markup, embedded inline into the page HTML/markdown below.
diagram = get_svg_content("derendering_supp/derender_diagram.svg")
org = get_svg_content("org/cor.svg")

# HTML banner rendered at the very top of the app.
org_content = f"""
{org}
"""


def demo(Dataset, Model):
    """Sample one image from *Dataset* and render outputs for all three modes.

    Args:
        Dataset: Dataset name ("IMGUR5K", "IAM", or "HierText").
        Model: InkSight variant ("Small-i", "Small-p", or "Large-i").

    Returns:
        Tuple of (input image, d+t text, d+t video path, r+d text,
        r+d video path, vanilla text, vanilla video path) matching the
        Gradio output components wired up below.

    Raises:
        ValueError: if Model is not a known variant or the dataset's
            sample directory is empty.
    """
    # Map the UI label to the on-disk directory prefix ("Small-i" -> "small-i").
    model_prefix = {"Small-i": "small-i", "Small-p": "small-p", "Large-i": "large-i"}
    if Model not in model_prefix:
        # Previously an unrecognized model left `inkml_path` unbound (NameError).
        raise ValueError(f"Unknown model variant: {Model}")
    inkml_path = f"./derendering_supp/{model_prefix[Model]}_{Dataset}_inkml"

    path = f"./derendering_supp/{Dataset}/images_sample"
    samples = os.listdir(path)
    if not samples:
        # Previously an empty directory left `img`/`text_outputs` unbound
        # at the return statement below.
        raise ValueError(f"No sample images found in {path}")

    # Randomly pick a single sample.
    picked_samples = random.sample(samples, 1)

    query_modes = ["d+t", "r+d", "vanilla"]
    plot_title = {"r+d": "Recognized: ", "d+t": "OCR Input: ", "vanilla": ""}
    text_outputs = []

    for name in picked_samples:
        img_path = os.path.join(path, name)
        img = load_and_pad_img_dir(img_path)

        for mode in query_modes:
            # BUGFIX: name.strip(".png") removed any of the characters
            # '.', 'p', 'n', 'g' from BOTH ends of the filename (e.g.
            # "pen.png" -> "e"); splitext drops only the real extension.
            example_id = os.path.splitext(name)[0]
            inkml_file = os.path.join(inkml_path, mode, example_id + ".inkml")
            text_field = parse_inkml_annotations(inkml_file)["textField"]
            # One caption per mode, in query_modes order (indexed below).
            text_outputs.append(f"{plot_title[mode]}{text_field}")
            ink = inkml_to_ink(inkml_file)
            # Render the ink animation over the input image; the fixed
            # filenames are referenced by the return tuple.
            plot_ink_to_video(ink, mode + ".mp4", input_image=img)

    return (
        img,
        text_outputs[0],
        "d+t.mp4",
        text_outputs[1],
        "r+d.mp4",
        text_outputs[2],
        "vanilla.mp4",
    )


# --- Gradio UI -------------------------------------------------------------
# Layout: banner HTML, title markdown, input controls, then one textbox and
# one video per query mode (d+t, r+d, vanilla).
with gr.Blocks() as app:
    # SVG banner built at module load time.
    gr.HTML(org_content)
    gr.Markdown(
        f"""
        # InkSight: Offline-to-Online Handwriting Conversion by Learning to Read and Write<br>
        <div>{diagram}</div>
        🔔 This demo showcases the outputs of <b>Small-i</b>, <b>Small-p</b>, and <b>Large-i</b> on three public datasets (100 samples each).<br>
        ℹ️ Choose a model variant and dataset, then click 'Sample' to see an input with its corresponding outputs for all three inference types..<br>
        """
    )
    with gr.Row():
        # Inputs: the two dropdown values are passed to demo() as-is, so the
        # choices must match the directory names demo() builds paths from.
        dataset = gr.Dropdown(
            ["IMGUR5K", "IAM", "HierText"], label="Dataset", value="HierText"
        )
        model = gr.Dropdown(
            ["Small-i", "Large-i", "Small-p"],
            label="InkSight Model Variant",
            value="Small-i",
        )
        # Displays the randomly sampled input image returned by demo().
        im = gr.Image(label="Input Image")
    with gr.Row():
        # Text captions, one per query mode (filled from demo()'s return).
        d_t_text = gr.Textbox(
            label="OCR recognition input to the model", interactive=False
        )
        r_d_text = gr.Textbox(label="Recognition from the model", interactive=False)
        vanilla_text = gr.Textbox(label="Vanilla", interactive=False)

    with gr.Row():
        # Ink-rendering videos written by demo() as d+t.mp4 / r+d.mp4 / vanilla.mp4.
        d_t = gr.Video(label="Derender with Text", autoplay=True)
        r_d = gr.Video(label="Recognize and Derender", autoplay=True)
        vanilla = gr.Video(label="Vanilla", autoplay=True)

    with gr.Row():
        btn_sub = gr.Button("Sample")

    # Output order must match demo()'s return tuple exactly.
    btn_sub.click(
        fn=demo,
        inputs=[dataset, model],
        outputs=[im, d_t_text, d_t, r_d_text, r_d, vanilla_text, vanilla],
    )

# Blocking call: starts the local Gradio server.
app.launch()