import gradio as gr
import utils
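
# Note: `utils` is this repo's helper module (not shown in this file). From
# the calls below, utils.predict and utils.predict_mclip are assumed to share
# the signature
#     predict(text_query: str, num_images: int, dataset_name: str)
#         -> (images, labels)
# where `images` is a list displayable by gr.Gallery and `labels` is a
# {label: similarity} mapping for gr.Label. This is inferred from usage,
# not a documented interface.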



# AraCLIP demo
with gr.Blocks() as demo_araclip:

    gr.Markdown("## Choose the dataset")

    # NB: the choice strings are passed verbatim to utils.predict, so the
    # original "Flicker" spelling is preserved here.
    dataset_select = gr.Radio(["XTD dataset", "Flicker 8k dataset"], value="XTD dataset", label="Dataset", info="Which dataset would you like to search in?")

    gr.Markdown("## Input parameters")
    
    txt = gr.Textbox(label="Text Query")
    num = gr.Slider(label="Number of retrieved images", value=1, minimum=1, step=1)
    

    with gr.Row():
        btn = gr.Button("Retrieve images", scale=1)

    gr.Markdown("## Retrieved Images")

    gallery = gr.Gallery(
        show_label=False, elem_id="gallery",
        columns=[5], rows=[1], object_fit="contain", height="auto")


    with gr.Row():
        labels = gr.Label(label="Text-image similarity")

 
    btn.click(utils.predict, inputs=[txt, num, dataset_select], outputs=[gallery, labels])


    # Example queries (English gloss): (1) "A Pittsburgh Pirates player skips
    # the home-plate area in a baseball league game", (2) "A cat standing with
    # its paws on a computer mouse on the desk", (3) "A plate of Chinese
    # vegetable soup, with french fries and a bottle of water beside it".
    # A dataset value is included in each example so it matches all three inputs.
    gr.Examples(
        examples=[["تخطي لاعب فريق بيتسبرج بايرتس منطقة اللوحة الرئيسية في مباراة بدوري البيسبول", 5, "XTD dataset"],
                  ["وقوف قطة بمخالبها على فأرة حاسوب على المكتب", 10, "XTD dataset"],
                  ["صحن به شوربة صينية بالخضار، وإلى جانبه بطاطس مقلية وزجاجة ماء", 7, "XTD dataset"]],
        inputs=[txt, num, dataset_select],
        outputs=[gallery, labels],
        fn=utils.predict,
        cache_examples=False,
    )

# M-CLIP demo
with gr.Blocks() as demo_mclip:

    gr.Markdown("## Choose the dataset")

    dataset_select = gr.Radio(["XTD dataset", "Flicker 8k dataset"], value="XTD dataset", label="Dataset", info="Which dataset would you like to search in?")


    gr.Markdown("## Input parameters")
    
    txt = gr.Textbox(label="Text Query")
    num = gr.Slider(label="Number of retrieved images", value=1, minimum=1, step=1)

    with gr.Row():
        btn = gr.Button("Retrieve images", scale=1)

    gr.Markdown("## Retrieved Images")

    gallery = gr.Gallery(
        label="Retrieved images", show_label=True, elem_id="gallery_mclip",
        columns=[5], rows=[1], object_fit="contain", height="auto")

    
    labels = gr.Label(label="Text-image similarity")

    btn.click(utils.predict_mclip, inputs=[txt, num, dataset_select], outputs=[gallery, labels])

    # Same example queries as the AraCLIP tab (see the English gloss above).
    gr.Examples(
        examples=[["تخطي لاعب فريق بيتسبرج بايرتس منطقة اللوحة الرئيسية في مباراة بدوري البيسبول", 5, "XTD dataset"],
                  ["وقوف قطة بمخالبها على فأرة حاسوب على المكتب", 10, "XTD dataset"],
                  ["صحن به شوربة صينية بالخضار، وإلى جانبه بطاطس مقلية وزجاجة ماء", 7, "XTD dataset"]],
        inputs=[txt, num, dataset_select],
        outputs=[gallery, labels],
        fn=utils.predict_mclip,
        cache_examples=False,
    )


# Define custom CSS to increase the size of the tabs
custom_css = """
.gr-tabbed-interface .gr-tab {
    font-size: 50px;  /* Increase the font size */
    padding: 10px;    /* Increase the padding */
}
"""

# Group the demos in a TabbedInterface. Custom CSS only takes effect when
# passed to the outermost Blocks, so it is applied here rather than on the
# nested TabbedInterface.
with gr.Blocks(css=custom_css) as demo:

    # gr.Image("statics/logo_araclip.png")
    gr.Markdown("""
            <center> <img src="https://raw.githubusercontent.com/Arabic-Clip/AraCLIP-Demo/main/logo_araclip.png" alt="Imgur" style="width:200px"></center>
                """)
    gr.Markdown("<center> <font color=red size=10>AraClip: Arabic Image Retrieval Application</font></center>")

    gr.Markdown("""
            <font size=4>   To run the demo 🤗, please select the model, then the dataset you would like to search in, enter a text query, and specify the number of retrieved images.</font>
                    
                """)



    gr.TabbedInterface([demo_araclip, demo_mclip], ["Our Model", "M-CLIP model"])

    gr.Markdown(
        """
            If you find this work helpful, please help us ⭐ the repositories in the <a href='https://github.com/Arabic-Clip' target='_blank'>Arabic-Clip GitHub organization</a>. Thank you!

            ---
            📝 **Citation**

            ## BibTeX

            ```bibtex
            @inproceedings{al2024araclip,
              title={AraCLIP: Cross-Lingual Learning for Effective Arabic Image Retrieval},
              author={Al-Barham, Muhammad and Afyouni, Imad and Almubarak, Khalid and Elnagar, Ashraf and Turky, Ayad and Hashem, Ibrahim},
              booktitle={Proceedings of The Second Arabic Natural Language Processing Conference},
              pages={102--110},
              year={2024}
            }
            ```
            """
    )
if __name__ == "__main__":
    demo.launch()
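
# Note (assumption): demo.launch() serves the app locally. Passing share=True,
# a standard Gradio launch option, would additionally create a temporary
# public link: demo.launch(share=True).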