import streamlit as st
from apps.utils import read_markdown
from streamlit_tensorboard import st_tensorboard
from .utils import Toc


def app(state=None):
    """Render the project write-up page, registering headers for the table of contents."""
    toc = Toc()
    st.info("Welcome to our Multilingual-VQA demo. Please use the navigation sidebar to move to our demo, or scroll below to read all about our project. 🤗")
    
    st.header("Table of contents")
    toc.placeholder()
    
    toc.header("Introduction and Motivation")
    st.write(read_markdown("intro/intro.md"))
    toc.subheader("Novel Contributions")
    st.write(read_markdown("intro/contributions.md"))
    
    toc.header("Methodology")

    toc.subheader("Pre-training")
    st.write(read_markdown("pretraining/intro.md"))
    # col1, col2 = st.beta_columns([5,5])
    st.image(
        "./misc/article/Multilingual-VQA.png",
        caption="Masked LM model for Image-text Pre-training.",
    )
    toc.subsubheader("MLM Dataset")
    st.write(read_markdown("pretraining/data.md"))
    toc.subsubheader("MLM Model")
    st.write(read_markdown("pretraining/model.md"))
    toc.subsubheader("MLM Training Logs")
    st_tensorboard(logdir='./logs/pretrain_logs', port=6006)
    
    
    toc.subheader("Finetuning")
    toc.subsubheader("VQA Dataset")
    st.write(read_markdown("finetuning/data.md"))
    toc.subsubheader("VQA Model")
    st.write(read_markdown("finetuning/model.md"))
    toc.subsubheader("VQA Training Logs")
    st_tensorboard(logdir='./logs/finetune_logs', port=6007)
    
    toc.header("Challenges and Technical Difficulties")
    st.write(read_markdown("challenges.md"))
    
    toc.header("Limitations")
    st.write(read_markdown("limitations.md"))
    
    toc.header("Conclusion, Future Work, and Social Impact")
    toc.subheader("Conclusion")
    st.write(read_markdown("conclusion_future_work/conclusion.md"))
    toc.subheader("Future Work")
    st.write(read_markdown("conclusion_future_work/future_work.md"))
    toc.subheader("Social Impact")
    st.write(read_markdown("conclusion_future_work/social_impact.md"))
    
    toc.header("References")
    st.write(read_markdown("references.md"))

    toc.header("Checkpoints")
    st.write(read_markdown("checkpoints/checkpoints.md"))
    toc.subheader("Other Checkpoints")
    st.write(read_markdown("checkpoints/other_checkpoints.md"))
    
    toc.header("Acknowledgements")
    st.write(read_markdown("acknowledgements.md"))
    # Populate the table-of-contents placeholder with links to every registered header.
    toc.generate()
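
# The page above leans on the small Toc helper imported from .utils. Below is a
# minimal sketch of that helper, kept in comments so it does not shadow the real
# import. It assumes the common Streamlit pattern of emitting HTML headers with
# anchor ids and filling a reserved placeholder with the link list at the end;
# the actual implementation in .utils may differ, and all names here are
# illustrative only.
#
#     class Toc:
#         def __init__(self):
#             self._items = []
#             self._placeholder = None
#
#         def placeholder(self):
#             # Reserve an empty slot where the contents list will be rendered.
#             self._placeholder = st.empty()
#
#         def header(self, text):
#             self._write(text, level=1)
#
#         def subheader(self, text):
#             self._write(text, level=2)
#
#         def subsubheader(self, text):
#             self._write(text, level=3)
#
#         def _write(self, text, level):
#             key = text.lower().replace(" ", "-").replace(",", "")
#             st.markdown(f"<h{level} id='{key}'>{text}</h{level}>", unsafe_allow_html=True)
#             self._items.append("  " * (level - 1) + f"* [{text}](#{key})")
#
#         def generate(self):
#             # Fill the reserved slot with one markdown link per registered header.
#             if self._placeholder is not None:
#                 self._placeholder.markdown("\n".join(self._items), unsafe_allow_html=True)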