Update app.py
app.py CHANGED
@@ -30,7 +30,10 @@ examples_questions = [["What is Transformer?"],
     ["What are Encoder and Decoder?"],
     ["Describe more about the Transformer."],
     ["Why use self-attention?"],
-    ]
+    ["Describe Parameter-Efficient fine-tuning?"],
+    ["Describe Generative Adversarial Networks?"],
+    ["How does GAN work?"]
+    ]
 
 template = \
 """Use the following pieces of context to answer the question at the end.
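For context on how these example rows are used: in a Gradio chat app like this one, a list such as examples_questions is typically passed to the chat component's examples parameter, where each inner list is one pre-filled prompt. A minimal sketch follows, assuming a gr.ChatInterface wiring; the generate_response stub here stands in for the app's real RAG pipeline, which also takes sampling parameters not shown.

import gradio as gr

# The list this hunk extends: each inner list is one example prompt,
# rendered as a clickable suggestion under the chat input.
examples_questions = [["What is Transformer?"],
                      ["What are Encoder and Decoder?"],
                      ["Describe more about the Transformer."],
                      ["Why use self-attention?"],
                      ["Describe Parameter-Efficient fine-tuning?"],
                      ["Describe Generative Adversarial Networks?"],
                      ["How does GAN work?"]
                      ]

def generate_response(query, history):
    # Stub standing in for the app's retrieval-augmented pipeline.
    return f"You asked: {query}"

demo = gr.ChatInterface(fn=generate_response, examples=examples_questions)

if __name__ == "__main__":
    demo.launch()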
@@ -114,16 +117,7 @@ def generate_response(query, history, model, temperature, max_tokens, top_p, see
     return response
 
 
-# gr.Markdown("""
-# ### 1. Attention Is All You Need (Vaswani et al., 2017)
-# This groundbreaking paper introduced the **Transformer** architecture. It revolutionized natural language processing by enabling parallelization and significantly improving performance on tasks like translation, leading to models like *BERT* and *GPT*.
 
-# ### 2. Generative Adversarial Nets (Goodfellow et al., 2014)
-# This paper proposed **GANs**, a novel framework for generative modeling using two neural networks—a generator and a discriminator—that compete in a zero-sum game.
-
-# ### 3. Parameter-Efficient Transfer Learning for NLP (Houlsby et al., 2019)
-# This paper introduces **adapter modules**, a method for fine-tuning large pre-trained language models with significantly fewer parameters.
-# """)
 
 
 additional_inputs = [
@@ -141,8 +135,17 @@ with gr.Blocks(theme="Nymbo/Alyx_Theme") as demo:
     # pdf_input = gr.Textbox(label="PDF File")
     # index_button = gr.Button("Index PDF")
     # load_sample = gr.Button("Alternatively, Load and Index [Attention Is All You Need.pdf] as a Sample")
-    load_sample = gr.Button("Load and Index
-    sample_description = gr.Markdown("
+    load_sample = gr.Button("Load and Index the following three papers as a RAG Demo")
+    sample_description = gr.Markdown("""
+    ## 1. Attention Is All You Need (Vaswani et al., 2017)
+    This groundbreaking paper introduced the **Transformer** architecture. It revolutionized natural language processing by enabling parallelization and significantly improving performance on tasks like translation, leading to models like *BERT* and *GPT*.
+    ## 2. Generative Adversarial Nets (Goodfellow et al., 2014)
+    This paper proposed **GANs**, a novel framework for generative modeling using two neural networks—a generator and a discriminator—that compete in a zero-sum game.
+    ## 3. Parameter-Efficient Transfer Learning for NLP (Houlsby et al., 2019)
+    This paper introduces **adapter modules**, a method for fine-tuning large pre-trained language models with significantly fewer parameters.
+
+    It could take several minutes to load and index the files.
+    """)
     index_output = gr.Textbox(label="Indexing Status")
     # index_button.click(index_pdf, inputs=pdf_input, outputs=index_output)
     load_sample.click(load_sample_pdf, inputs=None, outputs=index_output)
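The click handler wired above calls load_sample_pdf, whose body is not shown in this diff. A hypothetical sketch of what such an indexing function could look like, assuming a LangChain + FAISS stack; the actual app may use different libraries, and the PDF filenames below are placeholders, not names from the repository.

from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Placeholder filenames: the diff does not show which files ship with the app.
SAMPLE_PDFS = [
    "attention_is_all_you_need.pdf",
    "generative_adversarial_nets.pdf",
    "parameter_efficient_transfer_learning.pdf",
]

vectorstore = None  # module-level handle the chat function would query

def load_sample_pdf():
    # Signature matches the wiring above: no inputs, one string output
    # for the "Indexing Status" textbox.
    global vectorstore
    docs = []
    for path in SAMPLE_PDFS:
        docs.extend(PyPDFLoader(path).load())
    chunks = RecursiveCharacterTextSplitter(
        chunk_size=1000, chunk_overlap=100).split_documents(docs)
    # Embedding every chunk dominates the runtime, which is why the UI
    # warns that indexing could take several minutes.
    vectorstore = FAISS.from_documents(chunks, HuggingFaceEmbeddings())
    return f"Indexed {len(chunks)} chunks from {len(SAMPLE_PDFS)} PDFs."

Returning a status string, rather than the store itself, matches the outputs=index_output wiring in the diff: Gradio routes the return value into the status textbox while the index stays in process memory.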
|