Update app.py
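Cleans up stray blank lines and comments in process_query and reworks create_interface: the Markdown headings are removed, the Best Model and Best Answer boxes become read-only, create_answer_row no longer takes a context argument, each retrieval model gets its own answer/context row, every answer and context textbox is wired into the Submit button's outputs, and the script now launches the Gradio interface when run directly.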
app.py CHANGED
@@ -51,9 +51,6 @@ def process_query(query):
         tf_idf_ranking_modified, bm25_ranking_modified, open_source_ranking_modified
     )
 
-
-
-
     agent1_context = wiki_data[0]
     agent2_context = article
 
@@ -73,10 +70,7 @@ def process_query(query):
     tf_idf_bm25_open_RRF_Ranking_modified_context = miniWikiCollectionDict[tf_idf_bm25_open_RRF_Ranking_modified[0]]
     tf_idf_bm25_open_RRF_Ranking_combined_context = miniWikiCollectionDict[tf_idf_bm25_open_RRF_Ranking_combined[0]]
 
-
-
-    #Generating answers
-
+    # Generating answers
     agent1_answer = generate_answer_withContext(query, agent1_context)
     agent2_answer = generate_answer_withContext(query, agent2_context)
 
@@ -98,7 +92,6 @@ def process_query(query):
 
     zeroShot = generate_answer_zeroShot(query)
 
-
     # Ranking the best answer
     rankerAgentInput = {
         "query": query,
@@ -122,8 +115,6 @@ def process_query(query):
 
     best_model, best_answer = rankerAgent(rankerAgentInput)
 
-
-
     return (
         best_model,
         best_answer,
@@ -145,57 +136,47 @@ def process_query(query):
         zeroShot, "Zero-shot doesn't have a context."
     )
 
-#
+# Interface creation
 def create_interface():
     with gr.Blocks() as interface:
-        gr.Markdown("# Query Answering System")
-        gr.Markdown("Enter a query to get the best model and the best answer using multiple retrieval models and ranking techniques.")
         query_input = gr.Textbox(label="Enter your query")
-
-
-        best_model_output = gr.Textbox(label="Best Model")
-        best_answer_output = gr.Textbox(label="Best Answer")
-
-        gr.Markdown("---") # Horizontal line
-        gr.Markdown("## All Answers and Contexts")
+        best_model_output = gr.Textbox(label="Best Model", interactive=False)
+        best_answer_output = gr.Textbox(label="Best Answer", interactive=False)
 
-        def create_answer_row(label, …):
+        def create_answer_row(label):
             with gr.Row():
                 answer_textbox = gr.Textbox(label=f"{label} Answer", interactive=False)
                 context_button = gr.Button(f"Show {label} Context")
                 context_textbox = gr.Textbox(label=f"{label} Context", visible=False)
 
+                # Event to show the context
                 context_button.click(
-                    fn=lambda: gr.update(visible=True, value=…),
-                    inputs=…,
-                    outputs=context_textbox
+                    fn=lambda x: gr.update(visible=True, value=x),
+                    inputs=None,
+                    outputs=context_textbox
                 )
             return answer_textbox, context_textbox
-        [old lines 174-194 are truncated in the source view]
-        tf_idf_rrf_combined_output, tf_idf_rrf_combined_context_box = create_answer_row("TF-IDF + BM25 + Open RRF (Combined)", tf_idf_bm25_open_RRF_Ranking_combined_context)
-
-        with gr.Row():
-            zero_shot_output, zero_shot_context_box = create_answer_row("Zero Shot", "Zero-shot doesn't have a context.")
+
+        agent1_output, agent1_context_output = create_answer_row("Agent 1")
+
+        agent2_output, agent2_context_output = create_answer_row("Agent 2")
+        boolean_output, boolean_context_output = create_answer_row("Boolean")
+        tf_idf_output, tf_idf_context_output = create_answer_row("TF-IDF")
+        bm25_output, bm25_context_output = create_answer_row("BM25")
+        vision_output, vision_context_output = create_answer_row("Vision")
+        open_source_output, open_source_context_output = create_answer_row("Open Source")
+
+        boolean_mod_output, boolean_mod_context_output = create_answer_row("Boolean (Modified)")
+        tf_idf_mod_output, tf_idf_mod_context_output = create_answer_row("TF-IDF (Modified)")
+        bm25_mod_output, bm25_mod_context_output = create_answer_row("BM25 (Modified)")
+        vision_mod_output, vision_mod_context_output = create_answer_row("Vision (Modified)")
+        open_source_mod_output, open_source_context_output = create_answer_row("Open Source (Modified)")
+
+        tf_idf_rrf_output, tf_idf_rrf_context_output = create_answer_row("TF-IDF + BM25 + Open RRF")
+        tf_idf_rrf_mod_output, tf_idf_rrf_mod_context_output = create_answer_row("TF-IDF + BM25 + Open RRF (Modified)")
+        tf_idf_rrf_combined_output, tf_idf_rrf_combined_context_output = create_answer_row("TF-IDF + BM25 + Open RRF (Combined)")
+
+        zero_shot_output, zero_shot_context_output = create_answer_row("Zero Shot")
 
         gr.Button("Submit").click(
             fn=process_query,
@@ -203,26 +184,28 @@ def create_interface():
             outputs=[
                 best_model_output,
                 best_answer_output,
-                agent1_output,
-                agent2_output,
-                boolean_output,
-                tf_idf_output,
-                bm25_output,
-                vision_output,
-                open_source_output,
-                boolean_mod_output,
-                tf_idf_mod_output,
-                bm25_mod_output,
-                vision_mod_output,
-                open_source_mod_output,
-                tf_idf_rrf_output,
-                tf_idf_rrf_mod_output,
-                tf_idf_rrf_combined_output,
-                zero_shot_output,
+                agent1_output, agent1_context_output,
+                agent2_output, agent2_context_output,
+                boolean_output, boolean_context_output,
+                tf_idf_output, tf_idf_context_output,
+                bm25_output, bm25_context_output,
+                vision_output, vision_context_output,
+                open_source_output, open_source_context_output,
+                boolean_mod_output, boolean_mod_context_output,
+                tf_idf_mod_output, tf_idf_mod_context_output,
+                bm25_mod_output, bm25_mod_context_output,
+                vision_mod_output, vision_mod_context_output,
+                open_source_mod_output, open_source_context_output,
+                tf_idf_rrf_output, tf_idf_rrf_context_output,
+                tf_idf_rrf_mod_output, tf_idf_rrf_mod_context_output,
+                tf_idf_rrf_combined_output, tf_idf_rrf_combined_context_output,
+                zero_shot_output, zero_shot_context_output
             ]
         )
 
     return interface
 
+# Launch the interface
 if __name__ == "__main__":
-    create_interface()
+    interface = create_interface()
+    interface.launch()
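A note on the new context-button wiring: `context_button.click(fn=lambda x: gr.update(visible=True, value=x), inputs=None, outputs=context_textbox)` registers a callback that expects one argument, but with `inputs=None` Gradio calls it with none, so clicking a "Show ... Context" button should fail with a TypeError. Since each context textbox already receives its text from `process_query` through the Submit outputs, the button only needs to reveal the box. A minimal sketch of one possible fix, assuming the usual `gr.update` behavior of leaving unspecified properties (including `value`) unchanged:

    # Reveal the context textbox; its value is filled in by process_query via the Submit outputs
    context_button.click(
        fn=lambda: gr.update(visible=True),
        inputs=None,
        outputs=context_textbox
    )

Two smaller points: the "Open Source (Modified)" row rebinds `open_source_context_output`, so the plain Open Source row's context textbox never appears in the Submit outputs while the Modified row's context box is listed twice (renaming the second variable to something like `open_source_mod_context_output` would fix the mapping), and with 16 answer/context rows plus the best-model and best-answer boxes, `process_query` must return exactly 34 values in the same order as the outputs list.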