ykl7 committed
Commit eb9293b · 1 Parent(s): f8c2a86
Files changed (1)
  1. app.py +76 -75
app.py CHANGED
@@ -10,9 +10,6 @@ from prompts import templates
 from typing import Any
 from string import Template
 
-st.header(" Scientific Claim Verification ")
-st.caption("Team UMBC-SBU-UT")
-
 def safe_parse_json(model_answer):
     """.."""
     try:
@@ -65,11 +62,9 @@ def check_password():
         st.error("😕 User not known or password incorrect")
         return False
 
-
 def select_models():
     """Returns only when a valid option is selected from both dropdowns."""
 
-    #placeholders
    retriever_options = ["Choose one...", "BM25 Retriever", "Off-the-shelf Retriever", "Finetuned Retriever", "No Retriever"]
    reasoner_options = ["Choose one...", "Claude Sonnet", "GPT-4o", "o3-mini"]
 
@@ -87,7 +82,7 @@ def select_models():
         key="reasoner"
     )
 
-    #next button
+    # next button
 
    if st.button("Next"):
        # Check that both selections are not the placeholder.
@@ -102,44 +97,6 @@ def select_models():
        st.info("Click 'Next' once you have made your selections.")
        return None, None
 
-if not check_password():
-    st.stop()
-
-
-if "selected_models" not in st.session_state:
-    selected_retriever, selected_reasoner = select_models()
-    # If valid selections are returned, store them and reset the change flag.
-    if selected_retriever is not None and selected_reasoner is not None:
-        st.session_state.selected_models = (selected_retriever, selected_reasoner)
-        st.rerun()
-    else:
-        st.stop()  # Halt further execution until valid selections are made.
-else:
-    selected_retriever, selected_reasoner = st.session_state.selected_models
-
-#START OF AGENTIC DEMO
-
-column1, column2 = st.columns(2)
-column1.caption(f"Retriever Selected: {selected_retriever}")
-column2.caption(f"Reasoner Selected: {selected_reasoner}")
-
-
-if st.button("Change Selection", key="change_selection_btn"):
-    st.session_state.pop("selected_models", None)
-    st.session_state.pop("retriever", None)
-    st.session_state.pop("reasoner", None)
-    st.session_state.messages = [{"role": "assistant", "content": "Let's start verifying the claims here! 👇"}]
-    st.rerun()
-
-# Initialize chat history
-if "messages" not in st.session_state:
-    st.session_state.messages = [{"role": "assistant", "content": "Let's start verifying the claims here! 👇"}]
-
-# Display chat messages from history on app rerun
-for message in st.session_state.messages:
-    with st.chat_message(message["role"]):
-        st.markdown(message["content"])
-
 def retriever(query: str, selected_retriever: str):
     """Simulate a 'retriever' step, searching for relevant information."""
     with st.chat_message("assistant"):
@@ -232,15 +189,47 @@ def reasoner(query: str, documents: list[str], llm_client: Any):
     # You could return reasoning info here.
     return reasoning, decision
 
-# Accept user input
-if prompt := st.chat_input("Type here"):
-    # Add user message to chat history
-    display_message = prompt + " \n"+ " \n"+ f"Retriever: {selected_retriever}, Reasoner: {selected_reasoner}"
-    st.session_state.messages.append({"role": "user", "content": display_message})
-    # Display user message in chat message container
-    with st.chat_message("user"):
-        st.markdown(display_message)
-
+def main():
+    st.header(" Scientific Claim Verification ")
+    st.caption("Team UMBC-SBU-UT")
+
+    if not check_password():
+        st.stop()
+
+    if "selected_models" not in st.session_state:
+        selected_retriever, selected_reasoner = select_models()
+        # If valid selections are returned, store them and reset the change flag.
+        if selected_retriever is not None and selected_reasoner is not None:
+            st.session_state.selected_models = (selected_retriever, selected_reasoner)
+            st.rerun()
+        else:
+            st.stop()  # Halt further execution until valid selections are made.
+    else:
+        selected_retriever, selected_reasoner = st.session_state.selected_models
+
+    # START OF AGENTIC DEMO
+
+    column1, column2 = st.columns(2)
+    column1.caption(f"Retriever Selected: {selected_retriever}")
+    column2.caption(f"Reasoner Selected: {selected_reasoner}")
+
+    if st.button("Change Selection", key="change_selection_btn"):
+        st.session_state.pop("selected_models", None)
+        st.session_state.pop("retriever", None)
+        st.session_state.pop("reasoner", None)
+        st.session_state.messages = [{"role": "assistant", "content": "Let's start verifying the claims here! 👇"}]
+        st.rerun()
+
+    # Initialize chat history
+    if "messages" not in st.session_state:
+        st.session_state.messages = [{"role": "assistant", "content": "Let's start verifying the claims here! 👇"}]
+
+    # Display chat messages from history on app rerun
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    # Model configurations on agentic demo page
     options = {}
     options["max_tokens"] = 500
     options["temperature"] = 0.0
@@ -264,26 +253,38 @@ if prompt := st.chat_input("Type here"):
 
     llm_client = LLMReasoner(options)
 
-    retrieved_documents = retriever(prompt, selected_retriever)
-    reasoning, decision = reasoner(prompt, retrieved_documents, llm_client)
-
-    # Display assistant response in chat message container
-    with st.chat_message("assistant"):
-        message_placeholder = st.empty()
-        full_response = ""
-        if decision.lower() == 'support':
-            assistant_response = f'The claim is CORRECT because {reasoning}'
-        elif decision.lower() == 'contradict':
-            assistant_response = f'The claim is INCORRECT because {reasoning}'
-        else:
-            assistant_response = f'Sorry, the query failed due to an issue with connecting to the LLM service.'
-
-        # Simulate stream of response with milliseconds delay
-        for chunk in assistant_response.split():
-            full_response += chunk + " "
-            time.sleep(0.05)
-            # Add a blinking cursor to simulate typing
-            message_placeholder.markdown(full_response + "▌")
-        message_placeholder.markdown(full_response)
-        # Add assistant response to chat history
-        st.session_state.messages.append({"role": "assistant", "content": full_response})
+    # Accept user input
+    if prompt := st.chat_input("Type here"):
+        # Add user message to chat history
+        display_message = prompt + " \n"+ " \n"+ f"Retriever: {selected_retriever}, Reasoner: {selected_reasoner}"
+        st.session_state.messages.append({"role": "user", "content": display_message})
+        # Display user message in chat message container
+        with st.chat_message("user"):
+            st.markdown(display_message)
+
+        retrieved_documents = retriever(prompt, selected_retriever)
+        reasoning, decision = reasoner(prompt, retrieved_documents, llm_client)
+
+        # Display assistant response in chat message container
+        with st.chat_message("assistant"):
+            message_placeholder = st.empty()
+            full_response = ""
+            if decision.lower() == 'support':
+                assistant_response = f'The claim is CORRECT because {reasoning}'
+            elif decision.lower() == 'contradict':
+                assistant_response = f'The claim is INCORRECT because {reasoning}'
+            else:
+                assistant_response = f'Sorry, the query failed due to an issue with connecting to the LLM service.'
+
+            # Simulate stream of response with milliseconds delay
+            for chunk in assistant_response.split():
+                full_response += chunk + " "
+                time.sleep(0.05)
+                # Add a blinking cursor to simulate typing
+                message_placeholder.markdown(full_response + "▌")
+            message_placeholder.markdown(full_response)
+            # Add assistant response to chat history
+            st.session_state.messages.append({"role": "assistant", "content": full_response})
+
+if __name__ == '__main__':
+    main()
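
The net effect of the commit is structural: the top-level Streamlit calls (header, password gate, model selection, chat loop) move into a main() function behind an if __name__ == '__main__' guard, so importing app.py no longer renders the page. A minimal sketch of that entry-point pattern (the header text here is illustrative, not from the app):

import streamlit as st

def main():
    # Page setup happens inside main(), not at import time, so the module
    # can be imported (e.g. by tests) without drawing any widgets.
    st.header("Demo Page")

    # st.session_state survives Streamlit's top-to-bottom script reruns.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Replay the stored conversation on every rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

if __name__ == '__main__':
    main()

Streamlit runs the script with __name__ set to "__main__", so the guard behaves the same under streamlit run app.py as under plain python app.py.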
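
Inside main(), the "Change Selection" handler keeps the existing reset idiom: pop the selection keys from st.session_state, reseed the chat history, and rerun so select_models() renders again. The same idiom in isolation (key names taken from the diff):

import streamlit as st

if st.button("Change Selection", key="change_selection_btn"):
    # pop(key, None) is a no-op when the key was never set,
    # so this cannot raise KeyError on a fresh session.
    for key in ("selected_models", "retriever", "reasoner"):
        st.session_state.pop(key, None)
    st.rerun()  # restart the script top-to-bottom; the dropdowns reappear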
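
The assistant reply is not streamed from the model; the finished string is replayed word by word through an st.empty() placeholder, with a trailing ▌ as a fake typing cursor. A self-contained sketch of the trick (the response text is made up):

import time
import streamlit as st

response = "The claim is CORRECT because the retrieved evidence supports it."

with st.chat_message("assistant"):
    placeholder = st.empty()  # a single slot, overwritten on each iteration
    shown = ""
    for word in response.split():
        shown += word + " "
        time.sleep(0.05)  # ~50 ms per word
        placeholder.markdown(shown + "▌")  # cursor while "typing"
    placeholder.markdown(shown)  # final render drops the cursor

Newer Streamlit releases also provide st.write_stream for real token generators; the manual placeholder loop above is what the diff keeps.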