clean up responses
app.py CHANGED
@@ -174,12 +174,13 @@ def reasoner(query: str, documents: list[str], llm_client: Any):
 
     llm_response = llm_client.run_inference(prompt)
 
-    message = message + '\n' + llm_response
+    # message = message + '\n' + llm_response
 
     answer_dict = safe_parse_json(llm_response)
     decision = answer_dict.get("decision", "")
+    reasoning = answer_dict.get("reasoning", "")
 
-    message = message + '\n' + decision
+    # message = message + '\n' + decision
 
     for chunk in message.split():
         text += chunk + " "
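Review note: `answer_dict` comes from `safe_parse_json`, which is defined elsewhere in app.py and never shown in this diff. A minimal sketch of what such a helper plausibly does, with the name taken from the diff but the body entirely assumed: try a strict parse first, fall back to the first `{...}` block, and return an empty dict instead of raising, which keeps the `answer_dict.get("decision", "")` call above safe.

```python
import json
import re


def safe_parse_json(raw: str) -> dict:
    """Best-effort parse of an LLM response into a dict; never raises.

    Hypothetical sketch: the real helper in app.py is not shown in the diff.
    """
    # Try the whole string as JSON first.
    try:
        return json.loads(raw)
    except (json.JSONDecodeError, TypeError):
        pass
    # Fall back to the first {...} block, in case the model wrapped the JSON
    # in prose or code fences.
    match = re.search(r"\{.*\}", raw, re.DOTALL)
    if match:
        try:
            return json.loads(match.group(0))
        except json.JSONDecodeError:
            pass
    # An empty dict keeps answer_dict.get("decision", "") from blowing up.
    return {}
```

Separately, `llm_client.run_inference(prompt)` reads `prompt` even though the function parameter is named `query`; it happens to work in this script because `prompt` is bound at module level by the `st.chat_input` walrus below, but passing `query` would be safer.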
@@ -188,7 +189,7 @@ def reasoner(query: str, documents: list[str], llm_client: Any):
         placeholder.markdown(text + "▌")
     placeholder.markdown(text)
     # You could return reasoning info here.
-    return
+    return reasoning, decision
 
 # Accept user input
 if prompt := st.chat_input("Type here"):
@@ -215,23 +216,19 @@ if prompt := st.chat_input("Type here"):
         options["model_family"] = "OpenAI"
         options["model_name"] = "gpt-4o-2024-05-13"
 
-
     llm_client = LLMReasoner(options)
 
-
     retrieved_documents = retriever(prompt, selected_retriever)
-    reasoning = reasoner(prompt, retrieved_documents, llm_client)
+    reasoning, decision = reasoner(prompt, retrieved_documents, llm_client)
 
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
         message_placeholder = st.empty()
         full_response = ""
-
-
-
-
-            ]
-        )
+        if decision.lower() == 'support':
+            assistant_response = f'The claim is correct because {reasoning}'
+        elif decision.lower() == 'contradict':
+            assistant_response = f'The claim is incorrect because {reasoning}'
 
         # Simulate stream of response with milliseconds delay
         for chunk in assistant_response.split():
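One hazard in this hunk: `assistant_response` is only bound when `decision.lower()` is exactly 'support' or 'contradict'. If the model answers anything else, or `safe_parse_json` fell back to an empty dict so `decision` is the empty string, the later `assistant_response.split()` raises a `NameError`. (The removed lines at old 229-234 are truncated by the diff viewer; only the closing `]` and `)` survive, so the exact deleted call is not recoverable here.) A hedged sketch of a fallback branch, not part of this commit:

```python
# Sketch only: guarantee assistant_response is bound before it is streamed.
if decision.lower() == 'support':
    assistant_response = f'The claim is correct because {reasoning}'
elif decision.lower() == 'contradict':
    assistant_response = f'The claim is incorrect because {reasoning}'
else:
    # Covers empty or unexpected decisions from the model.
    assistant_response = 'I could not verify this claim from the retrieved documents.'
```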
@@ -242,4 +239,3 @@ if prompt := st.chat_input("Type here"):
         message_placeholder.markdown(full_response)
     # Add assistant response to chat history
     st.session_state.messages.append({"role": "assistant", "content": full_response})
-
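For context, both `reasoner` and the assistant block rely on the same word-by-word streaming pattern from Streamlit's chat tutorial. A self-contained sketch of that pattern, with the exact sleep value assumed (the code only promises a "milliseconds delay"):

```python
import time

import streamlit as st

assistant_response = "The claim is correct because the retrieved evidence supports it."

with st.chat_message("assistant"):
    message_placeholder = st.empty()  # one slot, redrawn on every chunk
    full_response = ""
    for chunk in assistant_response.split():
        full_response += chunk + " "
        time.sleep(0.05)  # assumed value; the code only says "milliseconds"
        message_placeholder.markdown(full_response + "▌")  # cursor while streaming
    message_placeholder.markdown(full_response)  # final render without the cursor
```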