Update src/streamlit_app.py
src/streamlit_app.py  +10 -10
@@ -1,7 +1,7 @@
 import streamlit as st
 from styles import apply_styling
 from utils import remove_reasoning_and_sources, clean_explanation
-from session_state import initialize_session_state, add_message_to_history
+from session_state import initialize_session_state, add_message_to_history, get_full_history
 from chat_display import display_chat_history, show_typing_indicator, display_legal_disclaimer
 from model import (
     orchestrator_chat,
@@ -72,8 +72,6 @@ show_typing_indicator()
 if prompt := st.chat_input("Describe your symptoms or ask a medical question..."):
     # Set conversation lock to prioritize conversation over report generation
     st.session_state.conversation_lock = True
-    # Explicitly hide report form when a new chat is initiated
-    st.session_state.show_report_form = False
 
     # Add user message to history using the database-backed function
     add_message_to_history({"role": "user", "content": prompt})
@@ -87,14 +85,16 @@ if prompt := st.chat_input("Describe your symptoms or ask a medical question..."):
 # Check if we need to process a response (this block runs after the rerun if processing is True)
 if st.session_state.processing:
     try:
-        #
-
+        # Get the full history from database
+        full_history = get_full_history()
 
-        if
-        # The
-
-
-
+        if full_history:  # Ensure history is not empty
+            current_user_prompt_message = full_history[-1]  # The last message is the current user's prompt
+
+            # The history for the orchestrator is all messages EXCEPT the last one
+            history_for_orchestrator = full_history[:-1]
+
+            # The query for the orchestrator is the content of the last user message
             current_query = current_user_prompt_message["content"]
 
             reply, explanation, follow_up_questions, evidence = orchestrator_chat(
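For context, the new branch appears to assume that get_full_history() returns the conversation as a chronological list of {"role", "content"} dicts, with the message just stored by add_message_to_history sitting last. The following is only a minimal standalone sketch of that splitting logic under this assumption; split_history is a hypothetical helper for illustration and is not part of the repository.

import streamlit as st  # noqa: F401  (shown only to mirror the app's context)

def split_history(full_history: list[dict]) -> tuple[list[dict], str]:
    """Split the stored history into (prior messages, current user query).

    Assumes messages are ordered oldest-to-newest and the current user
    prompt is the final entry, mirroring full_history[:-1] / full_history[-1]
    in the diff above.
    """
    if not full_history:
        raise ValueError("History is empty; nothing to process")
    *history_for_orchestrator, current_user_prompt_message = full_history
    return history_for_orchestrator, current_user_prompt_message["content"]

# Hypothetical two-turn conversation to exercise the split:
sample = [
    {"role": "user", "content": "I have a headache and mild fever."},
    {"role": "assistant", "content": "How long have you had these symptoms?"},
    {"role": "user", "content": "About two days."},
]
history, query = split_history(sample)
assert query == "About two days."
assert len(history) == 2

The design choice the diff makes is to treat the last stored message as the current query and pass only the preceding messages to orchestrator_chat, so the orchestrator receives prior context and the new question as separate inputs rather than a single undifferentiated transcript.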