Spaces:
Sleeping
Sleeping
Suraj Yadav
committed on
Commit
·
6d104af
1
Parent(s):
9e4c053
update Streamlit UI: To display chat conversation for Tools calls for - Chatbot with Tools
Browse files
src/basicchatbot/ui/streamlitui/display_result.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
import streamlit as st
|
2 |
from typing import Literal
|
3 |
from langgraph.graph.state import CompiledStateGraph
|
4 |
-
from langchain_core.messages import AIMessage
|
5 |
|
6 |
|
7 |
class DisplayResultStreamlit:
|
@@ -49,9 +49,48 @@ class DisplayResultStreamlit:
|
|
49 |
except Exception as e:
|
50 |
st.error(f"Error processing response: {str(e)}")
|
51 |
|
|
|
|
|
52 |
def _handle_chatbot_with_tool(self) -> None:
|
53 |
"""Handle the tool-enhanced chatbot interaction flow."""
|
54 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
55 |
|
56 |
def display_result_on_ui(self) -> None:
|
57 |
usecase_handlers = {
|
|
|
1 |
import streamlit as st
|
2 |
from typing import Literal
|
3 |
from langgraph.graph.state import CompiledStateGraph
|
4 |
+
from langchain_core.messages import AIMessage,HumanMessage,ToolMessage
|
5 |
|
6 |
|
7 |
class DisplayResultStreamlit:
|
|
|
49 |
except Exception as e:
|
50 |
st.error(f"Error processing response: {str(e)}")
|
51 |
|
52 |
+
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
|
53 |
+
|
54 |
def _handle_chatbot_with_tool(self) -> None:
|
55 |
"""Handle the tool-enhanced chatbot interaction flow."""
|
56 |
+
|
57 |
+
# Display chat history before processing new input
|
58 |
+
self._display_chat_history()
|
59 |
+
|
60 |
+
# Append user message to session state and display it
|
61 |
+
# st.session_state.message_history.append({"role": "user", "message": self.user_message})
|
62 |
+
# with st.chat_message("user"):
|
63 |
+
# st.markdown(self.user_message)
|
64 |
+
# self._display_message("user", self.user_message)
|
65 |
+
|
66 |
+
try:
|
67 |
+
initial_state = {"messages": self.user_message}
|
68 |
+
|
69 |
+
result = self.graph.invoke(input=initial_state)
|
70 |
+
|
71 |
+
for message in result["messages"]:
|
72 |
+
if isinstance(message, HumanMessage):
|
73 |
+
st.session_state.message_history.append({"role": "user", "message": message.content})
|
74 |
+
self._display_message("user", message.content)
|
75 |
+
|
76 |
+
elif isinstance(message, AIMessage) and message.content:
|
77 |
+
st.session_state.message_history.append({"role": "assistant", "message": message.content})
|
78 |
+
self._display_message("assistant", message.content)
|
79 |
+
|
80 |
+
elif isinstance(message, ToolMessage):
|
81 |
+
# Display tool-specific response
|
82 |
+
tool_message = f"**Tool Response:**\n{message.content}"
|
83 |
+
|
84 |
+
st.session_state.message_history.append({"role": "assistant", "message": tool_message})
|
85 |
+
with st.chat_message("assistant"):
|
86 |
+
st.markdown("🔧 **Tool Call Start**")
|
87 |
+
st.markdown(message.content)
|
88 |
+
st.markdown("🔧 **Tool Call End**")
|
89 |
+
|
90 |
+
except Exception as e:
|
91 |
+
st.error(f"Error processing response: {str(e)}")
|
92 |
+
|
93 |
+
|
94 |
|
95 |
def display_result_on_ui(self) -> None:
|
96 |
usecase_handlers = {
|