mgbam committed on
Commit
90dcb0c
·
verified ·
1 Parent(s): 5e58a2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +137 -32
app.py CHANGED
@@ -1,9 +1,6 @@
1
- # Install necessary libraries (if not already installed)
2
- # pip install langchain streamlit requests langgraph typing-extensions
3
-
4
- from langchain.embeddings.openai import OpenAIEmbeddings
5
- from langchain.vectorstores import Chroma
6
- from langchain.schema import HumanMessage, AIMessage, ToolMessage
7
  from langchain.text_splitter import RecursiveCharacterTextSplitter
8
  from langgraph.graph import END, StateGraph, START
9
  from langgraph.prebuilt import ToolNode
@@ -16,7 +13,9 @@ import streamlit as st
16
  import requests
17
  from langchain.tools.retriever import create_retriever_tool
18
 
 
19
  # Create Dummy Data
 
20
  research_texts = [
21
  "Research Report: Results of a New AI Model Improving Image Recognition Accuracy to 98%",
22
  "Academic Paper Summary: Why Transformers Became the Mainstream Architecture in Natural Language Processing",
@@ -29,6 +28,9 @@ development_texts = [
29
  "Product Y: In the Performance Optimization Stage Before Release"
30
  ]
31
 
 
 
 
32
  # Text splitting settings
33
  splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)
34
 
@@ -36,53 +38,64 @@ splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)
36
  research_docs = splitter.create_documents(research_texts)
37
  development_docs = splitter.create_documents(development_texts)
38
 
39
- # Create vector stores using OpenAI embeddings
40
  embeddings = OpenAIEmbeddings(
41
- model="text-embedding-3-large"
 
 
42
  )
 
 
43
  research_vectorstore = Chroma.from_documents(
44
  documents=research_docs,
45
  embedding=embeddings,
46
  collection_name="research_collection"
47
  )
 
48
  development_vectorstore = Chroma.from_documents(
49
  documents=development_docs,
50
  embedding=embeddings,
51
  collection_name="development_collection"
52
  )
53
 
 
54
  research_retriever = research_vectorstore.as_retriever()
55
  development_retriever = development_vectorstore.as_retriever()
56
 
57
- # Create retriever tools
 
 
58
  research_tool = create_retriever_tool(
59
- research_retriever, # Retriever object
60
- "research_db_tool", # Tool name
61
- "Search information from the research database." # Description
62
  )
 
63
  development_tool = create_retriever_tool(
64
  development_retriever,
65
  "development_db_tool",
66
  "Search information from the development database."
67
  )
68
 
69
- # Combine the created tools
70
  tools = [research_tool, development_tool]
71
 
72
- # Define the agent state type
 
 
73
  class AgentState(TypedDict):
74
  messages: Annotated[Sequence[AIMessage | HumanMessage | ToolMessage], add_messages]
75
 
76
- # Define the agent function for processing user questions
77
  def agent(state: AgentState):
78
  print("---CALL AGENT---")
79
  messages = state["messages"]
 
80
  if isinstance(messages[0], tuple):
81
  user_message = messages[0][1]
82
  else:
83
  user_message = messages[0].content
84
 
85
- # Structured prompt for the agent
86
  prompt = f"""Given this user question: "{user_message}"
87
  If it's about research or academic topics, respond EXACTLY in this format:
88
  SEARCH_RESEARCH: <search terms>
@@ -92,44 +105,58 @@ SEARCH_DEV: <search terms>
92
 
93
  Otherwise, just answer directly.
94
  """
 
95
  headers = {
96
  "Accept": "application/json",
97
  "Authorization": f"Bearer sk-1cddf19f9dc4466fa3ecea6fe10abec0",
98
  "Content-Type": "application/json"
99
  }
 
100
  data = {
101
  "model": "deepseek-chat",
102
  "messages": [{"role": "user", "content": prompt}],
103
  "temperature": 0.7,
104
  "max_tokens": 1024
105
  }
 
106
  response = requests.post(
107
  "https://api.deepseek.com/v1/chat/completions",
108
  headers=headers,
109
  json=data,
110
  verify=False
111
  )
 
112
  if response.status_code == 200:
113
  response_text = response.json()['choices'][0]['message']['content']
114
  print("Raw response:", response_text)
 
 
115
  if "SEARCH_RESEARCH:" in response_text:
116
  query = response_text.split("SEARCH_RESEARCH:")[1].strip()
 
117
  results = research_retriever.invoke(query)
118
  return {"messages": [AIMessage(content=f'Action: research_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}')]}
 
119
  elif "SEARCH_DEV:" in response_text:
120
  query = response_text.split("SEARCH_DEV:")[1].strip()
 
121
  results = development_retriever.invoke(query)
122
  return {"messages": [AIMessage(content=f'Action: development_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}')]}
 
123
  else:
124
  return {"messages": [AIMessage(content=response_text)]}
125
  else:
126
  raise Exception(f"API call failed: {response.text}")
127
 
128
- # Grading function to decide next step
 
 
129
  def simple_grade_documents(state: AgentState):
130
  messages = state["messages"]
131
  last_message = messages[-1]
132
  print("Evaluating message:", last_message.content)
 
 
133
  if "Results: [Document" in last_message.content:
134
  print("---DOCS FOUND, GO TO GENERATE---")
135
  return "generate"
@@ -137,33 +164,44 @@ def simple_grade_documents(state: AgentState):
137
  print("---NO DOCS FOUND, TRY REWRITE---")
138
  return "rewrite"
139
 
140
- # Generation function to synthesize a final answer
 
 
141
  def generate(state: AgentState):
142
  print("---GENERATE FINAL ANSWER---")
143
  messages = state["messages"]
144
- question = messages[0].content
145
  last_message = messages[-1]
 
 
146
  docs = ""
147
  if "Results: [" in last_message.content:
148
  results_start = last_message.content.find("Results: [")
149
  docs = last_message.content[results_start:]
150
  print("Documents found:", docs)
 
151
  headers = {
152
  "Accept": "application/json",
153
  "Authorization": f"Bearer sk-1cddf19f9dc4466fa3ecea6fe10abec0",
154
  "Content-Type": "application/json"
155
  }
 
156
  prompt = f"""Based on these research documents, summarize the latest advancements in AI:
157
  Question: {question}
158
  Documents: {docs}
159
  Focus on extracting and synthesizing the key findings from the research papers.
160
  """
 
161
  data = {
162
  "model": "deepseek-chat",
163
- "messages": [{"role": "user", "content": prompt}],
 
 
 
164
  "temperature": 0.7,
165
  "max_tokens": 1024
166
  }
 
167
  print("Sending generate request to API...")
168
  response = requests.post(
169
  "https://api.deepseek.com/v1/chat/completions",
@@ -171,6 +209,7 @@ Focus on extracting and synthesizing the key findings from the research papers.
171
  json=data,
172
  verify=False
173
  )
 
174
  if response.status_code == 200:
175
  response_text = response.json()['choices'][0]['message']['content']
176
  print("Final Answer:", response_text)
@@ -178,22 +217,30 @@ Focus on extracting and synthesizing the key findings from the research papers.
178
  else:
179
  raise Exception(f"API call failed: {response.text}")
180
 
181
- # Rewrite function to refine unclear questions
 
 
182
  def rewrite(state: AgentState):
183
  print("---REWRITE QUESTION---")
184
  messages = state["messages"]
185
  original_question = messages[0].content if len(messages) > 0 else "N/A"
 
186
  headers = {
187
  "Accept": "application/json",
188
  "Authorization": f"Bearer sk-1cddf19f9dc4466fa3ecea6fe10abec0",
189
  "Content-Type": "application/json"
190
  }
 
191
  data = {
192
  "model": "deepseek-chat",
193
- "messages": [{"role": "user", "content": f"Rewrite this question to be more specific and clearer: {original_question}"}],
 
 
 
194
  "temperature": 0.7,
195
  "max_tokens": 1024
196
  }
 
197
  print("Sending rewrite request...")
198
  response = requests.post(
199
  "https://api.deepseek.com/v1/chat/completions",
@@ -201,8 +248,10 @@ def rewrite(state: AgentState):
201
  json=data,
202
  verify=False
203
  )
 
204
  print("Status Code:", response.status_code)
205
  print("Response:", response.text)
 
206
  if response.status_code == 200:
207
  response_text = response.json()['choices'][0]['message']['content']
208
  print("Rewritten question:", response_text)
@@ -210,12 +259,16 @@ def rewrite(state: AgentState):
210
  else:
211
  raise Exception(f"API call failed: {response.text}")
212
 
213
- # Custom condition to check if a tool action is called
 
 
214
  tools_pattern = re.compile(r"Action: .*")
 
215
  def custom_tools_condition(state: AgentState):
216
  messages = state["messages"]
217
  last_message = messages[-1]
218
  content = last_message.content
 
219
  print("Checking tools condition:", content)
220
  if tools_pattern.match(content):
221
  print("Moving to retrieve...")
@@ -223,62 +276,113 @@ def custom_tools_condition(state: AgentState):
223
  print("Moving to END...")
224
  return END
225
 
226
- # Build the workflow using LangGraph's StateGraph
 
 
227
  workflow = StateGraph(AgentState)
 
 
228
  workflow.add_node("agent", agent)
229
  retrieve_node = ToolNode(tools)
230
  workflow.add_node("retrieve", retrieve_node)
231
  workflow.add_node("rewrite", rewrite)
232
  workflow.add_node("generate", generate)
 
 
233
  workflow.add_edge(START, "agent")
234
- workflow.add_conditional_edges("agent", custom_tools_condition, {"tools": "retrieve", END: END})
 
 
 
 
 
 
 
 
 
 
 
235
  workflow.add_conditional_edges("retrieve", simple_grade_documents)
236
  workflow.add_edge("generate", END)
237
  workflow.add_edge("rewrite", "agent")
 
 
238
  app = workflow.compile()
239
 
240
- # Function to process a user question through the workflow
 
 
241
  def process_question(user_question, app, config):
 
242
  events = []
243
  for event in app.stream({"messages": [("user", user_question)]}, config):
244
  events.append(event)
245
  return events
246
 
247
- # Streamlit UI for interaction
 
 
248
  def main():
249
  st.set_page_config(
250
  page_title="AI Research & Development Assistant",
251
  layout="wide",
252
  initial_sidebar_state="expanded"
253
  )
 
 
254
  st.markdown("""
255
  <style>
256
- .stApp { background-color: #f8f9fa; }
257
- .stButton > button { width: 100%; margin-top: 20px; }
258
- .data-box { padding: 20px; border-radius: 10px; margin: 10px 0; }
259
- .research-box { background-color: #e3f2fd; border-left: 5px solid #1976d2; }
260
- .dev-box { background-color: #e8f5e9; border-left: 5px solid #43a047; }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
261
  </style>
262
  """, unsafe_allow_html=True)
263
 
 
264
  with st.sidebar:
265
  st.header("πŸ“š Available Data")
 
266
  st.subheader("Research Database")
267
  for text in research_texts:
268
  st.markdown(f'<div class="data-box research-box">{text}</div>', unsafe_allow_html=True)
 
269
  st.subheader("Development Database")
270
  for text in development_texts:
271
  st.markdown(f'<div class="data-box dev-box">{text}</div>', unsafe_allow_html=True)
272
 
 
273
  st.title("πŸ€– AI Research & Development Assistant")
274
  st.markdown("---")
 
 
275
  query = st.text_area("Enter your question:", height=100, placeholder="e.g., What is the latest advancement in AI research?")
 
276
  col1, col2 = st.columns([1, 2])
277
  with col1:
278
  if st.button("πŸ” Get Answer", use_container_width=True):
279
  if query:
280
  with st.spinner('Processing your question...'):
 
281
  events = process_question(query, app, {"configurable": {"thread_id": "1"}})
 
 
282
  for event in events:
283
  if 'agent' in event:
284
  with st.expander("πŸ”„ Processing Step", expanded=True):
@@ -293,6 +397,7 @@ def main():
293
  st.success(event['generate']['messages'][0].content)
294
  else:
295
  st.warning("⚠️ Please enter a question first!")
 
296
  with col2:
297
  st.markdown("""
298
  ### 🎯 How to Use
 
1
+ from langchain_openai import OpenAIEmbeddings
2
+ from langchain_community.vectorstores import Chroma
3
+ from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
 
 
 
4
  from langchain.text_splitter import RecursiveCharacterTextSplitter
5
  from langgraph.graph import END, StateGraph, START
6
  from langgraph.prebuilt import ToolNode
 
13
  import requests
14
  from langchain.tools.retriever import create_retriever_tool
15
 
16
+ # --------------------------
17
  # Create Dummy Data
18
+ # --------------------------
19
  research_texts = [
20
  "Research Report: Results of a New AI Model Improving Image Recognition Accuracy to 98%",
21
  "Academic Paper Summary: Why Transformers Became the Mainstream Architecture in Natural Language Processing",
 
28
  "Product Y: In the Performance Optimization Stage Before Release"
29
  ]
30
 
31
+ # --------------------------
32
+ # Process the Data
33
+ # --------------------------
34
  # Text splitting settings
35
  splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)
36
 
 
38
  research_docs = splitter.create_documents(research_texts)
39
  development_docs = splitter.create_documents(development_texts)
40
 
41
+ # Create vector embeddings
42
  embeddings = OpenAIEmbeddings(
43
+ model="text-embedding-3-large",
44
+ # For text-embedding-3 class models, you can specify dimensions if needed.
45
+ # dimensions=1024
46
  )
47
+
48
+ # Create vector stores
49
  research_vectorstore = Chroma.from_documents(
50
  documents=research_docs,
51
  embedding=embeddings,
52
  collection_name="research_collection"
53
  )
54
+
55
  development_vectorstore = Chroma.from_documents(
56
  documents=development_docs,
57
  embedding=embeddings,
58
  collection_name="development_collection"
59
  )
60
 
61
+ # Create retrievers from the vector stores
62
  research_retriever = research_vectorstore.as_retriever()
63
  development_retriever = development_vectorstore.as_retriever()
64
 
65
+ # --------------------------
66
+ # Create Retriever Tools
67
+ # --------------------------
68
  research_tool = create_retriever_tool(
69
+ research_retriever, # Retriever object
70
+ "research_db_tool", # Name of the tool to create
71
+ "Search information from the research database." # Description of the tool
72
  )
73
+
74
  development_tool = create_retriever_tool(
75
  development_retriever,
76
  "development_db_tool",
77
  "Search information from the development database."
78
  )
79
 
80
+ # Combine the tools into a list
81
  tools = [research_tool, development_tool]
82
 
83
+ # --------------------------
84
+ # Define the Agent Function
85
+ # --------------------------
86
  class AgentState(TypedDict):
87
  messages: Annotated[Sequence[AIMessage | HumanMessage | ToolMessage], add_messages]
88
 
 
89
def agent(state: AgentState):
    """Route the user's question: run a retriever search or answer directly.

    Asks the DeepSeek chat API to classify the question. When the reply
    contains a SEARCH_RESEARCH:/SEARCH_DEV: directive, the matching
    retriever is invoked and its results are embedded in an AIMessage of
    the form 'Action: <tool_name>...' so custom_tools_condition can route
    to the retrieve node. Otherwise the model's direct answer is returned.

    Raises:
        Exception: when the DeepSeek API call does not return HTTP 200.
    """
    import os  # local import so this block stays self-contained

    print("---CALL AGENT---")
    messages = state["messages"]

    # The first message may arrive as a ("user", text) tuple from
    # app.stream(...) or as a message object after a rewrite pass.
    if isinstance(messages[0], tuple):
        user_message = messages[0][1]
    else:
        user_message = messages[0].content

    # Structure prompt for consistent text output
    prompt = f"""Given this user question: "{user_message}"
If it's about research or academic topics, respond EXACTLY in this format:
SEARCH_RESEARCH: <search terms>

If it's about development status, respond EXACTLY in this format:
SEARCH_DEV: <search terms>

Otherwise, just answer directly.
"""

    # SECURITY FIX: the bearer token was previously hard-coded in source.
    # Read it from the environment so the secret never lives in the repo.
    headers = {
        "Accept": "application/json",
        "Authorization": f"Bearer {os.getenv('DEEPSEEK_API_KEY', '')}",
        "Content-Type": "application/json"
    }

    data = {
        "model": "deepseek-chat",
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.7,
        "max_tokens": 1024
    }

    # SECURITY FIX: verify=False (previously used here) disables TLS
    # certificate checking; rely on the default verify=True. A timeout
    # keeps a stalled API call from hanging the Streamlit app forever.
    response = requests.post(
        "https://api.deepseek.com/v1/chat/completions",
        headers=headers,
        json=data,
        timeout=60,
    )

    if response.status_code == 200:
        response_text = response.json()['choices'][0]['message']['content']
        print("Raw response:", response_text)

        # Translate the text directive into the expected tool format.
        if "SEARCH_RESEARCH:" in response_text:
            query = response_text.split("SEARCH_RESEARCH:")[1].strip()
            # Use direct call to research retriever
            results = research_retriever.invoke(query)
            return {"messages": [AIMessage(content=f'Action: research_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}')]}

        elif "SEARCH_DEV:" in response_text:
            query = response_text.split("SEARCH_DEV:")[1].strip()
            # Use direct call to development retriever
            results = development_retriever.invoke(query)
            return {"messages": [AIMessage(content=f'Action: development_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}')]}

        else:
            return {"messages": [AIMessage(content=response_text)]}
    else:
        raise Exception(f"API call failed: {response.text}")
150
 
151
+ # --------------------------
152
+ # Grading Function
153
+ # --------------------------
154
def simple_grade_documents(state: AgentState):
    """Pick the node that should run after a retrieval attempt.

    Returns "generate" when the most recent message embeds retrieved
    documents (its content contains the literal 'Results: [Document'
    marker that agent() writes), otherwise "rewrite" so the question
    can be refined and retried.
    """
    latest = state["messages"][-1]
    print("Evaluating message:", latest.content)

    # The presence of the results marker means the retriever found docs.
    found_documents = "Results: [Document" in latest.content
    if found_documents:
        print("---DOCS FOUND, GO TO GENERATE---")
        return "generate"

    print("---NO DOCS FOUND, TRY REWRITE---")
    return "rewrite"
166
 
167
+ # --------------------------
168
+ # Generation Function
169
+ # --------------------------
170
def generate(state: AgentState):
    """Synthesize a final answer from the retrieved documents.

    Extracts the original question and the 'Results: [...' document dump
    from the message history, then asks the DeepSeek chat API to
    summarize the findings. Returns the answer wrapped in an AIMessage.

    Raises:
        Exception: when the DeepSeek API call does not return HTTP 200.
    """
    import os  # local import so this block stays self-contained

    print("---GENERATE FINAL ANSWER---")
    messages = state["messages"]

    # BUG FIX: the original ternary had identical branches
    # (`messages[0].content` on both sides), so a ("user", text) tuple
    # would crash on .content. Unpack tuples the same way agent() does.
    first = messages[0]
    question = first[1] if isinstance(first, tuple) else first.content
    last_message = messages[-1]

    # Extract the document content from the results
    docs = ""
    if "Results: [" in last_message.content:
        results_start = last_message.content.find("Results: [")
        docs = last_message.content[results_start:]
        print("Documents found:", docs)

    # SECURITY FIX: read the bearer token from the environment instead of
    # hard-coding the secret in source.
    headers = {
        "Accept": "application/json",
        "Authorization": f"Bearer {os.getenv('DEEPSEEK_API_KEY', '')}",
        "Content-Type": "application/json"
    }

    prompt = f"""Based on these research documents, summarize the latest advancements in AI:
Question: {question}
Documents: {docs}
Focus on extracting and synthesizing the key findings from the research papers.
"""

    data = {
        "model": "deepseek-chat",
        "messages": [{
            "role": "user",
            "content": prompt
        }],
        "temperature": 0.7,
        "max_tokens": 1024
    }

    print("Sending generate request to API...")
    # SECURITY FIX: keep the default verify=True (was verify=False) and
    # bound the call with a timeout.
    response = requests.post(
        "https://api.deepseek.com/v1/chat/completions",
        headers=headers,
        json=data,
        timeout=60,
    )

    if response.status_code == 200:
        response_text = response.json()['choices'][0]['message']['content']
        print("Final Answer:", response_text)
        return {"messages": [AIMessage(content=response_text)]}
    else:
        raise Exception(f"API call failed: {response.text}")
219
 
220
+ # --------------------------
221
+ # Rewrite Function
222
+ # --------------------------
223
def rewrite(state: AgentState):
    """Ask the model to restate an unclear question more specifically.

    Sends the first message in the history to the DeepSeek chat API with
    a rewrite instruction and returns the rewritten question wrapped in
    a message so the agent node can retry with it.

    Raises:
        Exception: when the DeepSeek API call does not return HTTP 200.
    """
    import os  # local import so this block stays self-contained

    print("---REWRITE QUESTION---")
    messages = state["messages"]

    # ROBUSTNESS FIX: the first message can be a ("user", text) tuple
    # (as agent() already handles); .content on a tuple would crash.
    if not messages:
        original_question = "N/A"
    elif isinstance(messages[0], tuple):
        original_question = messages[0][1]
    else:
        original_question = messages[0].content

    # SECURITY FIX: read the bearer token from the environment instead of
    # hard-coding the secret in source.
    headers = {
        "Accept": "application/json",
        "Authorization": f"Bearer {os.getenv('DEEPSEEK_API_KEY', '')}",
        "Content-Type": "application/json"
    }

    data = {
        "model": "deepseek-chat",
        "messages": [{
            "role": "user",
            "content": f"Rewrite this question to be more specific and clearer: {original_question}"
        }],
        "temperature": 0.7,
        "max_tokens": 1024
    }

    print("Sending rewrite request...")
    # SECURITY FIX: keep the default verify=True (was verify=False) and
    # bound the call with a timeout.
    response = requests.post(
        "https://api.deepseek.com/v1/chat/completions",
        headers=headers,
        json=data,
        timeout=60,
    )

    print("Status Code:", response.status_code)
    print("Response:", response.text)

    if response.status_code == 200:
        response_text = response.json()['choices'][0]['message']['content']
        print("Rewritten question:", response_text)
        return {"messages": [AIMessage(content=response_text)]}
    else:
        raise Exception(f"API call failed: {response.text}")
261
 
262
+ # --------------------------
263
+ # Tools Decision Function
264
+ # --------------------------
265
# Matches the 'Action: ...' prefix that agent() emits when a tool was chosen.
tools_pattern = re.compile(r"Action: .*")

def custom_tools_condition(state: AgentState):
    """Route from the agent node.

    Returns "tools" when the last message begins with an 'Action: ...'
    directive (a retriever was invoked), otherwise END to finish the run.
    """
    content = state["messages"][-1].content

    print("Checking tools condition:", content)
    if tools_pattern.match(content):
        print("Moving to retrieve...")
        return "tools"

    print("Moving to END...")
    return END
278
 
279
+ # --------------------------
280
+ # LangGraph Workflow Setup
281
+ # --------------------------
282
# Wire up the LangGraph state machine:
#   START -> agent -> (retrieve | END); after retrieval the documents are
#   graded and flow continues to generate (answer) or rewrite -> agent.
workflow = StateGraph(AgentState)

retrieve_node = ToolNode(tools)
for node_name, node_impl in (
    ("agent", agent),
    ("retrieve", retrieve_node),
    ("rewrite", rewrite),
    ("generate", generate),
):
    workflow.add_node(node_name, node_impl)

# Entry point of the graph.
workflow.add_edge(START, "agent")

# Agent either hands off to the retriever ("tools") or terminates.
workflow.add_conditional_edges(
    "agent",
    custom_tools_condition,
    {
        "tools": "retrieve",
        END: END
    }
)

# Grade retrieved documents: generate an answer or rewrite the question.
workflow.add_conditional_edges("retrieve", simple_grade_documents)
workflow.add_edge("generate", END)
workflow.add_edge("rewrite", "agent")

# Compile into the runnable application used by the Streamlit UI.
app = workflow.compile()
311
 
312
+ # --------------------------
313
+ # Process Question Function
314
+ # --------------------------
315
def process_question(user_question, app, config):
    """Run *user_question* through the compiled workflow.

    Seeds the graph with a single ("user", question) message and collects
    every streamed event, in order, into a list that the UI can render.
    """
    initial_state = {"messages": [("user", user_question)]}
    return list(app.stream(initial_state, config))
321
 
322
+ # --------------------------
323
+ # Streamlit Application
324
+ # --------------------------
325
  def main():
326
  st.set_page_config(
327
  page_title="AI Research & Development Assistant",
328
  layout="wide",
329
  initial_sidebar_state="expanded"
330
  )
331
+
332
+ # Custom CSS
333
  st.markdown("""
334
  <style>
335
+ .stApp {
336
+ background-color: #f8f9fa;
337
+ }
338
+ .stButton > button {
339
+ width: 100%;
340
+ margin-top: 20px;
341
+ }
342
+ .data-box {
343
+ padding: 20px;
344
+ border-radius: 10px;
345
+ margin: 10px 0;
346
+ }
347
+ .research-box {
348
+ background-color: #e3f2fd;
349
+ border-left: 5px solid #1976d2;
350
+ }
351
+ .dev-box {
352
+ background-color: #e8f5e9;
353
+ border-left: 5px solid #43a047;
354
+ }
355
  </style>
356
  """, unsafe_allow_html=True)
357
 
358
+ # Sidebar with Data Display
359
  with st.sidebar:
360
  st.header("πŸ“š Available Data")
361
+
362
  st.subheader("Research Database")
363
  for text in research_texts:
364
  st.markdown(f'<div class="data-box research-box">{text}</div>', unsafe_allow_html=True)
365
+
366
  st.subheader("Development Database")
367
  for text in development_texts:
368
  st.markdown(f'<div class="data-box dev-box">{text}</div>', unsafe_allow_html=True)
369
 
370
+ # Main Content
371
  st.title("πŸ€– AI Research & Development Assistant")
372
  st.markdown("---")
373
+
374
+ # Query Input
375
  query = st.text_area("Enter your question:", height=100, placeholder="e.g., What is the latest advancement in AI research?")
376
+
377
  col1, col2 = st.columns([1, 2])
378
  with col1:
379
  if st.button("πŸ” Get Answer", use_container_width=True):
380
  if query:
381
  with st.spinner('Processing your question...'):
382
+ # Process query through workflow
383
  events = process_question(query, app, {"configurable": {"thread_id": "1"}})
384
+
385
+ # Display results
386
  for event in events:
387
  if 'agent' in event:
388
  with st.expander("πŸ”„ Processing Step", expanded=True):
 
397
  st.success(event['generate']['messages'][0].content)
398
  else:
399
  st.warning("⚠️ Please enter a question first!")
400
+
401
  with col2:
402
  st.markdown("""
403
  ### 🎯 How to Use