mgbam committed on
Commit
5e58a2d
·
verified ·
1 Parent(s): b7719bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +163 -193
app.py CHANGED
@@ -1,111 +1,106 @@
1
- # Advanced Multi‑Modal Agentic RAG Chatbot
2
- # pip install -r requirements.txt
3
 
4
- import streamlit as st
5
- import requests
6
- import json
7
- import re
8
- import os
9
- from typing import Sequence
10
- from typing_extensions import TypedDict, Annotated
11
-
12
- from langchain_openai import OpenAIEmbeddings
13
- from langchain_community.vectorstores import Chroma
14
- from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
15
  from langchain.text_splitter import RecursiveCharacterTextSplitter
16
  from langgraph.graph import END, StateGraph, START
17
  from langgraph.prebuilt import ToolNode
18
  from langgraph.graph.message import add_messages
 
 
 
 
 
 
 
19
 
20
- # -------------------------------------------------------------------
21
- # DATA SETUP: Static (research) and Dynamic (live updates) Databases
22
-
23
- # Static research data (e.g., academic papers, reports)
24
  research_texts = [
25
- "Research Report: New algorithm boosts image recognition to 99%.",
26
- "Paper: Transformers have redefined natural language processing paradigms.",
27
- "Deep dive: Quantum computing’s emerging role in machine learning."
28
  ]
29
 
30
- # Dynamic development/live data (e.g., real-time project updates)
31
  development_texts = [
32
- "Live Update: Project X API integration at 75% completion.",
33
- "Status: Project Y is undergoing stress testing for scalability.",
34
- "Alert: Immediate patch required for Project Z deployment issues."
35
  ]
36
 
37
- # Text splitting settings: adaptable for multi‑modal data (could extend to images)
38
  splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)
 
 
39
  research_docs = splitter.create_documents(research_texts)
40
  development_docs = splitter.create_documents(development_texts)
41
 
42
- # Create vector stores using advanced embeddings
43
  embeddings = OpenAIEmbeddings(
44
  model="text-embedding-3-large"
45
  )
46
  research_vectorstore = Chroma.from_documents(
47
  documents=research_docs,
48
  embedding=embeddings,
49
- collection_name="research_collection_adv"
50
  )
51
  development_vectorstore = Chroma.from_documents(
52
  documents=development_docs,
53
  embedding=embeddings,
54
- collection_name="development_collection_adv"
55
  )
56
 
57
  research_retriever = research_vectorstore.as_retriever()
58
  development_retriever = development_vectorstore.as_retriever()
59
 
60
- # Create tool wrappers for the two databases
61
- from langchain.tools.retriever import create_retriever_tool
62
  research_tool = create_retriever_tool(
63
- research_retriever,
64
- "research_db_tool",
65
- "Search and retrieve static academic research documents."
66
  )
67
  development_tool = create_retriever_tool(
68
  development_retriever,
69
  "development_db_tool",
70
- "Retrieve dynamic, real‑time development updates."
71
  )
72
- tools = [research_tool, development_tool]
73
 
74
- # -------------------------------------------------------------------
75
- # AGENT DESIGN: Advanced Agent with Self‑Reflection & Multi‑Tool Coordination
76
 
77
- class AdvancedAgentState(TypedDict):
 
78
  messages: Annotated[Sequence[AIMessage | HumanMessage | ToolMessage], add_messages]
79
 
80
- def advanced_agent(state: AdvancedAgentState):
81
- """
82
- A smarter agent that:
83
- • Receives a multi-modal query (text and potentially images)
84
- • Self-reflects on the query to decide if a real-time lookup is needed
85
- • Chooses the appropriate tool or even combines results if required.
86
- """
87
- st.write(">> [Agent] Processing query...")
88
  messages = state["messages"]
89
- user_message = messages[0].content if not isinstance(messages[0], tuple) else messages[0][1]
 
 
 
 
 
 
 
 
 
 
 
90
 
91
- # Step 1: Initial Analysis and Self-Reflection
92
- analysis_prompt = f"""You are an advanced multi-modal reasoning engine.
93
- User Query: "{user_message}"
94
- Analyze the query and decide:
95
- - If it is about static academic research, output EXACTLY: ACTION_RESEARCH: <query>.
96
- - If it is about dynamic development or live updates, output EXACTLY: ACTION_LIVE: <query>.
97
- - Otherwise, output a direct answer with self-reflection.
98
- Also, add a brief self-reflection on your reasoning process.
99
  """
100
  headers = {
101
  "Accept": "application/json",
102
- "Authorization": "Bearer sk-ADVANCEDKEY123", # Use your secure key here
103
  "Content-Type": "application/json"
104
  }
105
  data = {
106
  "model": "deepseek-chat",
107
- "messages": [{"role": "user", "content": analysis_prompt}],
108
- "temperature": 0.6,
109
  "max_tokens": 1024
110
  }
111
  response = requests.post(
@@ -114,226 +109,201 @@ Also, add a brief self-reflection on your reasoning process.
114
  json=data,
115
  verify=False
116
  )
117
- if response.status_code != 200:
118
- raise Exception(f"API call failed: {response.text}")
119
- response_text = response.json()['choices'][0]['message']['content']
120
- st.write(">> [Agent] Analysis:", response_text)
121
-
122
- # Step 2: Interpret the result and call the appropriate tool(s)
123
- if "ACTION_RESEARCH:" in response_text:
124
- query = response_text.split("ACTION_RESEARCH:")[1].strip().split("\n")[0]
125
- results = research_retriever.invoke(query)
126
- return {"messages": [AIMessage(content=f'Action: research_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}\n\nReflection: {response_text}')]}
127
- elif "ACTION_LIVE:" in response_text:
128
- query = response_text.split("ACTION_LIVE:")[1].strip().split("\n")[0]
129
- results = development_retriever.invoke(query)
130
- return {"messages": [AIMessage(content=f'Action: development_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}\n\nReflection: {response_text}')]}
131
  else:
132
- # Direct answer with self-reflection
133
- return {"messages": [AIMessage(content=response_text)]}
134
-
135
- # -------------------------------------------------------------------
136
- # DECISION & GENERATION FUNCTIONS: Advanced Grading & Iterative Answering
137
 
138
- def advanced_grade(state: AdvancedAgentState):
139
- """
140
- Checks the last message for valid document retrieval or if further refinement is needed.
141
- """
142
  messages = state["messages"]
143
  last_message = messages[-1]
144
- st.write(">> [Grade] Reviewing output:", last_message.content)
145
  if "Results: [Document" in last_message.content:
146
- st.write(">> [Grade] Documents found; proceed to generation.")
147
  return "generate"
148
  else:
149
- st.write(">> [Grade] No sufficient documents; try rewriting the query.")
150
  return "rewrite"
151
 
152
- def advanced_generate(state: AdvancedAgentState):
153
- """
154
- Generate a final answer by summarizing retrieved documents
155
- while incorporating self-reflection from the agent.
156
- """
157
- st.write(">> [Generate] Synthesizing final answer...")
158
  messages = state["messages"]
159
- original_question = messages[0].content
160
  last_message = messages[-1]
161
-
162
- # Extract retrieved documents if available
163
  docs = ""
164
  if "Results: [" in last_message.content:
165
- docs = last_message.content[last_message.content.find("Results: ["):]
166
-
167
- generate_prompt = f"""Using the following documents and the query below,
168
- summarize a comprehensive answer.
169
- Query: {original_question}
170
- Documents: {docs}
171
- Additionally, integrate the self-reflection notes from the agent to explain your reasoning.
172
- Focus on clarity and depth.
173
- """
174
  headers = {
175
  "Accept": "application/json",
176
- "Authorization": "Bearer sk-ADVANCEDKEY123",
177
  "Content-Type": "application/json"
178
  }
 
 
 
 
 
179
  data = {
180
  "model": "deepseek-chat",
181
- "messages": [{"role": "user", "content": generate_prompt}],
182
- "temperature": 0.65,
183
  "max_tokens": 1024
184
  }
 
185
  response = requests.post(
186
  "https://api.deepseek.com/v1/chat/completions",
187
  headers=headers,
188
  json=data,
189
  verify=False
190
  )
191
- if response.status_code != 200:
192
- raise Exception(f"API call failed during generation: {response.text}")
193
- final_text = response.json()['choices'][0]['message']['content']
194
- st.write(">> [Generate] Final Answer generated.")
195
- return {"messages": [AIMessage(content=final_text)]}
 
196
 
197
- def advanced_rewrite(state: AdvancedAgentState):
198
- """
199
- Rewrite the user query for clarity using a self-reflection process.
200
- """
201
- st.write(">> [Rewrite] Improving query clarity...")
202
  messages = state["messages"]
203
- original_query = messages[0].content
204
  headers = {
205
  "Accept": "application/json",
206
- "Authorization": "Bearer sk-ADVANCEDKEY123",
207
  "Content-Type": "application/json"
208
  }
209
  data = {
210
  "model": "deepseek-chat",
211
- "messages": [{"role": "user", "content": f"Please rewrite this query for more specificity and clarity: {original_query}"}],
212
- "temperature": 0.6,
213
  "max_tokens": 1024
214
  }
 
215
  response = requests.post(
216
  "https://api.deepseek.com/v1/chat/completions",
217
  headers=headers,
218
  json=data,
219
  verify=False
220
  )
221
- if response.status_code != 200:
222
- raise Exception(f"API call failed during rewrite: {response.text}")
223
- rewritten_query = response.json()['choices'][0]['message']['content']
224
- st.write(">> [Rewrite] Rewritten query:", rewritten_query)
225
- return {"messages": [AIMessage(content=rewritten_query)]}
226
-
227
- # -------------------------------------------------------------------
228
- # Custom Tools Condition: Advanced Multi‑Tool Routing
229
-
230
- advanced_tools_pattern = re.compile(r"Action: .*")
231
 
232
- def advanced_tools_condition(state: AdvancedAgentState):
 
 
233
  messages = state["messages"]
234
  last_message = messages[-1]
235
  content = last_message.content
236
- st.write(">> [Condition] Checking for tool invocation:", content)
237
- if advanced_tools_pattern.match(content):
238
- st.write(">> [Condition] Routing to tools retrieval.")
239
  return "tools"
240
- st.write(">> [Condition] No tool call detected; ending workflow.")
241
  return END
242
 
243
- # -------------------------------------------------------------------
244
- # BUILDING THE ADVANCED WORKFLOW WITH LANGGRAPH
 
 
 
 
 
 
 
 
 
 
 
245
 
246
- advanced_workflow = StateGraph(AdvancedAgentState)
247
- advanced_workflow.add_node("agent", advanced_agent)
248
- advanced_tool_node = ToolNode(tools) # Re-use our existing tools
249
- advanced_workflow.add_node("retrieve", advanced_tool_node)
250
- advanced_workflow.add_node("rewrite", advanced_rewrite)
251
- advanced_workflow.add_node("generate", advanced_generate)
252
-
253
- advanced_workflow.add_edge(START, "agent")
254
- advanced_workflow.add_conditional_edges(
255
- "agent",
256
- advanced_tools_condition,
257
- {"tools": "retrieve", END: END}
258
- )
259
- advanced_workflow.add_conditional_edges("retrieve", advanced_grade)
260
- advanced_workflow.add_edge("generate", END)
261
- advanced_workflow.add_edge("rewrite", "agent")
262
-
263
- advanced_app = advanced_workflow.compile()
264
-
265
- def process_advanced_question(user_question, app, config):
266
- """Process user question through the advanced workflow."""
267
  events = []
268
  for event in app.stream({"messages": [("user", user_question)]}, config):
269
  events.append(event)
270
  return events
271
 
272
- # -------------------------------------------------------------------
273
- # STREAMLIT UI: Multi‑Modal Advanced Chatbot Interface
274
-
275
  def main():
276
  st.set_page_config(
277
- page_title="Advanced Multi‑Modal AI Assistant",
278
  layout="wide",
279
  initial_sidebar_state="expanded"
280
  )
281
  st.markdown("""
282
  <style>
283
- .stApp { background-color: #f0f2f6; }
284
  .stButton > button { width: 100%; margin-top: 20px; }
285
- .data-box { padding: 15px; border-radius: 8px; margin: 8px 0; }
286
- .research-box { background-color: #e1f5fe; border-left: 5px solid #0288d1; }
287
- .live-box { background-color: #e8f5e9; border-left: 5px solid #2e7d32; }
288
  </style>
289
  """, unsafe_allow_html=True)
290
 
291
- # Sidebar: Display static and live data
292
  with st.sidebar:
293
- st.header("📚 Data Sources")
294
- st.subheader("Static Research")
295
  for text in research_texts:
296
  st.markdown(f'<div class="data-box research-box">{text}</div>', unsafe_allow_html=True)
297
- st.subheader("Live Updates")
298
  for text in development_texts:
299
- st.markdown(f'<div class="data-box live-box">{text}</div>', unsafe_allow_html=True)
300
 
301
- st.title("🤖 Advanced Multi‑Modal Agentic RAG Assistant")
302
  st.markdown("---")
303
-
304
- # Query Input (supports future multi‑modal extensions)
305
- query = st.text_area("Enter your question (or upload an image in future versions):", height=100, placeholder="e.g., What recent breakthroughs in AI are influencing real‑time projects?")
306
-
307
  col1, col2 = st.columns([1, 2])
308
  with col1:
309
- if st.button("🔍 Get Advanced Answer", use_container_width=True):
310
  if query:
311
- with st.spinner("Processing your advanced query..."):
312
- events = process_advanced_question(query, advanced_app, {"configurable": {"thread_id": "advanced1"}})
313
  for event in events:
314
  if 'agent' in event:
315
- with st.expander("🔄 Agent Analysis", expanded=True):
316
- st.info(event['agent']['messages'][0].content)
 
 
 
 
 
317
  elif 'generate' in event:
318
  st.markdown("### ✨ Final Answer:")
319
  st.success(event['generate']['messages'][0].content)
320
- elif 'rewrite' in event:
321
- st.warning("Query was unclear. Rewriting...")
322
- st.info(event['rewrite']['messages'][0].content)
323
  else:
324
- st.warning("⚠️ Please enter a question!")
325
  with col2:
326
  st.markdown("""
327
- ### How It Works:
328
- 1. **Advanced Agent**: Uses self-reflection to decide between static or live data.
329
- 2. **Tool Coordination**: Routes queries to the appropriate retrieval tool.
330
- 3. **Self‑Reflection & Iteration**: If retrieval fails, the query is rewritten for clarity.
331
- 4. **Final Synthesis**: Retrieved documents are summarized into a final, clear answer.
332
 
333
- ### Example Queries:
334
- - "What new breakthroughs in quantum machine learning are there?"
335
- - "Provide live updates on the progress of Project X."
336
- - "Summarize the recent advancements in transformer models."
337
  """)
338
 
339
  if __name__ == "__main__":
 
1
+ # Install necessary libraries (if not already installed)
2
+ # pip install langchain streamlit requests langgraph typing-extensions
3
 
4
+ from langchain.embeddings.openai import OpenAIEmbeddings
5
+ from langchain.vectorstores import Chroma
6
+ from langchain.schema import HumanMessage, AIMessage, ToolMessage
 
 
 
 
 
 
 
 
7
  from langchain.text_splitter import RecursiveCharacterTextSplitter
8
  from langgraph.graph import END, StateGraph, START
9
  from langgraph.prebuilt import ToolNode
10
  from langgraph.graph.message import add_messages
11
+ from typing_extensions import TypedDict, Annotated
12
+ from typing import Sequence
13
+ import re
14
+ import os
15
+ import streamlit as st
16
+ import requests
17
+ from langchain.tools.retriever import create_retriever_tool
18
 
19
+ # Create Dummy Data
 
 
 
20
# Static research snippets that seed the "research" vector store.
research_texts = [
    "Research Report: Results of a New AI Model Improving Image Recognition Accuracy to 98%",
    "Academic Paper Summary: Why Transformers Became the Mainstream Architecture in Natural Language Processing",
    "Latest Trends in Machine Learning Methods Using Quantum Computing"
]

# Project-status snippets that seed the "development" vector store.
development_texts = [
    "Project A: UI Design Completed, API Integration in Progress",
    "Project B: Testing New Feature X, Bug Fixes Needed",
    "Product Y: In the Performance Optimization Stage Before Release"
]

# Text splitting settings: small chunks with slight overlap so each short
# snippet above fits comfortably in a single embedding request.
splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)

# Generate Document objects from the raw strings.
research_docs = splitter.create_documents(research_texts)
development_docs = splitter.create_documents(development_texts)

# Create vector stores using OpenAI embeddings.
# NOTE(review): this runs at import time and requires OPENAI_API_KEY to be
# set in the environment — confirm that is acceptable for this deployment.
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-large"
)
research_vectorstore = Chroma.from_documents(
    documents=research_docs,
    embedding=embeddings,
    collection_name="research_collection"
)
development_vectorstore = Chroma.from_documents(
    documents=development_docs,
    embedding=embeddings,
    collection_name="development_collection"
)

# Default retrievers over each collection.
research_retriever = research_vectorstore.as_retriever()
development_retriever = development_vectorstore.as_retriever()

# Wrap each retriever as a LangChain tool so the graph's ToolNode can call it.
research_tool = create_retriever_tool(
    research_retriever,                                # Retriever object
    "research_db_tool",                                # Tool name
    "Search information from the research database."   # Description
)
development_tool = create_retriever_tool(
    development_retriever,
    "development_db_tool",
    "Search information from the development database."
)

# Combine the created tools for the workflow's retrieval node.
tools = [research_tool, development_tool]
71
 
72
+ # Define the agent state type
73
class AgentState(TypedDict):
    """Shared state flowing through the LangGraph workflow."""

    # Conversation history; the `add_messages` reducer appends new messages
    # to the running list instead of replacing it.
    messages: Annotated[Sequence[AIMessage | HumanMessage | ToolMessage], add_messages]
75
 
76
+ # Define the agent function for processing user questions
77
+ def agent(state: AgentState):
78
+ print("---CALL AGENT---")
 
 
 
 
 
79
  messages = state["messages"]
80
+ if isinstance(messages[0], tuple):
81
+ user_message = messages[0][1]
82
+ else:
83
+ user_message = messages[0].content
84
+
85
+ # Structured prompt for the agent
86
+ prompt = f"""Given this user question: "{user_message}"
87
+ If it's about research or academic topics, respond EXACTLY in this format:
88
+ SEARCH_RESEARCH: <search terms>
89
+
90
+ If it's about development status, respond EXACTLY in this format:
91
+ SEARCH_DEV: <search terms>
92
 
93
+ Otherwise, just answer directly.
 
 
 
 
 
 
 
94
  """
95
  headers = {
96
  "Accept": "application/json",
97
+ "Authorization": f"Bearer sk-1cddf19f9dc4466fa3ecea6fe10abec0",
98
  "Content-Type": "application/json"
99
  }
100
  data = {
101
  "model": "deepseek-chat",
102
+ "messages": [{"role": "user", "content": prompt}],
103
+ "temperature": 0.7,
104
  "max_tokens": 1024
105
  }
106
  response = requests.post(
 
109
  json=data,
110
  verify=False
111
  )
112
+ if response.status_code == 200:
113
+ response_text = response.json()['choices'][0]['message']['content']
114
+ print("Raw response:", response_text)
115
+ if "SEARCH_RESEARCH:" in response_text:
116
+ query = response_text.split("SEARCH_RESEARCH:")[1].strip()
117
+ results = research_retriever.invoke(query)
118
+ return {"messages": [AIMessage(content=f'Action: research_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}')]}
119
+ elif "SEARCH_DEV:" in response_text:
120
+ query = response_text.split("SEARCH_DEV:")[1].strip()
121
+ results = development_retriever.invoke(query)
122
+ return {"messages": [AIMessage(content=f'Action: development_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}')]}
123
+ else:
124
+ return {"messages": [AIMessage(content=response_text)]}
 
125
  else:
126
+ raise Exception(f"API call failed: {response.text}")
 
 
 
 
127
 
128
+ # Grading function to decide next step
129
def simple_grade_documents(state: AgentState):
    """Decide the next workflow node after a retrieval attempt.

    Returns "generate" when the most recent message embeds retrieved
    documents, otherwise "rewrite" so the question can be rephrased
    and retried.
    """
    latest = state["messages"][-1]
    print("Evaluating message:", latest.content)
    has_docs = "Results: [Document" in latest.content
    if has_docs:
        print("---DOCS FOUND, GO TO GENERATE---")
        return "generate"
    print("---NO DOCS FOUND, TRY REWRITE---")
    return "rewrite"
139
 
140
+ # Generation function to synthesize a final answer
141
def generate(state: AgentState):
    """Synthesize a final answer from the retrieved documents.

    Reads the original question (first message) and the retrieval output
    (last message), then asks the chat model to answer the question from
    those documents.

    Returns:
        dict with a single AIMessage carrying the model's answer.

    Raises:
        Exception: on any non-200 response from the API.
    """
    print("---GENERATE FINAL ANSWER---")
    messages = state["messages"]
    question = messages[0].content
    last_message = messages[-1]

    # Pull the "Results: [...]" payload out of the retrieval message, if any.
    docs = ""
    if "Results: [" in last_message.content:
        results_start = last_message.content.find("Results: [")
        docs = last_message.content[results_start:]
        print("Documents found:", docs)

    headers = {
        "Accept": "application/json",
        # SECURITY FIX: the API key was hard-coded in source (and leaked in
        # version control). Read it from the environment instead.
        "Authorization": f"Bearer {os.environ.get('DEEPSEEK_API_KEY', '')}",
        "Content-Type": "application/json"
    }
    # BUG FIX: the previous prompt hard-coded "advancements in AI" as the
    # topic; answer whatever the user actually asked.
    prompt = f"""Based on the following documents, answer the question below.
Question: {question}
Documents: {docs}
Focus on extracting and synthesizing the key findings relevant to the question.
"""
    data = {
        "model": "deepseek-chat",
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.7,
        "max_tokens": 1024
    }
    print("Sending generate request to API...")
    # SECURITY FIX: dropped verify=False (it disabled TLS certificate
    # checking); added a timeout so a stalled request cannot hang the UI.
    response = requests.post(
        "https://api.deepseek.com/v1/chat/completions",
        headers=headers,
        json=data,
        timeout=60
    )
    if response.status_code == 200:
        response_text = response.json()['choices'][0]['message']['content']
        print("Final Answer:", response_text)
        return {"messages": [AIMessage(content=response_text)]}
    else:
        raise Exception(f"API call failed: {response.text}")
180
 
181
+ # Rewrite function to refine unclear questions
182
def rewrite(state: AgentState):
    """Ask the model to rephrase an unclear question.

    Returns:
        dict with a single AIMessage carrying the rewritten question,
        which the workflow feeds back to the agent node.

    Raises:
        Exception: on any non-200 response from the API.
    """
    print("---REWRITE QUESTION---")
    messages = state["messages"]
    # Guard against an empty history; "N/A" keeps the prompt well-formed.
    original_question = messages[0].content if len(messages) > 0 else "N/A"
    headers = {
        "Accept": "application/json",
        # SECURITY FIX: secret comes from the environment, not source code.
        "Authorization": f"Bearer {os.environ.get('DEEPSEEK_API_KEY', '')}",
        "Content-Type": "application/json"
    }
    data = {
        "model": "deepseek-chat",
        "messages": [{"role": "user", "content": f"Rewrite this question to be more specific and clearer: {original_question}"}],
        "temperature": 0.7,
        "max_tokens": 1024
    }
    print("Sending rewrite request...")
    # SECURITY FIX: dropped verify=False; added a timeout.
    response = requests.post(
        "https://api.deepseek.com/v1/chat/completions",
        headers=headers,
        json=data,
        timeout=60
    )
    print("Status Code:", response.status_code)
    print("Response:", response.text)
    if response.status_code == 200:
        response_text = response.json()['choices'][0]['message']['content']
        print("Rewritten question:", response_text)
        return {"messages": [AIMessage(content=response_text)]}
    else:
        raise Exception(f"API call failed: {response.text}")
 
 
212
 
213
+ # Custom condition to check if a tool action is called
214
+ tools_pattern = re.compile(r"Action: .*")
215
+ def custom_tools_condition(state: AgentState):
216
  messages = state["messages"]
217
  last_message = messages[-1]
218
  content = last_message.content
219
+ print("Checking tools condition:", content)
220
+ if tools_pattern.match(content):
221
+ print("Moving to retrieve...")
222
  return "tools"
223
+ print("Moving to END...")
224
  return END
225
 
226
# Build the workflow using LangGraph's StateGraph:
#   START -> agent -> (retrieve | END)
#   retrieve -> (generate | rewrite); rewrite loops back to agent;
#   generate -> END.
workflow = StateGraph(AgentState)
workflow.add_node("agent", agent)
retrieve_node = ToolNode(tools)
workflow.add_node("retrieve", retrieve_node)
workflow.add_node("rewrite", rewrite)
workflow.add_node("generate", generate)
workflow.add_edge(START, "agent")
# custom_tools_condition returns "tools" (mapped to the retrieve node) or END.
workflow.add_conditional_edges("agent", custom_tools_condition, {"tools": "retrieve", END: END})
# simple_grade_documents returns the name of the next node directly.
workflow.add_conditional_edges("retrieve", simple_grade_documents)
workflow.add_edge("generate", END)
workflow.add_edge("rewrite", "agent")
# Compile into an executable app used by process_question().
app = workflow.compile()
239
 
240
+ # Function to process a user question through the workflow
241
def process_question(user_question, app, config):
    """Run *user_question* through the compiled workflow.

    Streams the graph execution and returns every emitted event, in order,
    as a list so callers can iterate it multiple times.
    """
    initial_state = {"messages": [("user", user_question)]}
    return [event for event in app.stream(initial_state, config)]
246
 
247
+ # Streamlit UI for interaction
 
 
248
def main():
    """Streamlit entry point: render the sidebar data, take a question,
    run it through the workflow, and display intermediate and final output."""
    st.set_page_config(
        page_title="AI Research & Development Assistant",
        layout="wide",
        initial_sidebar_state="expanded"
    )
    # Light CSS theme for the data boxes shown in the sidebar.
    st.markdown("""
    <style>
    .stApp { background-color: #f8f9fa; }
    .stButton > button { width: 100%; margin-top: 20px; }
    .data-box { padding: 20px; border-radius: 10px; margin: 10px 0; }
    .research-box { background-color: #e3f2fd; border-left: 5px solid #1976d2; }
    .dev-box { background-color: #e8f5e9; border-left: 5px solid #43a047; }
    </style>
    """, unsafe_allow_html=True)

    # Sidebar: show the raw snippets backing each vector store.
    with st.sidebar:
        st.header("📚 Available Data")
        st.subheader("Research Database")
        for text in research_texts:
            st.markdown(f'<div class="data-box research-box">{text}</div>', unsafe_allow_html=True)
        st.subheader("Development Database")
        for text in development_texts:
            st.markdown(f'<div class="data-box dev-box">{text}</div>', unsafe_allow_html=True)

    st.title("🤖 AI Research & Development Assistant")
    st.markdown("---")
    query = st.text_area("Enter your question:", height=100, placeholder="e.g., What is the latest advancement in AI research?")

    col1, col2 = st.columns([1, 2])
    with col1:
        if st.button("🔍 Get Answer", use_container_width=True):
            if query:
                with st.spinner('Processing your question...'):
                    events = process_question(query, app, {"configurable": {"thread_id": "1"}})
                    # Surface each graph event: agent steps show retrieved
                    # documents; generate steps carry the final answer.
                    for event in events:
                        if 'agent' in event:
                            with st.expander("🔄 Processing Step", expanded=True):
                                content = event['agent']['messages'][0].content
                                if "Results:" in content:
                                    st.markdown("### 📑 Retrieved Documents:")
                                    docs_start = content.find("Results:")
                                    docs = content[docs_start:]
                                    st.info(docs)
                        elif 'generate' in event:
                            st.markdown("### ✨ Final Answer:")
                            st.success(event['generate']['messages'][0].content)
            else:
                st.warning("⚠️ Please enter a question first!")
    with col2:
        # Static usage help alongside the input column.
        st.markdown("""
        ### 🎯 How to Use
        1. Type your question in the text box
        2. Click "Get Answer" to process
        3. View retrieved documents and final answer

        ### 💡 Example Questions
        - What are the latest advancements in AI research?
        - What is the status of Project A?
        - What are the current trends in machine learning?
        """)
308
 
309
  if __name__ == "__main__":