mgbam committed
Commit b7719bf · verified · 1 Parent(s): 8860cae

Update app.py

Files changed (1)
  1. app.py +275 -168
app.py CHANGED
@@ -1,233 +1,340 @@
- # Drug Repurposing Advisor: A Multi-Agent Workflow Example
- # This example uses dummy data for demonstration.
- # In a production system, replace the dummy data with real pharmaceutical databases.

- import os
- import json
- import requests
  import streamlit as st
- from typing import List, Union, Tuple
  from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
  from langchain.text_splitter import RecursiveCharacterTextSplitter
  from langgraph.graph import END, StateGraph, START
  from langgraph.prebuilt import ToolNode
  from langgraph.graph.message import add_messages
- from typing_extensions import TypedDict, Annotated
- from typing import Sequence

- # Dummy data for drug mechanism research and clinical trial outcomes
- drug_mechanism_texts = [
-     "Drug A: Inhibits enzyme X and modulates receptor Y; potential anti-inflammatory effects.",
-     "Drug B: Blocks ion channel Z; has been shown to reduce oxidative stress in preclinical models.",
-     "Drug C: Activates nuclear receptor W; exhibits neuroprotective properties."
  ]

- clinical_trials_texts = [
-     "Trial 1: Drug A repurposed for rheumatoid arthritis showed a 30% improvement in joint function.",
-     "Trial 2: Drug B evaluated in a pilot study for neurodegenerative disorders demonstrated a reduction in symptom severity.",
-     "Trial 3: Drug C tested in a phase II trial for multiple sclerosis reported significant reduction in relapse rates."
  ]

- # Text splitting settings
  splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)
- mechanism_docs = splitter.create_documents(drug_mechanism_texts)
- clinical_docs = splitter.create_documents(clinical_trials_texts)
-
- # Here you would typically create vector embeddings and vectorstores (e.g., using ChromaDB)
- # For demonstration, we define simple retriever functions that return dummy results.
- def mechanism_retriever(query: str) -> str:
-     # Dummy search: return first document that mentions a keyword from the query
-     for doc in drug_mechanism_texts:
-         if any(word.lower() in doc.lower() for word in query.split()):
-             return f"[Mechanism Doc]: {doc}"
-     return "No relevant mechanism data found."
-
- def clinical_retriever(query: str) -> str:
-     for doc in clinical_trials_texts:
-         if any(word.lower() in doc.lower() for word in query.split()):
-             return f"[Clinical Trial Doc]: {doc}"
-     return "No relevant clinical trial data found."
-
- # Define tools using a simple wrapper function
- def create_retriever_tool(retriever_func, tool_name: str, description: str):
-     def tool(query: str):
-         return retriever_func(query)
-     # Mimic a tool message (in a real system, you would wrap this in a ToolMessage object)
-     tool.__name__ = tool_name
-     tool.description = description
-     return tool
-
- mechanism_tool = create_retriever_tool(
-     mechanism_retriever,
-     "mechanism_db_tool",
-     "Search drug mechanism data for repurposing insights."
  )
- clinical_tool = create_retriever_tool(
-     clinical_retriever,
-     "clinical_db_tool",
-     "Search clinical trial outcomes for repurposing evidence."
  )
- tools = [mechanism_tool, clinical_tool]

- # Define the AgentState type for our workflow
- class AgentState(TypedDict):
-     messages: Annotated[Sequence[AIMessage | HumanMessage | ToolMessage], add_messages]

- # Agent function: Classifies queries as targeting drug mechanisms or clinical outcomes
- def agent(state: AgentState):
-     print("---CALL AGENT---")
-     messages = state["messages"]
-     user_message = messages[0].content if not isinstance(messages[0], tuple) else messages[0][1]

-     # Build a prompt to classify the query
-     prompt = f"""Given the user question: "{user_message}"
- If the question is about the molecular mechanism or pharmacodynamics, respond EXACTLY in this format:
- SEARCH_MECHANISM: <search terms>

- If it's about clinical trial outcomes, efficacy, or safety evidence, respond EXACTLY in this format:
- SEARCH_CLINICAL: <search terms>

- Otherwise, answer directly with general repurposing insights.
      """
-     # (Here we simulate a call to DeepSeek-R1 using a dummy response)
-     # In a real implementation, make an API call to DeepSeek-R1.
-     if "mechanism" in user_message.lower() or "how it works" in user_message.lower():
-         response_text = f"SEARCH_MECHANISM: {user_message}"
-     elif "trial" in user_message.lower() or "efficacy" in user_message.lower() or "safety" in user_message.lower():
-         response_text = f"SEARCH_CLINICAL: {user_message}"
-     else:
-         response_text = "The system did not classify your query. Please rephrase to focus on drug mechanism or clinical data."
-
-     print("Agent response:", response_text)
-     # Format response into expected tool call format
-     if "SEARCH_MECHANISM:" in response_text:
-         query = response_text.split("SEARCH_MECHANISM:")[1].strip()
-         result = mechanism_tool(query)
-         return {"messages": [AIMessage(content=f'Action: mechanism_db_tool\n{{"query": "{query}"}}\n\nResults: {result}')]}
-
-     elif "SEARCH_CLINICAL:" in response_text:
-         query = response_text.split("SEARCH_CLINICAL:")[1].strip()
-         result = clinical_tool(query)
-         return {"messages": [AIMessage(content=f'Action: clinical_db_tool\n{{"query": "{query}"}}\n\nResults: {result}')]}

      else:
          return {"messages": [AIMessage(content=response_text)]}

- # Grading function: Checks if retrieved documents were found
- def simple_grade_documents(state: AgentState):
      messages = state["messages"]
      last_message = messages[-1]
-     print("Evaluating message:", last_message.content)
-     if "Results:" in last_message.content and "No relevant" not in last_message.content:
-         print("---DATA FOUND, PROCEED TO GENERATE INSIGHTS---")
          return "generate"
      else:
-         print("---NO DATA FOUND, TRY REWRITE---")
          return "rewrite"

- # Generate function: Synthesizes repurposing insights from retrieved data
- def generate(state: AgentState):
-     print("---GENERATE FINAL INSIGHTS---")
      messages = state["messages"]
-     question = messages[0].content
      last_message = messages[-1]
-     # Extract data from results
-     data_start = last_message.content.find("Results:")
-     retrieved_data = last_message.content[data_start:] if data_start != -1 else "No data available"
-     # Build a prompt to synthesize insights
-     prompt = f"""Based on the following retrieved data:
- {retrieved_data}
- and considering the question:
- {question}
- Summarize potential drug repurposing opportunities and any recommended next steps for further investigation.
      """
-     # Dummy generation using a simple echo for demonstration.
-     final_answer = f"Summary Insight: Considering the data, a promising repurposing opportunity is to explore Drug A for anti-inflammatory applications beyond its original use, and Drug B might be repurposed for neurodegenerative conditions. Further research should validate these hypotheses."
-     print("Final Answer:", final_answer)
-     return {"messages": [AIMessage(content=final_answer)]}
-
- # Rewrite function: If no data is found, help rephrase the query for clarity
- def rewrite(state: AgentState):
-     print("---REWRITE QUESTION---")
      messages = state["messages"]
-     original_question = messages[0].content if messages else "N/A"
-     # Dummy rewrite that just appends "Please specify mechanism or trial data" for demonstration.
-     rewritten = f"{original_question} (Please specify if you are asking about drug mechanism or clinical trial outcomes.)"
-     print("Rewritten question:", rewritten)
-     return {"messages": [AIMessage(content=rewritten)]}
-
- # Decision function: Determines next step based on last message content
- def custom_tools_condition(state: AgentState):
      messages = state["messages"]
      last_message = messages[-1]
      content = last_message.content
-     if content.startswith("Action:"):
-         print("Tool action detected. Proceed to retrieval.")
          return "tools"
      return END

- # Create the workflow graph
- workflow = StateGraph(AgentState)
- workflow.add_node("agent", agent)
- retrieve_node = ToolNode(tools)
- workflow.add_node("retrieve", retrieve_node)
- workflow.add_node("rewrite", rewrite)
- workflow.add_node("generate", generate)
-
- # Define workflow edges
- workflow.add_edge(START, "agent")
- workflow.add_conditional_edges("agent", custom_tools_condition, {"tools": "retrieve", END: END})
- workflow.add_conditional_edges("retrieve", simple_grade_documents)
- workflow.add_edge("generate", END)
- workflow.add_edge("rewrite", "agent")
- app = workflow.compile()
-
- # Function to process a query through the workflow
- def process_question(user_question: str, app, config: dict):
      events = []
      for event in app.stream({"messages": [("user", user_question)]}, config):
          events.append(event)
      return events

- # Streamlit UI for the Drug Repurposing Advisor
  def main():
      st.set_page_config(
-         page_title="Drug Repurposing Advisor",
          layout="wide",
          initial_sidebar_state="expanded"
      )
-     st.title("💊 Drug Repurposing Advisor")
-     st.markdown("### Explore potential drug repurposing opportunities with AI-driven insights.")
-     query = st.text_area("Enter your research question:",
-                          placeholder="e.g., Can Drug A be repurposed for neurodegenerative diseases?")
      col1, col2 = st.columns([1, 2])
      with col1:
-         if st.button("🔍 Get Insights", use_container_width=True):
              if query:
-                 with st.spinner("Processing your query..."):
-                     events = process_question(query, app, {"configurable": {"thread_id": "1"}})
                      for event in events:
                          if 'agent' in event:
-                             with st.expander("Agent Processing Step", expanded=True):
-                                 content = event['agent']['messages'][0].content
-                                 st.markdown(f"**Agent Step Output:**\n\n{content}")
                          elif 'generate' in event:
-                             st.markdown("### Final Insights:")
                              st.success(event['generate']['messages'][0].content)
                          elif 'rewrite' in event:
-                             st.markdown("### Suggestion:")
-                             st.warning(event['rewrite']['messages'][0].content)
              else:
-                 st.warning("⚠️ Please enter a query.")
      with col2:
          st.markdown("""
-     **How to Use the Drug Repurposing Advisor:**
-     1. **Input Query:** Describe your research question. Specify whether you are interested in drug mechanisms or clinical outcomes.
-     2. **Get Insights:** Click "Get Insights" and let the system process your query.
-     3. **Review Output:** Explore the retrieved data and the final synthesized insights.
-     **Example Questions:**
-     - "How does Drug A work and could its mechanism be useful in treating inflammatory diseases?"
-     - "What are the clinical trial outcomes of Drug B and can it be repurposed for neurodegenerative conditions?"
      """)
  if __name__ == "__main__":
      main()
+ # Advanced Multi‑Modal Agentic RAG Chatbot
+ # pip install -r requirements.txt

  import streamlit as st
+ import requests
+ import json
+ import re
+ import os
+ from typing import Sequence
+ from typing_extensions import TypedDict, Annotated
+
+ from langchain_openai import OpenAIEmbeddings
+ from langchain_community.vectorstores import Chroma
  from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
  from langchain.text_splitter import RecursiveCharacterTextSplitter
  from langgraph.graph import END, StateGraph, START
  from langgraph.prebuilt import ToolNode
  from langgraph.graph.message import add_messages

+ # -------------------------------------------------------------------
+ # DATA SETUP: Static (research) and Dynamic (live updates) Databases
+
+ # Static research data (e.g., academic papers, reports)
+ research_texts = [
+     "Research Report: New algorithm boosts image recognition to 99%.",
+     "Paper: Transformers have redefined natural language processing paradigms.",
+     "Deep dive: Quantum computing’s emerging role in machine learning."
  ]

+ # Dynamic development/live data (e.g., real-time project updates)
+ development_texts = [
+     "Live Update: Project X API integration at 75% completion.",
+     "Status: Project Y is undergoing stress testing for scalability.",
+     "Alert: Immediate patch required for Project Z deployment issues."
  ]

+ # Text splitting settings: adaptable for multi‑modal data (could extend to images)
  splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)
+ research_docs = splitter.create_documents(research_texts)
+ development_docs = splitter.create_documents(development_texts)
+
+ # Create vector stores using advanced embeddings
+ embeddings = OpenAIEmbeddings(
+     model="text-embedding-3-large"
  )
+ research_vectorstore = Chroma.from_documents(
+     documents=research_docs,
+     embedding=embeddings,
+     collection_name="research_collection_adv"
+ )
+ development_vectorstore = Chroma.from_documents(
+     documents=development_docs,
+     embedding=embeddings,
+     collection_name="development_collection_adv"
  )

+ research_retriever = research_vectorstore.as_retriever()
+ development_retriever = development_vectorstore.as_retriever()

+ # Create tool wrappers for the two databases
+ from langchain.tools.retriever import create_retriever_tool
+ research_tool = create_retriever_tool(
+     research_retriever,
+     "research_db_tool",
+     "Search and retrieve static academic research documents."
+ )
+ development_tool = create_retriever_tool(
+     development_retriever,
+     "development_db_tool",
+     "Retrieve dynamic, real‑time development updates."
+ )
+ tools = [research_tool, development_tool]

+ # -------------------------------------------------------------------
+ # AGENT DESIGN: Advanced Agent with Self‑Reflection & Multi‑Tool Coordination

+ class AdvancedAgentState(TypedDict):
+     messages: Annotated[Sequence[AIMessage | HumanMessage | ToolMessage], add_messages]

+ def advanced_agent(state: AdvancedAgentState):
      """
+     A smarter agent that:
+     Receives a multi-modal query (text and potentially images)
+     Self-reflects on the query to decide if a real-time lookup is needed
+     Chooses the appropriate tool or even combines results if required.
+     """
+     st.write(">> [Agent] Processing query...")
+     messages = state["messages"]
+     user_message = messages[0].content if not isinstance(messages[0], tuple) else messages[0][1]

+     # Step 1: Initial Analysis and Self-Reflection
+     analysis_prompt = f"""You are an advanced multi-modal reasoning engine.
+ User Query: "{user_message}"
+ Analyze the query and decide:
+ - If it is about static academic research, output EXACTLY: ACTION_RESEARCH: <query>.
+ - If it is about dynamic development or live updates, output EXACTLY: ACTION_LIVE: <query>.
+ - Otherwise, output a direct answer with self-reflection.
+ Also, add a brief self-reflection on your reasoning process.
+ """
+     headers = {
+         "Accept": "application/json",
+         "Authorization": "Bearer sk-ADVANCEDKEY123",  # Use your secure key here
+         "Content-Type": "application/json"
+     }
+     data = {
+         "model": "deepseek-chat",
+         "messages": [{"role": "user", "content": analysis_prompt}],
+         "temperature": 0.6,
+         "max_tokens": 1024
+     }
+     response = requests.post(
+         "https://api.deepseek.com/v1/chat/completions",
+         headers=headers,
+         json=data,
+         verify=False
+     )
+     if response.status_code != 200:
+         raise Exception(f"API call failed: {response.text}")
+     response_text = response.json()['choices'][0]['message']['content']
+     st.write(">> [Agent] Analysis:", response_text)
+
+     # Step 2: Interpret the result and call the appropriate tool(s)
+     if "ACTION_RESEARCH:" in response_text:
+         query = response_text.split("ACTION_RESEARCH:")[1].strip().split("\n")[0]
+         results = research_retriever.invoke(query)
+         return {"messages": [AIMessage(content=f'Action: research_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}\n\nReflection: {response_text}')]}
+     elif "ACTION_LIVE:" in response_text:
+         query = response_text.split("ACTION_LIVE:")[1].strip().split("\n")[0]
+         results = development_retriever.invoke(query)
+         return {"messages": [AIMessage(content=f'Action: development_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}\n\nReflection: {response_text}')]}
      else:
+         # Direct answer with self-reflection
          return {"messages": [AIMessage(content=response_text)]}

+ # -------------------------------------------------------------------
+ # DECISION & GENERATION FUNCTIONS: Advanced Grading & Iterative Answering
+
+ def advanced_grade(state: AdvancedAgentState):
+     """
+     Checks the last message for valid document retrieval or if further refinement is needed.
+     """
      messages = state["messages"]
      last_message = messages[-1]
+     st.write(">> [Grade] Reviewing output:", last_message.content)
+     if "Results: [Document" in last_message.content:
+         st.write(">> [Grade] Documents found; proceed to generation.")
          return "generate"
      else:
+         st.write(">> [Grade] No sufficient documents; try rewriting the query.")
          return "rewrite"

+ def advanced_generate(state: AdvancedAgentState):
+     """
+     Generate a final answer by summarizing retrieved documents
+     while incorporating self-reflection from the agent.
+     """
+     st.write(">> [Generate] Synthesizing final answer...")
      messages = state["messages"]
+     original_question = messages[0].content
      last_message = messages[-1]
+
+     # Extract retrieved documents if available
+     docs = ""
+     if "Results: [" in last_message.content:
+         docs = last_message.content[last_message.content.find("Results: ["):]
+
+     generate_prompt = f"""Using the following documents and the query below,
+ summarize a comprehensive answer.
+ Query: {original_question}
+ Documents: {docs}
+ Additionally, integrate the self-reflection notes from the agent to explain your reasoning.
+ Focus on clarity and depth.
+ """
+     headers = {
+         "Accept": "application/json",
+         "Authorization": "Bearer sk-ADVANCEDKEY123",
+         "Content-Type": "application/json"
+     }
+     data = {
+         "model": "deepseek-chat",
+         "messages": [{"role": "user", "content": generate_prompt}],
+         "temperature": 0.65,
+         "max_tokens": 1024
+     }
+     response = requests.post(
+         "https://api.deepseek.com/v1/chat/completions",
+         headers=headers,
+         json=data,
+         verify=False
+     )
+     if response.status_code != 200:
+         raise Exception(f"API call failed during generation: {response.text}")
+     final_text = response.json()['choices'][0]['message']['content']
+     st.write(">> [Generate] Final Answer generated.")
+     return {"messages": [AIMessage(content=final_text)]}
+
+ def advanced_rewrite(state: AdvancedAgentState):
      """
+     Rewrite the user query for clarity using a self-reflection process.
+     """
+     st.write(">> [Rewrite] Improving query clarity...")
      messages = state["messages"]
+     original_query = messages[0].content
+     headers = {
+         "Accept": "application/json",
+         "Authorization": "Bearer sk-ADVANCEDKEY123",
+         "Content-Type": "application/json"
+     }
+     data = {
+         "model": "deepseek-chat",
+         "messages": [{"role": "user", "content": f"Please rewrite this query for more specificity and clarity: {original_query}"}],
+         "temperature": 0.6,
+         "max_tokens": 1024
+     }
+     response = requests.post(
+         "https://api.deepseek.com/v1/chat/completions",
+         headers=headers,
+         json=data,
+         verify=False
+     )
+     if response.status_code != 200:
+         raise Exception(f"API call failed during rewrite: {response.text}")
+     rewritten_query = response.json()['choices'][0]['message']['content']
+     st.write(">> [Rewrite] Rewritten query:", rewritten_query)
+     return {"messages": [AIMessage(content=rewritten_query)]}
+
+ # -------------------------------------------------------------------
+ # Custom Tools Condition: Advanced Multi‑Tool Routing
+
+ advanced_tools_pattern = re.compile(r"Action: .*")
+
+ def advanced_tools_condition(state: AdvancedAgentState):
      messages = state["messages"]
      last_message = messages[-1]
      content = last_message.content
+     st.write(">> [Condition] Checking for tool invocation:", content)
+     if advanced_tools_pattern.match(content):
+         st.write(">> [Condition] Routing to tools retrieval.")
          return "tools"
+     st.write(">> [Condition] No tool call detected; ending workflow.")
      return END

+ # -------------------------------------------------------------------
+ # BUILDING THE ADVANCED WORKFLOW WITH LANGGRAPH
+
+ advanced_workflow = StateGraph(AdvancedAgentState)
+ advanced_workflow.add_node("agent", advanced_agent)
+ advanced_tool_node = ToolNode(tools)  # Re-use our existing tools
+ advanced_workflow.add_node("retrieve", advanced_tool_node)
+ advanced_workflow.add_node("rewrite", advanced_rewrite)
+ advanced_workflow.add_node("generate", advanced_generate)
+
+ advanced_workflow.add_edge(START, "agent")
+ advanced_workflow.add_conditional_edges(
+     "agent",
+     advanced_tools_condition,
+     {"tools": "retrieve", END: END}
+ )
+ advanced_workflow.add_conditional_edges("retrieve", advanced_grade)
+ advanced_workflow.add_edge("generate", END)
+ advanced_workflow.add_edge("rewrite", "agent")
+
+ advanced_app = advanced_workflow.compile()
+
+ def process_advanced_question(user_question, app, config):
+     """Process user question through the advanced workflow."""
      events = []
      for event in app.stream({"messages": [("user", user_question)]}, config):
          events.append(event)
      return events

+ # -------------------------------------------------------------------
+ # STREAMLIT UI: Multi‑Modal Advanced Chatbot Interface
+
  def main():
      st.set_page_config(
+         page_title="Advanced Multi‑Modal AI Assistant",
          layout="wide",
          initial_sidebar_state="expanded"
      )
+     st.markdown("""
+     <style>
+     .stApp { background-color: #f0f2f6; }
+     .stButton > button { width: 100%; margin-top: 20px; }
+     .data-box { padding: 15px; border-radius: 8px; margin: 8px 0; }
+     .research-box { background-color: #e1f5fe; border-left: 5px solid #0288d1; }
+     .live-box { background-color: #e8f5e9; border-left: 5px solid #2e7d32; }
+     </style>
+     """, unsafe_allow_html=True)
+
+     # Sidebar: Display static and live data
+     with st.sidebar:
+         st.header("📚 Data Sources")
+         st.subheader("Static Research")
+         for text in research_texts:
+             st.markdown(f'<div class="data-box research-box">{text}</div>', unsafe_allow_html=True)
+         st.subheader("Live Updates")
+         for text in development_texts:
+             st.markdown(f'<div class="data-box live-box">{text}</div>', unsafe_allow_html=True)
+
+     st.title("🤖 Advanced Multi‑Modal Agentic RAG Assistant")
+     st.markdown("---")
+
+     # Query Input (supports future multi‑modal extensions)
+     query = st.text_area("Enter your question (or upload an image in future versions):", height=100, placeholder="e.g., What recent breakthroughs in AI are influencing real‑time projects?")
+
      col1, col2 = st.columns([1, 2])
      with col1:
+         if st.button("🔍 Get Advanced Answer", use_container_width=True):
              if query:
+                 with st.spinner("Processing your advanced query..."):
+                     events = process_advanced_question(query, advanced_app, {"configurable": {"thread_id": "advanced1"}})
                      for event in events:
                          if 'agent' in event:
+                             with st.expander("🔄 Agent Analysis", expanded=True):
+                                 st.info(event['agent']['messages'][0].content)
                          elif 'generate' in event:
+                             st.markdown("### Final Answer:")
                              st.success(event['generate']['messages'][0].content)
                          elif 'rewrite' in event:
+                             st.warning("Query was unclear. Rewriting...")
+                             st.info(event['rewrite']['messages'][0].content)
              else:
+                 st.warning("⚠️ Please enter a question!")
      with col2:
          st.markdown("""
+     ### How It Works:
+     1. **Advanced Agent**: Uses self-reflection to decide between static or live data.
+     2. **Tool Coordination**: Routes queries to the appropriate retrieval tool.
+     3. **Self‑Reflection & Iteration**: If retrieval fails, the query is rewritten for clarity.
+     4. **Final Synthesis**: Retrieved documents are summarized into a final, clear answer.
+
+     ### Example Queries:
+     - "What new breakthroughs in quantum machine learning are there?"
+     - "Provide live updates on the progress of Project X."
+     - "Summarize the recent advancements in transformer models."
      """)
+
  if __name__ == "__main__":
      main()