mgbam committed on
Commit 1d3eda8 · verified · 1 Parent(s): 541be76

Update app.py

Files changed (1)
  1. app.py +35 -317
app.py CHANGED
@@ -1,332 +1,50 @@
-# app.py
-# Multi-Agent Chatbot with LangGraph, DeepSeek-R1, Function Calls, and Agentic RAG
-# Using local (in-memory) Chroma to avoid tenant errors.
-#
-# Ensure that the environment variables OPENAI_API_KEY and DEEP_SEEK_API are set in your HF Space Secrets.
-
-import os
-import re
-import logging
 import streamlit as st
-import requests
-from typing import Sequence
-from typing_extensions import TypedDict, Annotated
-
-# LangChain imports
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.vectorstores import Chroma
-from langchain.schema import HumanMessage, AIMessage
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.tools.retriever import create_retriever_tool
-
-# Chroma in-memory settings
-from chromadb.config import Settings
-
-# LangGraph imports
-from langgraph.graph import END, StateGraph, START
-from langgraph.prebuilt import ToolNode
-from langgraph.graph.message import add_messages
-
-# Configure logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# --- Dummy Data Setup ---
-research_texts = [
-    "Research Report: Results of a New AI Model Improving Image Recognition Accuracy to 98%",
-    "Academic Paper Summary: Why Transformers Became the Mainstream Architecture in Natural Language Processing",
-    "Latest Trends in Machine Learning Methods Using Quantum Computing"
-]
-
-development_texts = [
-    "Project A: UI Design Completed, API Integration in Progress",
-    "Project B: Testing New Feature X, Bug Fixes Needed",
-    "Product Y: In the Performance Optimization Stage Before Release"
-]
-
-# --- Chroma Client Settings (in-memory) ---
-client_settings = Settings(
-    chroma_api_impl="local",
-    persist_directory=None # Use None for ephemeral in-memory DB; or specify a folder to persist data.
-)
-
-# --- Preprocessing & Embeddings ---
-splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)
-research_docs = splitter.create_documents(research_texts)
-development_docs = splitter.create_documents(development_texts)
-
-embeddings = OpenAIEmbeddings(
-    model="text-embedding-3-large",
-    openai_api_key=os.environ.get("OPENAI_API_KEY") # Set this in your HF Secrets.
-)
-
-# Create vector stores using local in-memory Chroma
-research_vectorstore = Chroma.from_documents(
-    documents=research_docs,
-    embedding=embeddings,
-    collection_name="research_collection",
-    client_settings=client_settings
-)
-development_vectorstore = Chroma.from_documents(
-    documents=development_docs,
-    embedding=embeddings,
-    collection_name="development_collection",
-    client_settings=client_settings
-)
-
-research_retriever = research_vectorstore.as_retriever()
-development_retriever = development_vectorstore.as_retriever()
-
-research_tool = create_retriever_tool(
-    research_retriever,
-    "research_db_tool",
-    "Search information from the research database."
-)
-development_tool = create_retriever_tool(
-    development_retriever,
-    "development_db_tool",
-    "Search information from the development database."
-)
-
-tools = [research_tool, development_tool]
-
-# --- Agent and Workflow Functions ---
-# Use only AIMessage and HumanMessage for message types.
-class AgentState(TypedDict):
-    messages: Annotated[Sequence[AIMessage | HumanMessage], add_messages]
-
-def agent(state: AgentState):
-    logger.info("Agent invoked")
-    messages = state["messages"]
-    user_message = messages[0][1] if isinstance(messages[0], tuple) else messages[0].content

-    prompt = f"""Given this user question: "{user_message}"
-If it's about research or academic topics, respond EXACTLY in this format:
-SEARCH_RESEARCH: <search terms>
-
-If it's about development status, respond EXACTLY in this format:
-SEARCH_DEV: <search terms>
-
-Otherwise, just answer directly.
-"""
-    headers = {
-        "Accept": "application/json",
-        "Authorization": f"Bearer {os.environ.get('DEEP_SEEK_API')}",
-        "Content-Type": "application/json"
-    }
-    data = {
-        "model": "deepseek-chat",
-        "messages": [{"role": "user", "content": prompt}],
-        "temperature": 0.7,
-        "max_tokens": 1024
-    }
-    response = requests.post(
-        "https://api.deepseek.com/v1/chat/completions",
-        headers=headers,
-        json=data,
-        verify=False
-    )
-    if response.status_code == 200:
-        response_text = response.json()['choices'][0]['message']['content']
-        logger.info(f"DeepSeek response: {response_text}")
-        if "SEARCH_RESEARCH:" in response_text:
-            query = response_text.split("SEARCH_RESEARCH:")[1].strip()
-            results = research_retriever.invoke(query)
-            return {"messages": [AIMessage(content=f'Action: research_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}')]}
-        elif "SEARCH_DEV:" in response_text:
-            query = response_text.split("SEARCH_DEV:")[1].strip()
-            results = development_retriever.invoke(query)
-            return {"messages": [AIMessage(content=f'Action: development_db_tool\n{{"query": "{query}"}}\n\nResults: {str(results)}')]}
-        else:
-            return {"messages": [AIMessage(content=response_text)]}
-    else:
-        error_msg = f"DeepSeek API call failed: {response.text}"
-        logger.error(error_msg)
-        raise Exception(error_msg)
-
-def simple_grade_documents(state: AgentState):
-    last_message = state["messages"][-1]
-    logger.info(f"Grading message: {last_message.content}")
-    if "Results: [Document" in last_message.content:
-        return "generate"
-    else:
-        return "rewrite"
-
-def generate(state: AgentState):
-    logger.info("Generating final answer")
-    messages = state["messages"]
-    question = messages[0].content if not isinstance(messages[0], tuple) else messages[0][1]
-    last_message = messages[-1]
-    docs = ""
-    if "Results: [" in last_message.content:
-        docs = last_message.content[last_message.content.find("Results: ["):]
-    headers = {
-        "Accept": "application/json",
-        "Authorization": f"Bearer {os.environ.get('DEEP_SEEK_API')}",
-        "Content-Type": "application/json"
-    }
-    prompt = f"""Based on these research documents, summarize the latest advancements in AI:
-Question: {question}
-Documents: {docs}
-Focus on extracting and synthesizing the key findings from the research papers.
-"""
-    data = {
-        "model": "deepseek-chat",
-        "messages": [{"role": "user", "content": prompt}],
-        "temperature": 0.7,
-        "max_tokens": 1024
-    }
-    response = requests.post(
-        "https://api.deepseek.com/v1/chat/completions",
-        headers=headers,
-        json=data,
-        verify=False
-    )
-    if response.status_code == 200:
-        response_text = response.json()['choices'][0]['message']['content']
-        return {"messages": [AIMessage(content=response_text)]}
-    else:
-        error_msg = f"DeepSeek API generate call failed: {response.text}"
-        logger.error(error_msg)
-        raise Exception(error_msg)
-
-def rewrite(state: AgentState):
-    logger.info("Rewriting question")
-    original_question = state["messages"][0].content if state["messages"] else "N/A"
-    headers = {
-        "Accept": "application/json",
-        "Authorization": f"Bearer {os.environ.get('DEEP_SEEK_API')}",
-        "Content-Type": "application/json"
-    }
-    data = {
-        "model": "deepseek-chat",
-        "messages": [{"role": "user", "content": f"Rewrite this question to be more specific and clearer: {original_question}"}],
-        "temperature": 0.7,
-        "max_tokens": 1024
-    }
-    response = requests.post(
-        "https://api.deepseek.com/v1/chat/completions",
-        headers=headers,
-        json=data,
-        verify=False
-    )
-    if response.status_code == 200:
-        response_text = response.json()['choices'][0]['message']['content']
-        return {"messages": [AIMessage(content=response_text)]}
-    else:
-        error_msg = f"DeepSeek API rewrite call failed: {response.text}"
-        logger.error(error_msg)
-        raise Exception(error_msg)
-
-tools_pattern = re.compile(r"Action: .*")
-def custom_tools_condition(state: AgentState):
-    last_message = state["messages"][-1]
-    if tools_pattern.match(last_message.content):
-        return "tools"
-    return END
-
-# Build the workflow with LangGraph's StateGraph
-workflow = StateGraph(AgentState)
-workflow.add_node("agent", agent)
-retrieve_node = ToolNode(tools)
-workflow.add_node("retrieve", retrieve_node)
-workflow.add_node("rewrite", rewrite)
-workflow.add_node("generate", generate)
-workflow.add_edge(START, "agent")
-workflow.add_conditional_edges("agent", custom_tools_condition, {"tools": "retrieve", END: END})
-workflow.add_conditional_edges("retrieve", simple_grade_documents)
-workflow.add_edge("generate", END)
-workflow.add_edge("rewrite", "agent")
-app_workflow = workflow.compile()
-
-def process_question(user_question, app, config):
-    events = []
-    for event in app.stream({"messages": [("user", user_question)]}, config):
-        events.append(event)
-    return events
-
-# --- Streamlit UI ---
 def main():
     st.set_page_config(
-        page_title="Multi-Agent Chatbot",
+        page_title="Enhanced Contrast Chatbot",
         layout="wide",
         initial_sidebar_state="expanded"
     )

-    # Simple CSS for improved visibility
+    # Custom CSS to improve text visibility
     st.markdown("""
-    <style>
-    .stApp {
-        background-color: #ffffff;
-    }
-    .stButton > button {
-        width: 100%;
-        margin-top: 20px;
-    }
-    .data-box {
-        padding: 20px;
-        border-radius: 10px;
-        margin: 10px 0;
-        background-color: #f0f0f0;
-    }
-    .research-box {
-        border-left: 5px solid #1976d2;
-        color: #111 !important;
-    }
-    .dev-box {
-        border-left: 5px solid #43a047;
-        color: #111 !important;
-    }
-    </style>
+    <style>
+    /* Force a white background for the main app area */
+    .stApp {
+        background-color: #ffffff !important;
+    }
+
+    /* Make text darker for better contrast */
+    html, body, [class^="css"] {
+        color: #111111 !important;
+    }
+
+    /* Adjust label text (like "Enter your question") */
+    .stTextArea label {
+        color: #111111 !important;
+    }
+
+    /* Make sure sidebar text is also dark */
+    .css-1v3fvcr {
+        color: #111111 !important;
+    }
+
+    /* Example: You can also adjust the background color of
+       your "data-box" classes if needed */
+    .data-box {
+        background-color: #f0f0f0 !important;
+        color: #111111 !important;
+    }
+    </style>
     """, unsafe_allow_html=True)

-    # Sidebar with data
-    with st.sidebar:
-        st.header("📚 Available Data")
-        st.subheader("Research Database")
-        for text in research_texts:
-            st.markdown(f'<div class="data-box research-box">{text}</div>', unsafe_allow_html=True)
-        st.subheader("Development Database")
-        for text in development_texts:
-            st.markdown(f'<div class="data-box dev-box">{text}</div>', unsafe_allow_html=True)
-
-    st.title("🤖 Multi-Agent Chatbot")
-    st.markdown("---")
-
-    query = st.text_area("Enter your question:", height=100, placeholder="e.g., What is the latest advancement in AI research?")
-
-    col1, col2 = st.columns([1, 2])
-    with col1:
-        if st.button("🔍 Get Answer", use_container_width=True):
-            if query:
-                with st.spinner("Processing your question..."):
-                    events = process_question(query, app_workflow, {"configurable": {"thread_id": "1"}})
-                    for event in events:
-                        # Display processing steps
-                        if 'agent' in event:
-                            with st.expander("🔄 Processing Step", expanded=True):
-                                content = event['agent']['messages'][0].content
-                                if "Results:" in content:
-                                    st.markdown("### 📑 Retrieved Documents:")
-                                    docs = content[content.find("Results:"):]
-                                    st.info(docs)
-                        elif 'generate' in event:
-                            st.markdown("### ✨ Final Answer:")
-                            st.success(event['generate']['messages'][0].content)
-            else:
-                st.warning("⚠️ Please enter a question first!")
-    with col2:
-        st.markdown("""
-        ### 🎯 How to Use
-        1. Type your question in the text box.
-        2. Click "Get Answer" to process.
-        3. View retrieved documents and the final answer.
+    st.title("Enhanced Contrast Chatbot")
+    st.markdown("Try typing your question below to see if the text is clearer now:")

-        ### 💡 Example Questions
-        - What are the latest advancements in AI research?
-        - What is the status of Project A?
-        - What are the current trends in machine learning?
-        """)
+    user_query = st.text_area("Enter your question here:")
+    if st.button("Submit"):
+        st.write("Your query:", user_query)

 if __name__ == "__main__":
     main()