mgbam committed
Commit fc628b4 · verified · 1 Parent(s): c436283

Update app.py

Files changed (1):
  1. app.py +113 -134
app.py CHANGED
@@ -9,8 +9,8 @@ from langchain_core.documents import Document
 from langgraph.graph import END, StateGraph
 from typing_extensions import TypedDict, Annotated
 from typing import Sequence, Dict, List, Optional, Any
-from langgraph.graph.message import add_messages  # Add this import
 import chromadb
+from chromadb.config import Settings
 import numpy as np
 import os
 import streamlit as st
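
Dropping the `add_messages` import implies the graph state no longer uses LangGraph's message-appending reducer, so each node's returned `messages` list replaces the previous one rather than accumulating. A minimal sketch of a state schema consistent with the keys used later in this diff (`messages`, `context`, `metadata`); the actual `AgentState` definition is outside the hunk, so treat this as an assumption:

```python
from typing import Any, Dict, Sequence
from typing_extensions import TypedDict
from langchain_core.messages import BaseMessage

# Hypothetical AgentState: without an add_messages reducer annotation,
# LangGraph overwrites "messages" on each node return instead of appending.
class AgentState(TypedDict):
    messages: Sequence[BaseMessage]
    context: Dict[str, Any]
    metadata: Dict[str, Any]
```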
@@ -41,30 +41,24 @@ class ResearchConfig:
     MAX_CONCURRENT_REQUESTS = 5
     EMBEDDING_DIMENSIONS = 1536
     RESEARCH_EMBEDDING = np.random.randn(1536)
+    TENANT = "research_tenant"
+    DATABASE = "ai_papers_db"
 
     DOCUMENT_MAP = {
         "CV-Transformer Hybrid Architecture": {
-            "title": "Research Report: CV-Transformer Model (98% Accuracy)",
+            "title": "Hybrid CV-Transformer Model (98% Accuracy)",
             "content": """
-                Hybrid architecture combining CNNs and Transformers achieves 98% image recognition accuracy.
-                Key equation: $f(x) = \text{Attention}(\text{CNN}(x))$
-                Validation on ImageNet-1k: Top-1 Accuracy 98.2%, Inference Speed 42ms/img
+                Combines CNN feature extraction with transformer attention mechanisms.
+                Key equation: $f(x) = \text{Softmax}(\frac{QK^T}{\sqrt{d_k}})V$
+                ImageNet-1k: 98.2% Top-1 Accuracy, 42ms/inference
             """
         },
         "Transformer Architecture Analysis": {
-            "title": "Academic Paper: Transformers in NLP",
+            "title": "Transformer Architectures in NLP",
             "content": """
-                Self-attention mechanism remains core innovation:
+                Self-attention mechanisms enable parallel processing of sequences.
                 $\text{Attention}(Q, K, V) = \text{softmax}(\frac{QK^T}{\sqrt{d_k}})V$
-                GLUE Benchmark Score: 92.4%, Training Efficiency: 1.8x vs RNNs
-            """
-        },
-        "Quantum ML Frontiers": {
-            "title": "Quantum Machine Learning Review",
-            "content": """
-                Quantum gradient descent enables faster optimization:
-                $\theta_{t+1} = \theta_t - \eta \nabla_\theta \mathcal{L}(\theta_t)$
-                100x speedup on optimization tasks, 58% energy reduction
+                GLUE Score: 92.4%, Training Efficiency: 1.8x vs RNNs
             """
         }
     }
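
One caveat on both the removed and added document strings: the LaTeX lives in ordinary (non-raw) triple-quoted strings, so Python interprets escapes like `\t` in `\text{...}` and `\f` in `\frac{...}` before the text reaches any renderer. Raw strings avoid this; a quick check:

```python
plain = "$\text{Attention}(Q, K, V)$"   # \t silently becomes a tab
raw = r"$\text{Attention}(Q, K, V)$"    # backslash preserved

print("\t" in plain)  # True: the LaTeX command is corrupted
print("\t" in raw)    # False: r"" keeps \text intact
```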
@@ -73,11 +67,11 @@ class ResearchConfig:
     {context}
 
     Respond in MARKDOWN with:
-    1. **Key Technical Contributions** (bullet points with equations)
-    2. **Novel Methodologies** (algorithms with math notation)
+    1. **Key Innovations** (mathematical formulations)
+    2. **Methodologies** (algorithms & architectures)
     3. **Empirical Results** (comparative metrics)
-    4. **Applications** (domain-specific implementations)
-    5. **Limitations** (theoretical/practical boundaries)
+    4. **Applications** (industry use cases)
+    5. **Limitations** (theoretical boundaries)
 
     Include LaTeX equations where applicable."""
 
@@ -89,16 +83,36 @@ if not ResearchConfig.DEEPSEEK_API_KEY:
     st.stop()
 
 # ------------------------------
-# Document Processing System
+# ChromaDB Document Manager (Fixed)
 # ------------------------------
 class QuantumDocumentManager:
     def __init__(self):
-        self.client = chromadb.PersistentClient(path=ResearchConfig.CHROMA_PATH)
+        self.client_settings = Settings(
+            chroma_db_impl="duckdb+parquet",
+            persist_directory=ResearchConfig.CHROMA_PATH,
+            anonymized_telemetry=False
+        )
+        self.client = chromadb.Client(self.client_settings)
+        self._initialize_tenant_db()
         self.embeddings = OpenAIEmbeddings(
             model="text-embedding-3-large",
             dimensions=ResearchConfig.EMBEDDING_DIMENSIONS
         )
+
+    def _initialize_tenant_db(self):
+        try:
+            self.client.create_tenant(ResearchConfig.TENANT)
+        except chromadb.db.base.UniqueConstraintError:
+            pass  # Tenant exists
+
+        try:
+            self.client.create_database(
+                ResearchConfig.DATABASE,
+                tenant=ResearchConfig.TENANT
+            )
+        except chromadb.db.base.UniqueConstraintError:
+            pass  # Database exists
+
     def create_collection(self, document_map: Dict[str, Dict[str, str]], collection_name: str) -> Chroma:
         splitter = RecursiveCharacterTextSplitter(
             chunk_size=ResearchConfig.CHUNK_SIZE,
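
For context on this hunk: `Settings(chroma_db_impl="duckdb+parquet", ...)` is the pre-0.4 ChromaDB configuration style, and 0.4+ releases reject it; conversely, `PersistentClient` and tenants/databases only exist in 0.4+, where tenant management normally goes through an admin client rather than the regular one. A hedged sketch of the modern style, modeled on the chromadb multi-tenancy docs (verify the calls against the installed version):

```python
import chromadb
from chromadb.config import Settings

# chromadb >= 0.4: persistence is a client-level concern.
client = chromadb.PersistentClient(path="./chroma_db")

# Tenants/databases (>= 0.4.15) are provisioned via an AdminClient.
admin = chromadb.AdminClient(Settings(
    chroma_api_impl="chromadb.api.segment.SegmentAPI",
    is_persistent=True,
    persist_directory="./chroma_db",
))
try:
    admin.create_tenant("research_tenant")
    admin.create_database("ai_papers_db", tenant="research_tenant")
except Exception:
    pass  # already provisioned
```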
@@ -123,6 +137,10 @@ class QuantumDocumentManager:
             documents=docs,
             embedding=self.embeddings,
             collection_name=collection_name,
+            client=self.client,
+            tenant=ResearchConfig.TENANT,
+            database=ResearchConfig.DATABASE,
+            collection_metadata={"hnsw:space": "cosine"},
             ids=[self._document_id(doc.page_content) for doc in docs]
         )
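Whether `Chroma.from_documents` forwards the `tenant`/`database` keyword arguments depends on the LangChain version in use, so those two lines are worth verifying; `client`, `collection_metadata`, and `ids` are long-standing parameters. With `"hnsw:space": "cosine"` set, a quick usage check against the returned collection (the query text is illustrative):

```python
# Cosine space: scores are distances, lower means more similar.
hits = research_docs.similarity_search_with_score("self-attention", k=2)
for doc, score in hits:
    print(f"{score:.3f}  {doc.metadata['title']}")
```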
 
@@ -131,10 +149,10 @@ class QuantumDocumentManager:
 
 # Initialize document system
 qdm = QuantumDocumentManager()
-research_docs = qdm.create_collection(ResearchConfig.DOCUMENT_MAP, "research")
+research_docs = qdm.create_collection(ResearchConfig.DOCUMENT_MAP, "research_papers")
 
 # ------------------------------
-# Intelligent Retrieval System
+# Retrieval System
 # ------------------------------
 class ResearchRetriever:
     def __init__(self):
@@ -150,7 +168,7 @@ class ResearchRetriever:
     def retrieve(self, query: str) -> List[Document]:
         try:
             docs = self.retriever.invoke(query)
-            if not docs:
+            if len(docs) < 1:
                 raise ValueError("No relevant documents found")
             return docs
         except Exception as e:
@@ -158,7 +176,7 @@
             return []
 
 # ------------------------------
-# Robust Processing Core
+# Analysis Processor
 # ------------------------------
 class CognitiveProcessor:
     def __init__(self):
@@ -206,7 +224,7 @@ class CognitiveProcessor:
         return valid[np.argmax(tech_scores)]
 
 # ------------------------------
-# Validation Workflow Engine
+# Workflow Engine
 # ------------------------------
 class ResearchWorkflow:
     def __init__(self):
@@ -216,11 +234,11 @@ class ResearchWorkflow:
         self._build_workflow()
 
     def _build_workflow(self):
-        self.workflow.add_node("ingest", self.ingest_query)
-        self.workflow.add_node("retrieve", self.retrieve_documents)
-        self.workflow.add_node("analyze", self.analyze_content)
-        self.workflow.add_node("validate", self.validate_output)
-        self.workflow.add_node("refine", self.refine_results)
+        self.workflow.add_node("ingest", self.ingest)
+        self.workflow.add_node("retrieve", self.retrieve)
+        self.workflow.add_node("analyze", self.analyze)
+        self.workflow.add_node("validate", self.validate)
+        self.workflow.add_node("refine", self.refine)
 
         self.workflow.set_entry_point("ingest")
         self.workflow.add_edge("ingest", "retrieve")
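
The edges connecting `validate` to `refine` fall outside this hunk's context window. A sketch of wiring that would match `_quality_check` further down (its `"valid"`/`"invalid"` return values suggest a conditional edge; the mapping here is an assumption, not the author's confirmed code):

```python
# Hypothetical routing consistent with _quality_check below: a failed
# validation loops back through "refine" before re-validating.
self.workflow.add_edge("retrieve", "analyze")
self.workflow.add_edge("analyze", "validate")
self.workflow.add_conditional_edges(
    "validate",
    self._quality_check,
    {"valid": END, "invalid": "refine"},
)
self.workflow.add_edge("refine", "validate")
```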
@@ -235,100 +253,93 @@ class ResearchWorkflow:
 
         self.app = self.workflow.compile()
 
-    def ingest_query(self, state: AgentState) -> Dict:
+    def ingest(self, state: AgentState) -> Dict:
         try:
             query = state["messages"][-1].content
             return {
-                "messages": [AIMessage(content="Query ingested successfully")],
-                "context": {"raw_query": query},
+                "messages": [AIMessage(content="Query ingested")],
+                "context": {"query": query},
                 "metadata": {"timestamp": datetime.now().isoformat()}
             }
         except Exception as e:
             return self._error_state(f"Ingestion Error: {str(e)}")
 
-    def retrieve_documents(self, state: AgentState) -> Dict:
+    def retrieve(self, state: AgentState) -> Dict:
         try:
-            docs = self.retriever.retrieve(state["context"]["raw_query"])
-            if not docs:
-                return self._error_state("Document correlation failure - no relevant papers found")
+            docs = self.retriever.retrieve(state["context"]["query"])
             return {
-                "messages": [AIMessage(content=f"Retrieved {len(docs)} documents")],
-                "context": {"documents": docs}
+                "messages": [AIMessage(content=f"Found {len(docs)} relevant papers")],
+                "context": {"docs": docs}
             }
         except Exception as e:
             return self._error_state(f"Retrieval Error: {str(e)}")
 
-    def analyze_content(self, state: AgentState) -> Dict:
+    def analyze(self, state: AgentState) -> Dict:
         try:
-            docs = state["context"]["documents"]
-            context = "\n\n".join([f"### {doc.metadata['title']}\n{doc.page_content}" for doc in docs])
+            context = "\n\n".join([
+                f"### {doc.metadata['title']}\n{doc.page_content}"
+                for doc in state["context"]["docs"]
+            ])
             prompt = ResearchConfig.ANALYSIS_TEMPLATE.format(context=context)
             response = self.processor.process_query(prompt)
 
             if "error" in response:
                 raise RuntimeError(response["error"])
 
-            analysis = response['choices'][0]['message']['content']
-            self._validate_analysis_structure(analysis)
-
-            return {
-                "messages": [AIMessage(content=analysis)],
-                "context": {"analysis": analysis}
-            }
+            content = response['choices'][0]['message']['content']
+            self._validate_analysis(content)
+
+            return {"messages": [AIMessage(content=content)]}
         except Exception as e:
             return self._error_state(f"Analysis Error: {str(e)}")
 
-    def validate_output(self, state: AgentState) -> Dict:
+    def validate(self, state: AgentState) -> Dict:
         validation_prompt = f"""Validate this technical analysis:
         {state["messages"][-1].content}
 
         Check for:
         1. Mathematical accuracy
-        2. Empirical evidence
-        3. Technical depth
-        4. Logical consistency
+        2. Technical depth
+        3. Logical consistency
 
         Respond with 'VALID' or 'INVALID'"""
 
         response = self.processor.process_query(validation_prompt)
-        content = response.get('choices', [{}])[0].get('message', {}).get('content', '')
+        valid = "VALID" in response.get('choices', [{}])[0].get('message', {}).get('content', '')
         return {
-            "messages": [AIMessage(content=f"{state['messages'][-1].content}\n\n## Validation\n{content}")],
-            "context": {"valid": "VALID" in content}
+            "messages": [AIMessage(content=f"{state['messages'][-1].content}\n\nValidation: {'✅ Valid' if valid else '❌ Invalid'}")],
+            "context": {"valid": valid}
         }
 
-    def refine_results(self, state: AgentState) -> Dict:
+    def refine(self, state: AgentState) -> Dict:
         refinement_prompt = f"""Improve this analysis:
         {state["messages"][-1].content}
 
         Focus on:
-        1. Enhancing mathematical rigor
-        2. Adding empirical references
-        3. Strengthening technical arguments"""
+        1. Mathematical precision
+        2. Technical terminology
+        3. Empirical references"""
 
         response = self.processor.process_query(refinement_prompt)
-        return {
-            "messages": [AIMessage(content=response['choices'][0]['message']['content'])],
-            "context": state["context"]
-        }
+        return {"messages": [AIMessage(content=response['choices'][0]['message']['content'])]}
 
     def _quality_check(self, state: AgentState) -> str:
         return "valid" if state.get("context", {}).get("valid", False) else "invalid"
 
-    def _validate_analysis_structure(self, content: str):
+    def _validate_analysis(self, content: str):
         required_sections = [
-            "Key Technical Contributions",
-            "Novel Methodologies",
+            "Key Innovations",
+            "Methodologies",
             "Empirical Results",
             "Applications",
             "Limitations"
         ]
         missing = [s for s in required_sections if f"## {s}" not in content]
         if missing:
-            raise ValueError(f"Missing critical sections: {', '.join(missing)}")
+            raise ValueError(f"Missing sections: {', '.join(missing)}")
 
         if not re.search(r"\$.*?\$", content):
-            raise ValueError("Analysis lacks required mathematical notation")
+            raise ValueError("Analysis lacks mathematical notation")
 
     def _error_state(self, message: str) -> Dict:
         return {
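
`_validate_analysis` enforces two things: every required `## Section` heading must appear, and at least one inline `$...$` span must survive into the output. A standalone illustration with a made-up snippet:

```python
import re

sample = "## Key Innovations\nGradient step: $\\theta' = \\theta - \\eta \\nabla L$\n"

required = ["Key Innovations", "Methodologies", "Empirical Results",
            "Applications", "Limitations"]
print([s for s in required if f"## {s}" not in sample])
# ['Methodologies', 'Empirical Results', 'Applications', 'Limitations']
print(bool(re.search(r"\$.*?\$", sample)))  # True: inline math present
```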
@@ -338,22 +349,22 @@ Focus on:
         }
 
 # ------------------------------
-# Research Interface
+# Streamlit Interface
 # ------------------------------
 class ResearchInterface:
     def __init__(self):
         self.workflow = ResearchWorkflow()
-        self._initialize_interface()
+        self._initialize()
 
-    def _initialize_interface(self):
+    def _initialize(self):
         st.set_page_config(
-            page_title="NeuroResearch AI",
+            page_title="AI Research Assistant",
             layout="wide",
             initial_sidebar_state="expanded"
         )
         self._inject_styles()
         self._build_sidebar()
-        self._build_main_interface()
+        self._build_main()
 
     def _inject_styles(self):
         st.markdown("""
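
One ordering caveat the renames do not address: Streamlit requires `st.set_page_config` to be the first Streamlit call in a script, yet this module already runs the API-key check (`st.error`/`st.stop`) and builds the Chroma collection at import time, before `ResearchInterface` is constructed. A minimal reordering sketch (the env-var name is an assumption; the config's source is not shown in the diff):

```python
import os
import streamlit as st

# set_page_config must precede every other st.* call, so it belongs at
# module top rather than inside ResearchInterface._initialize.
st.set_page_config(page_title="AI Research Assistant", layout="wide")

if not os.environ.get("DEEPSEEK_API_KEY"):  # assumed variable name
    st.error("DEEPSEEK_API_KEY not configured")
    st.stop()
```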
@@ -362,103 +373,71 @@ class ResearchInterface:
             --primary: #2ecc71;
             --secondary: #3498db;
             --background: #0a0a0a;
-            --text: #ecf0f1;
         }
-
         .stApp {
             background: var(--background);
-            color: var(--text);
-            font-family: 'Roboto', sans-serif;
+            color: white;
         }
-
         .stTextArea textarea {
             background: #1a1a1a !important;
-            color: var(--text) !important;
-            border: 2px solid var(--secondary);
-            border-radius: 8px;
-            padding: 1rem;
-        }
-
-        .stButton>button {
-            background: linear-gradient(135deg, var(--primary), var(--secondary));
-            border: none;
-            border-radius: 8px;
-            padding: 1rem 2rem;
-            transition: all 0.3s;
+            border: 2px solid var(--secondary) !important;
         }
-
-        .stButton>button:hover {
-            transform: translateY(-2px);
-            box-shadow: 0 4px 12px rgba(46, 204, 113, 0.3);
-        }
-
-        .stExpander {
-            background: #1a1a1a;
-            border: 1px solid #2a2a2a;
-            border-radius: 8px;
-            margin: 1rem 0;
-        }
-
         code {
-            color: #2ecc71;
+            color: var(--primary);
             background: #002200;
             padding: 2px 4px;
-            border-radius: 4px;
         }
         </style>
         """, unsafe_allow_html=True)
 
     def _build_sidebar(self):
         with st.sidebar:
-            st.title("🔍 Research Database")
+            st.title("🔬 Research Corpus")
             for key, data in ResearchConfig.DOCUMENT_MAP.items():
                 with st.expander(data["title"]):
-                    st.markdown(f"```\n{data['content']}\n```")
-            st.metric("Embedding Dimensions", ResearchConfig.EMBEDDING_DIMENSIONS)
-            st.metric("Document Chunks", len(research_docs.get()['ids']))
-
-    def _build_main_interface(self):
-        st.title("🧠 NeuroResearch AI")
-        query = st.text_area("Research Query:", height=200,
-                             placeholder="Enter technical research question...")
+                    st.markdown(f"```latex\n{data['content']}\n```")
+            st.metric("Vector DB Size", len(research_docs.get()['ids']))
+
+    def _build_main(self):
+        st.title("🧠 AI Research Analyst")
+        query = st.text_area("Research Query:", height=150,
+                             placeholder="Enter technical question...")
 
-        if st.button("Execute Analysis", type="primary"):
+        if st.button("Analyze", type="primary"):
             self._execute_analysis(query)
 
     def _execute_analysis(self, query: str):
         try:
-            with st.spinner("Performing deep technical analysis..."):
+            with st.spinner("Analyzing research corpus..."):
                 result = self.workflow.app.invoke(
                     {"messages": [HumanMessage(content=query)]}
                 )
 
                 if result.get("context", {}).get("error"):
-                    self._show_error(result["context"].get("error", "Unknown error"))
+                    self._show_error(result["context"]["error"])
                 else:
-                    self._display_results(result)
+                    self._display_result(result)
         except Exception as e:
             self._show_error(str(e))
 
-    def _display_results(self, result):
-        content = result["messages"][-1].content
-        with st.expander("Technical Analysis Report", expanded=True):
-            st.markdown(content)
+    def _display_result(self, result):
+        with st.expander("Technical Report", expanded=True):
+            st.markdown(result["messages"][-1].content)
 
-        with st.expander("Source Documents", expanded=False):
-            for doc in result["context"].get("documents", []):
+        with st.expander("Source Excerpts", expanded=False):
+            for doc in result["context"].get("docs", []):
                 st.markdown(f"**{doc.metadata['title']}**")
                 st.code(doc.page_content, language='latex')
 
     def _show_error(self, message):
         st.error(f"""
-        ⚠️ Analysis Failed: {message}
+        ⚠️ Analysis Failed
+        {message}
 
-        Troubleshooting Steps:
-        1. Check query specificity
-        2. Verify document connections
-        3. Ensure mathematical notation in sources
-        4. Review API key validity
-        5. Simplify complex query structures
+        Mitigation Steps:
+        1. Simplify query complexity
+        2. Check document connections
+        3. Verify technical terms
         """)
 
 if __name__ == "__main__":
 