mgbam committed
Commit 9c89976 · verified · 1 Parent(s): 8b6c849

Update app.py

Files changed (1):
  1. app.py +155 -166

app.py CHANGED
@@ -11,16 +11,16 @@ from langgraph.graph.message import add_messages
  from typing_extensions import TypedDict, Annotated
  from typing import Sequence, Dict, List, Optional, Any
  import chromadb
- import numpy as np
  import os
  import streamlit as st
  import requests
  import hashlib
  import json
  import time
  from concurrent.futures import ThreadPoolExecutor, as_completed
  from datetime import datetime
- from sklearn.metrics.pairwise import cosine_similarity

  # ------------------------------
  # State Schema Definition
@@ -31,7 +31,7 @@ class AgentState(TypedDict):
      metadata: Dict[str, Any]

  # ------------------------------
- # Enhanced Configuration
  # ------------------------------
  class ResearchConfig:
      DEEPSEEK_API_KEY = os.environ.get("DEEPSEEK_API_KEY")
@@ -40,23 +40,35 @@ class ResearchConfig:
      CHUNK_OVERLAP = 64
      MAX_CONCURRENT_REQUESTS = 5
      EMBEDDING_DIMENSIONS = 1536
-     RESEARCH_EMBEDDING = np.random.randn(1536) # Pre-computed research domain embedding
-     ANALYSIS_TEMPLATE = """Analyze these technical documents with quantum-informed rigor:
      {context}

      Respond with:
-     1. Key Technical Innovations (bullet points with mathematical notation)
-     2. Novel Methodologies (algorithms & architectures)
-     3. Empirical Validation (comparative metrics table)
-     4. Industrial Applications (domain-specific use cases)
-     5. Current Limitations (with theoretical boundaries)

-     Include:
-     - LaTeX equations for key formulas
-     - Markdown tables for comparative results
-     - Quantum complexity analysis where applicable
      """

  # ------------------------------
  # Quantum Document Processing
  # ------------------------------
@@ -68,51 +80,40 @@ class QuantumDocumentManager:
              dimensions=ResearchConfig.EMBEDDING_DIMENSIONS
          )

-     def create_collection(self, documents: Dict[str, str], collection_name: str) -> Chroma:
          splitter = RecursiveCharacterTextSplitter(
              chunk_size=ResearchConfig.CHUNK_SIZE,
              chunk_overlap=ResearchConfig.CHUNK_OVERLAP,
              separators=["\n\n", "\n", "|||"]
          )
-         docs = splitter.create_documents([f"{k}\n{v}" for k,v in documents.items()])
          return Chroma.from_documents(
              documents=docs,
              embedding=self.embeddings,
              client=self.client,
              collection_name=collection_name,
-             ids=[self._document_id(doc.page_content) for doc in docs],
-             metadata=[{"title": k} for k in documents.keys()]
          )

      def _document_id(self, content: str) -> str:
          return f"{hashlib.sha256(content.encode()).hexdigest()[:16]}-{int(time.time())}"

- # Initialize with enhanced documents
- RESEARCH_DOCUMENTS = {
-     "Quantum ML Frontiers": """
-     Breakthrough: Quantum Neural Architecture Search (Q-NAS)
-     - Hybrid quantum-classical networks achieving 98% accuracy on quantum state classification
-     - Quantum circuit ansatz optimization via differentiable architecture search
-     - 40% parameter reduction with comparable accuracy (98% vs 96% classical)
-     - Implemented quantum annealing for hyperparameter optimization
-     - Published in Nature Quantum Computing 2024
-     """,
-
-     "Transformer Architecture Analysis": """
-     Transformers Redefined: Attention with Temporal Encoding
-     - Temporal attention mechanisms for time-series data (O(n log n) complexity
-     - Achieved SOTA 92% accuracy on LRA benchmarks
-     - Developed efficient attention variants with learnable sparse patterns
-     - Introduced quantum-inspired initialization for attention weights
-     - Published in NeurIPS 2023
-     """
- }

  qdm = QuantumDocumentManager()
- research_docs = qdm.create_collection(RESEARCH_DOCUMENTS, "research")

  # ------------------------------
- # Enhanced Retrieval System
  # ------------------------------
  class ResearchRetriever:
      def __init__(self):
@@ -120,10 +121,14 @@ class ResearchRetriever:
              "research": research_docs.as_retriever(
                  search_type="mmr",
                  search_kwargs={
-                     'k': 6,
-                     'fetch_k': 25,
-                     'lambda_mult': 0.9
                  }
              )
          }

@@ -136,7 +141,7 @@ class ResearchRetriever:
  retriever = ResearchRetriever()

  # ------------------------------
- # Quantum Cognitive Processor
  # ------------------------------
  class CognitiveProcessor:
      def __init__(self):
@@ -145,7 +150,7 @@ class CognitiveProcessor:

      def process_query(self, prompt: str) -> Dict:
          futures = []
-         for _ in range(3): # Quantum-inspired redundancy
              futures.append(self.executor.submit(
                  self._execute_api_request,
                  prompt
@@ -156,9 +161,9 @@ class CognitiveProcessor:
              try:
                  results.append(future.result())
              except Exception as e:
-                 st.error(f"Quantum Processing Error: {str(e)}")

-         return self._quantum_consensus(results)

      def _execute_api_request(self, prompt: str) -> Dict:
          headers = {
@@ -175,39 +180,31 @@ class CognitiveProcessor:
                      "model": "deepseek-chat",
                      "messages": [{
                          "role": "user",
-                         "content": f"Respond as Quantum AI Researcher:\n{prompt}"
                      }],
                      "temperature": 0.7,
-                     "max_tokens": 2000,
-                     "top_p": 0.85
                  },
-                 timeout=60
              )
              response.raise_for_status()
              return response.json()
          except requests.exceptions.RequestException as e:
              return {"error": str(e)}

-     def _quantum_consensus(self, results: List[Dict]) -> Dict:
          valid = [r for r in results if "error" not in r]
          if not valid:
-             return {"error": "All quantum circuits failed"}
-
-         # Quantum-inspired selection
-         contents = [r.get('choices', [{}])[0].get('message', {}).get('content', '') for r in valid]
-         similarities = cosine_similarity(
-             [self.embeddings.embed_query(c) for c in contents],
-             [ResearchConfig.RESEARCH_EMBEDDING]
-         )
-         return valid[np.argmax(similarities)]

  # ------------------------------
- # Enhanced Research Workflow
  # ------------------------------
  class ResearchWorkflow:
      def __init__(self):
          self.processor = CognitiveProcessor()
-         self.embeddings = OpenAIEmbeddings()
          self.workflow = StateGraph(AgentState)
          self._build_workflow()

@@ -223,7 +220,7 @@ class ResearchWorkflow:
          self.workflow.add_edge("retrieve", "analyze")
          self.workflow.add_conditional_edges(
              "analyze",
-             self._quantum_quality_check,
              {"valid": "validate", "invalid": "refine"}
          )
          self.workflow.add_edge("validate", END)
@@ -235,7 +232,7 @@ class ResearchWorkflow:
          try:
              query = state["messages"][-1].content
              return {
-                 "messages": [AIMessage(content="Quantum ingestion complete")],
                  "context": {"raw_query": query},
                  "metadata": {"timestamp": datetime.now().isoformat()}
              }
@@ -245,57 +242,25 @@ class ResearchWorkflow:
      def retrieve_documents(self, state: AgentState) -> Dict:
          try:
              query = state["context"]["raw_query"]
-             domain = self._quantum_domain_detection(query)
-             docs = retriever.retrieve(query, domain)
-
-             if not docs:
-                 return self._error_state("No relevant documents found")
-
-             filtered_docs = self._quantum_filter(docs, query)
              return {
-                 "messages": [AIMessage(content=f"Retrieved {len(filtered_docs)} quantum-relevant documents")],
                  "context": {
-                     "documents": filtered_docs,
-                     "retrieval_time": time.time(),
-                     "domain": domain
                  }
              }
          except Exception as e:
              return self._error_state(f"Retrieval Error: {str(e)}")

-     def _quantum_domain_detection(self, query: str) -> str:
-         query_vec = self.embeddings.embed_query(query)
-         research_sim = cosine_similarity([query_vec], [ResearchConfig.RESEARCH_EMBEDDING])[0][0]
-         return "research" if research_sim > 0.7 else "development"
-
-     def _quantum_filter(self, docs: List, query: str) -> List:
-         # Stage 1: Embedding similarity cutoff
-         filtered = [doc for doc in docs if doc.metadata.get('score', 0) > 0.65]
-
-         # Stage 2: LLM relevance verification
-         verified = []
-         for doc in filtered:
-             response = self.processor.process_query(
-                 f"Document: {doc.page_content}\nQuery: {query}\nRelevant? (yes/no)"
-             )
-             if "yes" in response.get('choices', [{}])[0].get('message', {}).get('content', '').lower():
-                 verified.append(doc)
-         return verified[:3]
-
      def analyze_content(self, state: AgentState) -> Dict:
          try:
-             if not state["context"].get("documents"):
-                 return self._error_state("No documents for quantum analysis")
-
              docs = "\n\n".join([d.page_content for d in state["context"]["documents"]])
              prompt = ResearchConfig.ANALYSIS_TEMPLATE.format(context=docs)
              response = self.processor.process_query(prompt)

              if "error" in response:
                  return self._error_state(response["error"])
-
-             if not self._check_coherence(response['choices'][0]['message']['content']):
-                 return self._error_state("Analysis failed quantum coherence check")

              return {
                  "messages": [AIMessage(content=response['choices'][0]['message']['content'])],
@@ -304,31 +269,32 @@ class ResearchWorkflow:
          except Exception as e:
              return self._error_state(f"Analysis Error: {str(e)}")

-     def _check_coherence(self, analysis: str) -> bool:
-         required = [
-             "Key Technical Innovations",
-             "Novel Methodologies",
-             "Empirical Validation",
-             "Industrial Applications",
-             "Current Limitations"
-         ]
-         return all(req in analysis for req in required)
-
      def validate_output(self, state: AgentState) -> Dict:
-         content = state["messages"][-1].content
          return {
-             "messages": [AIMessage(content=f"{content}\n\n## Quantum Validation\n- Coherence Score: 0.92\n- Error Margin: ±0.05\n- Theta Convergence: ✓")],
-             "metadata": {"validated": True}
          }

      def refine_results(self, state: AgentState) -> Dict:
-         refinement_prompt = f"""Refine this quantum analysis:
          {state["messages"][-1].content}

-         Improvements needed:
-         1. Enhance mathematical rigor
-         2. Add comparative metrics
-         3. Strengthen quantum complexity analysis"""

          response = self.processor.process_query(refinement_prompt)
          return {
@@ -336,19 +302,19 @@ class ResearchWorkflow:
              "context": state["context"]
          }

-     def _quantum_quality_check(self, state: AgentState) -> str:
          content = state["messages"][-1].content
-         return "valid" if "Coherence Score" in content else "invalid"

      def _error_state(self, message: str) -> Dict:
          return {
-             "messages": [AIMessage(content=f" Quantum Error: {message}")],
              "context": {"error": True},
              "metadata": {"status": "error"}
          }

  # ------------------------------
- # Quantum Research Interface
  # ------------------------------
  class ResearchInterface:
      def __init__(self):
@@ -357,7 +323,7 @@ class ResearchInterface:

      def _initialize_interface(self):
          st.set_page_config(
-             page_title="Quantum Research AI",
              layout="wide",
              initial_sidebar_state="expanded"
          )
@@ -369,95 +335,118 @@ class ResearchInterface:
          st.markdown("""
          <style>
          :root {
-             --quantum-primary: #00f3ff;
-             --neon-secondary: #ff00ff;
-             --dark-bg: #000a1f;
          }

          .stApp {
-             background: var(--dark-bg);
-             color: white;
-             font-family: 'Courier New', monospace;
          }

          .stTextArea textarea {
-             background: #001233 !important;
-             border: 2px solid var(--quantum-primary);
-             color: white !important;
              border-radius: 8px;
              padding: 1rem;
          }

          .stButton>button {
-             background: linear-gradient(45deg, #00f3ff, #ff00ff);
              border: none;
              border-radius: 8px;
              padding: 1rem 2rem;
              transition: all 0.3s;
          }

-         .stMarkdown h1, .stMarkdown h2 {
-             color: var(--quantum-primary);
-             border-bottom: 2px solid var(--neon-secondary);
          }
          </style>
          """, unsafe_allow_html=True)

      def _build_sidebar(self):
          with st.sidebar:
-             st.title("🔮 Quantum Knowledge Base")
-             for title, content in RESEARCH_DOCUMENTS.items():
-                 with st.expander(f"⚛️ {title}"):
-                     st.markdown(f"```quantum\n{content}\n```")

      def _build_main_interface(self):
-         st.title("⚛️ Quantum Research Nexus")
-         query = st.text_area("Enter Quantum Research Query:", height=150,
-                              placeholder="Input quantum computing or ML research question...")

-         if st.button("Execute Quantum Analysis", type="primary"):
-             self._execute_quantum_analysis(query)

-     def _execute_quantum_analysis(self, query: str):
          try:
-             with st.spinner("Entangling quantum states..."):
                  results = self.workflow.app.stream(
                      {"messages": [HumanMessage(content=query)], "context": {}, "metadata": {}}
                  )

                  for event in results:
-                     self._render_quantum_event(event)

-             st.success("🌀 Quantum Analysis Collapsed Successfully")
          except Exception as e:
-             st.error(f"""Quantum Decoherence Detected:
              {str(e)}
-             Mitigation Strategies:
-             1. Simplify query complexity
-             2. Increase error correction rounds
-             3. Check quantum resource availability""")

-     def _render_quantum_event(self, event: Dict):
-         if 'retrieve' in event:
              with st.container():
                  docs = event['retrieve']['context']['documents']
-                 st.info(f"📡 Retrieved {len(docs)} quantum documents")
-                 with st.expander("Quantum Document Entanglement", expanded=False):
-                     for doc in docs:
-                         st.markdown(f"### {doc.metadata['title']}")
-                         st.markdown(f"```quantum\n{doc.page_content}\n```")

          elif 'analyze' in event:
              with st.container():
                  content = event['analyze']['messages'][0].content
-                 with st.expander("Quantum Analysis Matrix", expanded=True):
                      st.markdown(content)

          elif 'validate' in event:
              with st.container():
                  content = event['validate']['messages'][0].content
-                 st.success(" Quantum State Validated")
-                 st.markdown(content)

  if __name__ == "__main__":
      ResearchInterface()

  from typing_extensions import TypedDict, Annotated
  from typing import Sequence, Dict, List, Optional, Any
  import chromadb
+ import re
  import os
  import streamlit as st
  import requests
  import hashlib
  import json
  import time
+ from langchain.tools.retriever import create_retriever_tool
  from concurrent.futures import ThreadPoolExecutor, as_completed
  from datetime import datetime

  # ------------------------------
  # State Schema Definition

      metadata: Dict[str, Any]

  # ------------------------------
+ # Configuration
  # ------------------------------
  class ResearchConfig:
      DEEPSEEK_API_KEY = os.environ.get("DEEPSEEK_API_KEY")

      CHUNK_OVERLAP = 64
      MAX_CONCURRENT_REQUESTS = 5
      EMBEDDING_DIMENSIONS = 1536
+     DOCUMENT_MAP = {
+         "Research Report: Results of a New AI Model Improving Image Recognition Accuracy to 98%":
+             "CV-Transformer Hybrid Architecture",
+         "Academic Paper Summary: Why Transformers Became the Mainstream Architecture in Natural Language Processing":
+             "Transformer Architecture Analysis",
+         "Latest Trends in Machine Learning Methods Using Quantum Computing":
+             "Quantum ML Frontiers"
+     }
+     ANALYSIS_TEMPLATE = """Analyze these technical documents with scientific rigor:
      {context}

      Respond with:
+     1. Key Technical Contributions (bullet points)
+     2. Novel Methodologies
+     3. Empirical Results (with metrics)
+     4. Potential Applications
+     5. Limitations & Future Directions

+     Format: Markdown with LaTeX mathematical notation where applicable
      """

+ # Validation
+ if not ResearchConfig.DEEPSEEK_API_KEY:
+     st.error("""**Research Portal Configuration Required**
+     1. Obtain DeepSeek API key: [platform.deepseek.com](https://platform.deepseek.com/)
+     2. Configure secret: `DEEPSEEK_API_KEY` in Space settings
+     3. Rebuild deployment""")
+     st.stop()
+
  # ------------------------------
  # Quantum Document Processing
  # ------------------------------
              dimensions=ResearchConfig.EMBEDDING_DIMENSIONS
          )

+     def create_collection(self, documents: List[str], collection_name: str) -> Chroma:
          splitter = RecursiveCharacterTextSplitter(
              chunk_size=ResearchConfig.CHUNK_SIZE,
              chunk_overlap=ResearchConfig.CHUNK_OVERLAP,
              separators=["\n\n", "\n", "|||"]
          )
+         docs = splitter.create_documents(documents)
          return Chroma.from_documents(
              documents=docs,
              embedding=self.embeddings,
              client=self.client,
              collection_name=collection_name,
+             ids=[self._document_id(doc.page_content) for doc in docs]
          )

      def _document_id(self, content: str) -> str:
          return f"{hashlib.sha256(content.encode()).hexdigest()[:16]}-{int(time.time())}"

+ # Initialize document collections
  qdm = QuantumDocumentManager()
+ research_docs = qdm.create_collection([
+     "Research Report: Results of a New AI Model Improving Image Recognition Accuracy to 98%",
+     "Academic Paper Summary: Why Transformers Became the Mainstream Architecture in Natural Language Processing",
+     "Latest Trends in Machine Learning Methods Using Quantum Computing"
+ ], "research")
+
+ development_docs = qdm.create_collection([
+     "Project A: UI Design Completed, API Integration in Progress",
+     "Project B: Testing New Feature X, Bug Fixes Needed",
+     "Product Y: In the Performance Optimization Stage Before Release"
+ ], "development")

  # ------------------------------
+ # Advanced Retrieval System
  # ------------------------------
  class ResearchRetriever:
      def __init__(self):

              "research": research_docs.as_retriever(
                  search_type="mmr",
                  search_kwargs={
+                     'k': 4,
+                     'fetch_k': 20,
+                     'lambda_mult': 0.85
                  }
+             ),
+             "development": development_docs.as_retriever(
+                 search_type="similarity",
+                 search_kwargs={'k': 3}
              )
          }

  retriever = ResearchRetriever()

  # ------------------------------
+ # Cognitive Processing Unit
  # ------------------------------
  class CognitiveProcessor:
      def __init__(self):

      def process_query(self, prompt: str) -> Dict:
          futures = []
+         for _ in range(3): # Triple redundancy
              futures.append(self.executor.submit(
                  self._execute_api_request,
                  prompt

              try:
                  results.append(future.result())
              except Exception as e:
+                 st.error(f"Processing Error: {str(e)}")

+         return self._consensus_check(results)

      def _execute_api_request(self, prompt: str) -> Dict:
          headers = {

                      "model": "deepseek-chat",
                      "messages": [{
                          "role": "user",
+                         "content": f"Respond as Senior AI Researcher:\n{prompt}"
                      }],
                      "temperature": 0.7,
+                     "max_tokens": 1500,
+                     "top_p": 0.9
                  },
+                 timeout=45
              )
              response.raise_for_status()
              return response.json()
          except requests.exceptions.RequestException as e:
              return {"error": str(e)}

+     def _consensus_check(self, results: List[Dict]) -> Dict:
          valid = [r for r in results if "error" not in r]
          if not valid:
+             return {"error": "All API requests failed"}
+         return max(valid, key=lambda x: len(x.get('choices', [{}])[0].get('message', {}).get('content', '')))

  # ------------------------------
+ # Research Workflow Engine
  # ------------------------------
  class ResearchWorkflow:
      def __init__(self):
          self.processor = CognitiveProcessor()
          self.workflow = StateGraph(AgentState)
          self._build_workflow()

          self.workflow.add_edge("retrieve", "analyze")
          self.workflow.add_conditional_edges(
              "analyze",
+             self._quality_check,
              {"valid": "validate", "invalid": "refine"}
          )
          self.workflow.add_edge("validate", END)

          try:
              query = state["messages"][-1].content
              return {
+                 "messages": [AIMessage(content="Query ingested successfully")],
                  "context": {"raw_query": query},
                  "metadata": {"timestamp": datetime.now().isoformat()}
              }

      def retrieve_documents(self, state: AgentState) -> Dict:
          try:
              query = state["context"]["raw_query"]
+             docs = retriever.retrieve(query, "research")
              return {
+                 "messages": [AIMessage(content=f"Retrieved {len(docs)} documents")],
                  "context": {
+                     "documents": docs,
+                     "retrieval_time": time.time()
                  }
              }
          except Exception as e:
              return self._error_state(f"Retrieval Error: {str(e)}")

      def analyze_content(self, state: AgentState) -> Dict:
          try:
              docs = "\n\n".join([d.page_content for d in state["context"]["documents"]])
              prompt = ResearchConfig.ANALYSIS_TEMPLATE.format(context=docs)
              response = self.processor.process_query(prompt)

              if "error" in response:
                  return self._error_state(response["error"])

              return {
                  "messages": [AIMessage(content=response['choices'][0]['message']['content'])],

          except Exception as e:
              return self._error_state(f"Analysis Error: {str(e)}")

      def validate_output(self, state: AgentState) -> Dict:
+         analysis = state["messages"][-1].content
+         validation_prompt = f"""Validate research analysis:
+         {analysis}
+
+         Check for:
+         1. Technical accuracy
+         2. Citation support
+         3. Logical consistency
+         4. Methodological soundness
+
+         Respond with 'VALID' or 'INVALID'"""
+
+         response = self.processor.process_query(validation_prompt)
          return {
+             "messages": [AIMessage(content=analysis + f"\n\nValidation: {response.get('choices', [{}])[0].get('message', {}).get('content', '')}")]
          }

      def refine_results(self, state: AgentState) -> Dict:
+         refinement_prompt = f"""Refine this analysis:
          {state["messages"][-1].content}

+         Improve:
+         1. Technical precision
+         2. Empirical grounding
+         3. Theoretical coherence"""

          response = self.processor.process_query(refinement_prompt)
          return {

              "context": state["context"]
          }

+     def _quality_check(self, state: AgentState) -> str:
          content = state["messages"][-1].content
+         return "valid" if "VALID" in content else "invalid"

      def _error_state(self, message: str) -> Dict:
          return {
+             "messages": [AIMessage(content=f" {message}")],
              "context": {"error": True},
              "metadata": {"status": "error"}
          }

  # ------------------------------
+ # Research Interface
  # ------------------------------
  class ResearchInterface:
      def __init__(self):

      def _initialize_interface(self):
          st.set_page_config(
+             page_title="NeuroResearch AI",
              layout="wide",
              initial_sidebar_state="expanded"
          )

          st.markdown("""
          <style>
          :root {
+             --primary: #2ecc71;
+             --secondary: #3498db;
+             --background: #0a0a0a;
+             --text: #ecf0f1;
          }

          .stApp {
+             background: var(--background);
+             color: var(--text);
+             font-family: 'Roboto', sans-serif;
          }

          .stTextArea textarea {
+             background: #1a1a1a !important;
+             color: var(--text) !important;
+             border: 2px solid var(--secondary);
              border-radius: 8px;
              padding: 1rem;
          }

          .stButton>button {
+             background: linear-gradient(135deg, var(--primary), var(--secondary));
              border: none;
              border-radius: 8px;
              padding: 1rem 2rem;
              transition: all 0.3s;
          }

+         .stButton>button:hover {
+             transform: translateY(-2px);
+             box-shadow: 0 4px 12px rgba(46, 204, 113, 0.3);
+         }
+
+         .stExpander {
+             background: #1a1a1a;
+             border: 1px solid #2a2a2a;
+             border-radius: 8px;
+             margin: 1rem 0;
          }
          </style>
          """, unsafe_allow_html=True)

381
  with st.sidebar:
382
+ st.title("🔍 Research Database")
383
+ st.subheader("Technical Papers")
384
+ for title, short in ResearchConfig.DOCUMENT_MAP.items():
385
+ with st.expander(short):
386
+ st.markdown(f"```\n{title}\n```")
387
+
388
+ st.subheader("Analysis Metrics")
389
+ st.metric("Vector Collections", 2)
390
+ st.metric("Embedding Dimensions", ResearchConfig.EMBEDDING_DIMENSIONS)
391
 
392
  def _build_main_interface(self):
393
+ st.title("🧠 NeuroResearch AI")
394
+ query = st.text_area("Research Query:", height=200,
395
+ placeholder="Enter technical research question...")
396
 
397
+ if st.button("Execute Analysis", type="primary"):
398
+ self._execute_analysis(query)
399
 
400
+ def _execute_analysis(self, query: str):
401
  try:
402
+ with st.spinner("Initializing Quantum Analysis..."):
403
  results = self.workflow.app.stream(
404
  {"messages": [HumanMessage(content=query)], "context": {}, "metadata": {}}
405
  )
406
 
407
  for event in results:
408
+ self._render_event(event)
409
 
410
+ st.success(" Analysis Completed Successfully")
411
  except Exception as e:
412
+ st.error(f"""**Analysis Failed**
413
  {str(e)}
414
+ Potential issues:
415
+ - Complex query structure
416
+ - Document correlation failure
417
+ - Temporal processing constraints""")
418
 
419
+ def _render_event(self, event: Dict):
420
+ if 'ingest' in event:
421
+ with st.container():
422
+ st.success("✅ Query Ingested")
423
+
424
+ elif 'retrieve' in event:
425
  with st.container():
426
  docs = event['retrieve']['context']['documents']
427
+ st.info(f"📚 Retrieved {len(docs)} documents")
428
+ with st.expander("View Retrieved Documents", expanded=False):
429
+ for i, doc in enumerate(docs, 1):
430
+ st.markdown(f"**Document {i}**")
431
+ st.code(doc.page_content, language='text')
432
 
433
  elif 'analyze' in event:
434
  with st.container():
435
  content = event['analyze']['messages'][0].content
436
+ with st.expander("Technical Analysis Report", expanded=True):
437
  st.markdown(content)
438
 
439
  elif 'validate' in event:
440
  with st.container():
441
  content = event['validate']['messages'][0].content
442
+ if "VALID" in content:
443
+ st.success("✅ Validation Passed")
444
+ with st.expander("View Validated Analysis", expanded=True):
445
+ st.markdown(content.split("Validation:")[0])
446
+ else:
447
+ st.warning("⚠️ Validation Issues Detected")
448
+ with st.expander("View Validation Details", expanded=True):
449
+ st.markdown(content)
450
 
451
  if __name__ == "__main__":
452
  ResearchInterface()