mgbam committed
Commit b6b8274 · verified · 1 Parent(s): 6a37e47

Update app.py

Files changed (1)
  1. app.py +75 -38
app.py CHANGED
@@ -1,5 +1,6 @@
 # ------------------------------
-# Enhanced NeuroResearch AI System with Refinement Counter and Increased Recursion Limit
+# Enhanced NeuroResearch AI System with Refinement Counter,
+# Dynamic Difficulty Gradient, and Meta-Refinement Inspired by LADDER
 # ------------------------------
 import logging
 import os
@@ -96,7 +97,7 @@ class QuantumDocumentManager:
             self.client = chromadb.PersistentClient(path=ResearchConfig.CHROMA_PATH)
             logger.info("Initialized PersistentClient for Chroma.")
         except Exception as e:
-            logger.error(f"Error initializing PersistentClient: {e}")
+            logger.exception("Error initializing PersistentClient; falling back to in-memory client.")
             self.client = chromadb.Client()  # Fallback to in-memory client
         self.embeddings = OpenAIEmbeddings(
             model="text-embedding-3-large",
@@ -116,7 +117,7 @@ class QuantumDocumentManager:
             docs = splitter.create_documents(documents)
             logger.info(f"Created {len(docs)} document chunks for collection '{collection_name}'.")
         except Exception as e:
-            logger.error(f"Error splitting documents: {e}")
+            logger.exception("Error during document splitting.")
             raise e

         return Chroma.from_documents(
@@ -166,7 +167,7 @@ class ResearchRetriever:
             )
             logger.info("Initialized retrievers for research and development domains.")
         except Exception as e:
-            logger.error(f"Error initializing retrievers: {e}")
+            logger.exception("Error initializing retrievers.")
             raise e

     def retrieve(self, query: str, domain: str) -> List[Any]:
@@ -182,7 +183,7 @@ class ResearchRetriever:
                 logger.warning(f"Domain '{domain}' not recognized.")
                 return []
         except Exception as e:
-            logger.error(f"Retrieval error for domain '{domain}': {e}")
+            logger.exception(f"Retrieval error for domain '{domain}'.")
             return []

 retriever = ResearchRetriever()
@@ -212,7 +213,7 @@ class CognitiveProcessor:
             try:
                 results.append(future.result())
             except Exception as e:
-                logger.error(f"Error in API request: {e}")
+                logger.exception("Error during API request execution.")
                 st.error(f"Processing Error: {str(e)}")

         return self._consensus_check(results)
@@ -247,7 +248,7 @@ class CognitiveProcessor:
             logger.info("DeepSeek API request successful.")
             return response.json()
         except requests.exceptions.RequestException as e:
-            logger.error(f"DeepSeek API request failed: {e}")
+            logger.exception("DeepSeek API request failed.")
             return {"error": str(e)}

     def _consensus_check(self, results: List[Dict]) -> Dict:
@@ -294,12 +295,12 @@ class ResearchWorkflow:

     def ingest_query(self, state: AgentState) -> Dict:
         """
-        Ingests the research query and initializes the refinement counter.
+        Ingests the research query and initializes the refinement counter and history.
         """
         try:
             query = state["messages"][-1].content
-            # Initialize context with raw query and refinement counter
-            new_context = {"raw_query": query, "refine_count": 0}
+            # Initialize context with raw query, refinement counter, and an empty history list
+            new_context = {"raw_query": query, "refine_count": 0, "refinement_history": []}
             logger.info("Query ingested.")
             return {
                 "messages": [AIMessage(content="Query ingested successfully")],
@@ -307,6 +308,7 @@ class ResearchWorkflow:
                 "metadata": {"timestamp": datetime.now().isoformat()}
             }
         except Exception as e:
+            logger.exception("Error during query ingestion.")
             return self._error_state(f"Ingestion Error: {str(e)}")

     def retrieve_documents(self, state: AgentState) -> Dict:
@@ -319,9 +321,10 @@ class ResearchWorkflow:
             logger.info(f"Retrieved {len(docs)} documents for query.")
             return {
                 "messages": [AIMessage(content=f"Retrieved {len(docs)} documents")],
-                "context": {"documents": docs, "retrieval_time": time.time(), "refine_count": state["context"].get("refine_count", 0)}
+                "context": {"documents": docs, "retrieval_time": time.time(), "refine_count": state["context"].get("refine_count", 0), "refinement_history": state["context"].get("refinement_history", [])}
             }
         except Exception as e:
+            logger.exception("Error during document retrieval.")
             return self._error_state(f"Retrieval Error: {str(e)}")

     def analyze_content(self, state: AgentState) -> Dict:
@@ -334,49 +337,83 @@ class ResearchWorkflow:
             prompt = ResearchConfig.ANALYSIS_TEMPLATE.format(context=docs_text)
             response = self.processor.process_query(prompt)
             if "error" in response:
+                logger.error("DeepSeek response error during analysis.")
                 return self._error_state(response["error"])
             logger.info("Content analysis completed.")
             return {
                 "messages": [AIMessage(content=response.get('choices', [{}])[0].get('message', {}).get('content', ''))],
-                "context": {"analysis": response, "refine_count": state["context"].get("refine_count", 0)}
+                "context": {"analysis": response, "refine_count": state["context"].get("refine_count", 0), "refinement_history": state["context"].get("refinement_history", [])}
             }
         except Exception as e:
+            logger.exception("Error during content analysis.")
             return self._error_state(f"Analysis Error: {str(e)}")

     def validate_output(self, state: AgentState) -> Dict:
         """
         Validates the technical analysis report.
         """
-        analysis = state["messages"][-1].content
-        validation_prompt = (
-            f"Validate research analysis:\n{analysis}\n\n"
-            "Check for:\n1. Technical accuracy\n2. Citation support\n3. Logical consistency\n4. Methodological soundness\n\n"
-            "Respond with 'VALID' or 'INVALID'"
-        )
-        response = self.processor.process_query(validation_prompt)
-        logger.info("Output validation completed.")
-        return {
-            "messages": [AIMessage(content=analysis + f"\n\nValidation: {response.get('choices', [{}])[0].get('message', {}).get('content', '')}")]
-        }
+        try:
+            analysis = state["messages"][-1].content
+            validation_prompt = (
+                f"Validate research analysis:\n{analysis}\n\n"
+                "Check for:\n1. Technical accuracy\n2. Citation support\n3. Logical consistency\n4. Methodological soundness\n\n"
+                "Respond with 'VALID' or 'INVALID'"
+            )
+            response = self.processor.process_query(validation_prompt)
+            logger.info("Output validation completed.")
+            return {
+                "messages": [AIMessage(content=analysis + f"\n\nValidation: {response.get('choices', [{}])[0].get('message', {}).get('content', '')}")]
+            }
+        except Exception as e:
+            logger.exception("Error during output validation.")
+            return self._error_state(f"Validation Error: {str(e)}")

     def refine_results(self, state: AgentState) -> Dict:
         """
         Refines the analysis report if validation fails.
-        Increments the refinement counter to limit infinite loops.
+        Implements an innovative meta-refinement mechanism inspired by LADDER.
+        It tracks refinement history and uses a dynamic difficulty gradient.
         """
-        current_count = state["context"].get("refine_count", 0)
-        state["context"]["refine_count"] = current_count + 1
-        logger.info(f"Refinement iteration: {state['context']['refine_count']}")
-        refinement_prompt = (
-            f"Refine this analysis:\n{state['messages'][-1].content}\n\n"
-            "Improve:\n1. Technical precision\n2. Empirical grounding\n3. Theoretical coherence"
-        )
-        response = self.processor.process_query(refinement_prompt)
-        logger.info("Refinement completed.")
-        return {
-            "messages": [AIMessage(content=response.get('choices', [{}])[0].get('message', {}).get('content', ''))],
-            "context": state["context"]
-        }
+        try:
+            current_count = state["context"].get("refine_count", 0)
+            state["context"]["refine_count"] = current_count + 1
+            # Append current analysis to refinement history
+            refinement_history = state["context"].setdefault("refinement_history", [])
+            current_analysis = state["messages"][-1].content
+            refinement_history.append(current_analysis)
+            # Compute a "difficulty level" (from 3 to 0) based on refinement count
+            difficulty_level = max(0, 3 - state["context"]["refine_count"])
+            logger.info(f"Refinement iteration: {state['context']['refine_count']}, Difficulty level: {difficulty_level}")
+
+            # If refinement count exceeds threshold, perform meta-refinement by summarizing the history
+            if state["context"]["refine_count"] >= 3:
+                meta_prompt = (
+                    "You are given the following series of refinement outputs:\n" +
+                    "\n---\n".join(refinement_history) +
+                    "\n\nSummarize the above into a final, concise, and high-quality technical analysis report. Do not introduce new ideas; just synthesize the improvements."
+                )
+                meta_response = self.processor.process_query(meta_prompt)
+                logger.info("Meta-refinement completed.")
+                return {
+                    "messages": [AIMessage(content=meta_response.get('choices', [{}])[0].get('message', {}).get('content', ''))],
+                    "context": state["context"]
+                }
+            else:
+                # Standard refinement with a dynamic difficulty prompt
+                refinement_prompt = (
+                    f"Refine this analysis (current difficulty level: {difficulty_level}):\n{current_analysis}\n\n"
+                    "Improve the following aspects:\n1. Technical precision\n2. Empirical grounding\n3. Theoretical coherence\n\n"
+                    "Use a structured difficulty gradient approach (similar to LADDER) to produce a simpler yet more accurate variant."
+                )
+                response = self.processor.process_query(refinement_prompt)
+                logger.info("Refinement completed.")
+                return {
+                    "messages": [AIMessage(content=response.get('choices', [{}])[0].get('message', {}).get('content', ''))],
+                    "context": state["context"]
+                }
+        except Exception as e:
+            logger.exception("Error during refinement.")
+            return self._error_state(f"Refinement Error: {str(e)}")

     def _quality_check(self, state: AgentState) -> str:
         """
@@ -502,7 +539,7 @@ class ResearchInterface:
                 self._render_event(event)
             st.success("✅ Analysis Completed Successfully")
         except Exception as e:
-            logger.error(f"Workflow execution failed: {e}")
+            logger.exception("Workflow execution failed.")
             st.error(
                 f"""**Analysis Failed**
{str(e)}
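
A recurring change in this commit is the switch from logger.error(f"... {e}") to logger.exception(...). The standard-library behaviour behind that change can be sketched in isolation; the logger name "neuroresearch.demo" below is illustrative only and does not appear in app.py. logging.Logger.exception logs at ERROR level and additionally attaches the active traceback, which is the extra diagnostic detail the new handlers rely on.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("neuroresearch.demo")  # illustrative name, not from app.py

try:
    raise ValueError("simulated Chroma failure")
except Exception as e:
    # Old pattern: records the message only; the traceback is lost.
    logger.error(f"Error initializing PersistentClient: {e}")
    # New pattern: same ERROR level, but the full traceback is appended
    # automatically because exception() is called inside an except block.
    logger.exception("Error initializing PersistentClient; falling back to in-memory client.")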
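
The main behavioural change is the reworked refine_results: each failed validation increments refine_count, maps it onto a decreasing difficulty level, and, once the count reaches 3, switches to a meta-refinement pass that summarizes the accumulated refinement_history. The following is a minimal, self-contained sketch of that routing only; plan_refinement and MAX_REFINEMENTS are hypothetical helpers written for illustration and do not exist in app.py.

from typing import Dict, Tuple

MAX_REFINEMENTS = 3  # assumed cap, mirroring `refine_count >= 3` in the diff

def plan_refinement(context: Dict) -> Tuple[str, int]:
    """Decide the next refinement pass: ('standard' | 'meta', difficulty_level)."""
    context["refine_count"] = context.get("refine_count", 0) + 1
    difficulty_level = max(0, MAX_REFINEMENTS - context["refine_count"])
    mode = "meta" if context["refine_count"] >= MAX_REFINEMENTS else "standard"
    return mode, difficulty_level

# Walk-through: three consecutive validation failures.
ctx: Dict = {"refine_count": 0, "refinement_history": []}
for attempt in range(1, 4):
    mode, level = plan_refinement(ctx)
    ctx["refinement_history"].append(f"analysis draft #{attempt}")
    print(attempt, mode, level)
# Prints: 1 standard 2, then 2 standard 1, then 3 meta 0: the third failure
# triggers the meta-refinement branch, which summarizes refinement_history
# into a final consolidated report.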