# processor.py | |
import logging | |
import hashlib | |
import time | |
from datetime import datetime | |
from concurrent.futures import ThreadPoolExecutor, as_completed | |
from typing import Dict, List | |
import requests | |
from langchain_core.messages import AIMessage | |
from config import ResearchConfig | |
from knowledge_graph import QuantumKnowledgeGraph | |
logger = logging.getLogger(__name__) | |
class CognitiveProcessor:
    """Execute backend API requests with triple redundancy and consolidate
    the results via a simple consensus mechanism.

    Each query is submitted three times in parallel; among the successful
    responses, the one with the longest message content is selected.
    """

    def __init__(self) -> None:
        # Shared pool for the redundant API calls; sized from project config.
        self.executor = ThreadPoolExecutor(max_workers=ResearchConfig.MAX_CONCURRENT_REQUESTS)
        # Short per-process session token, sent with every request for tracing.
        self.session_id = hashlib.sha256(datetime.now().isoformat().encode()).hexdigest()[:12]

    def process_query(self, prompt: str) -> Dict:
        """Run the same prompt three times and return the consensus result.

        Failed futures are logged and skipped; per-request HTTP failures are
        represented as ``{"error": ...}`` dicts and filtered during consensus.
        """
        futures = [self.executor.submit(self._execute_api_request, prompt) for _ in range(3)]
        results: List[Dict] = []
        for future in as_completed(futures):
            try:
                results.append(future.result())
            except Exception:
                # logger.exception captures the traceback; keep going so the
                # remaining redundant requests can still produce a result.
                logger.exception("Error during API request execution.")
        return self._consensus_check(results)

    def _execute_api_request(self, prompt: str) -> Dict:
        """POST the prompt to the chat-completions endpoint.

        Returns the decoded JSON response on success, or ``{"error": ...}``
        on any request/HTTP failure so the caller never has to catch here.
        """
        headers = {
            "Authorization": f"Bearer {ResearchConfig.DEEPSEEK_API_KEY}",
            "Content-Type": "application/json",
            "X-Research-Session": self.session_id
        }
        payload = {
            "model": "deepseek-chat",
            "messages": [{
                "role": "user",
                "content": f"Respond as a Senior AI Researcher and Technical Writer:\n{prompt}"
            }],
            "temperature": 0.7,
            "max_tokens": 1500,
            "top_p": 0.9
        }
        try:
            response = requests.post(
                "https://api.deepseek.com/v1/chat/completions",
                headers=headers,
                json=payload,
                timeout=45
            )
            response.raise_for_status()
            logger.info("Backend API request successful.")
            return response.json()
        except requests.exceptions.RequestException as e:
            logger.exception("Backend API request failed.")
            return {"error": str(e)}

    @staticmethod
    def _response_length(result: Dict) -> int:
        """Length of the first choice's message content, or 0 when the
        response has no choices.

        Note: ``result.get('choices', [{}])[0]`` would raise IndexError for a
        present-but-empty ``"choices": []`` (the default only applies when the
        key is absent); ``or [{}]`` also covers the empty-list case.
        """
        choices = result.get("choices") or [{}]
        return len(choices[0].get("message", {}).get("content", ""))

    def _consensus_check(self, results: List[Dict]) -> Dict:
        """Pick the successful response with the longest content.

        Returns ``{"error": ...}`` when every redundant request failed.
        """
        valid_results = [r for r in results if "error" not in r]
        if not valid_results:
            logger.error("All API requests failed.")
            return {"error": "All API requests failed"}
        # Longest response content wins — a deliberately simple consensus metric.
        return max(valid_results, key=self._response_length)
class EnhancedCognitiveProcessor(CognitiveProcessor):
    """Extend CognitiveProcessor with multi-model ensemble processing and
    knowledge-graph integration.

    Instead of three identical requests, one request is issued per ensemble
    model; the winning response is recorded in the knowledge graph.
    """

    def __init__(self) -> None:
        super().__init__()
        self.knowledge_graph = QuantumKnowledgeGraph()
        # One request per model; per-model sampling params come from config.
        self.ensemble_models = ["deepseek-chat", "deepseek-coder"]

    def process_query(self, prompt: str) -> Dict:
        """Fan the prompt out to every ensemble model, pick the consensus
        winner, record it in the knowledge graph, and return it."""
        futures = [self.executor.submit(self._execute_api_request, prompt, model)
                   for model in self.ensemble_models]
        results: List[Dict] = []
        for future in as_completed(futures):
            try:
                results.append(future.result())
            except Exception:
                # logger.exception keeps the traceback (the parent class does
                # the same); continue so another model can still win consensus.
                logger.exception("Model processing error.")
        best_response = self._consensus_check(results)
        self._update_knowledge_graph(best_response)
        return best_response

    def _execute_api_request(self, prompt: str, model: str) -> Dict:
        """POST the prompt to the chat-completions endpoint for *model*.

        Sampling parameters are looked up per model in
        ``ResearchConfig.ENSEMBLE_MODELS``. Returns the decoded JSON on
        success, or ``{"error": ...}`` on any request/HTTP failure.
        """
        headers = {
            "Authorization": f"Bearer {ResearchConfig.DEEPSEEK_API_KEY}",
            "Content-Type": "application/json",
            "X-Research-Session": self.session_id
        }
        payload = {
            "model": model,
            "messages": [{
                "role": "user",
                "content": f"Respond as a Senior AI Researcher and Technical Writer:\n{prompt}"
            }],
            "temperature": ResearchConfig.ENSEMBLE_MODELS[model]["temp"],
            "max_tokens": ResearchConfig.ENSEMBLE_MODELS[model]["max_tokens"],
            "top_p": 0.9
        }
        try:
            response = requests.post(
                "https://api.deepseek.com/v1/chat/completions",
                headers=headers,
                json=payload,
                timeout=45
            )
            response.raise_for_status()
            logger.info(f"API request successful for model {model}.")
            return response.json()
        except requests.exceptions.RequestException as e:
            logger.exception(f"API request failed for model {model}.")
            return {"error": str(e)}

    def _update_knowledge_graph(self, response: Dict) -> None:
        """Store the winning response as an ``analysis`` node and link it to
        the previous node.

        Error responses (every ensemble request failed) are skipped so the
        graph never accumulates empty analysis nodes.
        """
        if "error" in response:
            # Nothing useful to record when all requests failed.
            return
        # `or [{}]` also covers a present-but-empty "choices": [] list, which
        # would otherwise raise IndexError on the [0] access.
        choices = response.get("choices") or [{}]
        content = choices[0].get("message", {}).get("content", "")
        node_id = self.knowledge_graph.create_node({"content": content}, "analysis")
        if self.knowledge_graph.node_counter > 1:
            # NOTE(review): assumes node ids are sequential integers so the
            # previous node is node_id - 1 — confirm against QuantumKnowledgeGraph.
            self.knowledge_graph.create_relation(node_id - 1, node_id, "evolution", strength=0.8)