Create src/chimera/core/orchestrator.py (The MCP)
src/chimera/core/orchestrator.py (The MCP)
ADDED
@@ -0,0 +1,85 @@
# src/chimera/core/orchestrator.py
import asyncio
from ..api_clients import gemini_client, serp_client  # , external_apis
from ..utils.logging_config import logger
from ..utils import data_processing

async def run_analysis(user_query: str) -> str:
    """
    Main orchestration logic for Project Chimera.
    1. Interprets the user query (simple keyword check for now).
    2. Calls relevant APIs concurrently.
    3. Gathers data.
    4. Formats data and creates a prompt for Gemini.
    5. Calls Gemini for analysis.
    6. Returns the result.
    """
    logger.info(f"Received query: {user_query}")

    # Step 1: Basic query interpretation (replace with more sophisticated logic/LLM call if needed)
    tasks = []
    if "news" in user_query.lower() or "search" in user_query.lower():
        # Extract keywords or use the whole query for SERP
        search_term = user_query  # Or extract a better term
        logger.info("Adding SERP task.")
        tasks.append(asyncio.create_task(serp_client.search_google(search_term, num_results=5)))
    # Add conditions for other APIs based on keywords (e.g., "weather", "stock", "earthquake")
    # if "weather" in user_query.lower():
    #     location = "New York"  # Extract location from query
    #     logger.info("Adding Weather task.")
    #     tasks.append(asyncio.create_task(external_apis.get_weather(location)))

    if not tasks:
        logger.warning("No relevant APIs identified for the query.")
        # Fallback: just send the raw query to Gemini, or ask the user for clarification?
        # For now, send the query directly for general-knowledge analysis.
        pass  # Let Gemini handle the query directly without external data

    # Steps 2 & 3: Call APIs concurrently and gather data
    api_results = {}
    if tasks:
        logger.info(f"Gathering data from {len(tasks)} API(s)...")
        results = await asyncio.gather(*tasks, return_exceptions=True)  # Collect all results/exceptions
        logger.info("API data gathering complete.")

        # Process results (basic example)
        # Check result types or assigned task names if needed
        if isinstance(results[0], dict) and "organic_results" in results[0]:
            api_results["serp"] = results[0]
        # Add checks and assignments for other potential API results

        # Handle potential errors from asyncio.gather
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                logger.error(f"API call task {i} failed: {result}")
                # Decide how to handle partial failures - inform Gemini? Return an error?
                # For now, just log it and proceed with whatever data we got.
            elif isinstance(result, dict) and "error" in result:
                logger.error(f"API call task {i} reported an error: {result['error']}")

    # Step 4: Format data and create the Gemini prompt
    # Process the gathered data into a readable format for the LLM
    formatted_data = data_processing.format_api_data_for_llm(api_results)

    # Construct the final prompt
    # This is CRITICAL - prompt engineering is key here!
    prompt = f"""
    Analyze the following user query and synthesized real-time data to provide insights, identify patterns, potential solutions, or opportunities.

    User Query: "{user_query}"

    Synthesized Data:
    ---
    {formatted_data if formatted_data else "No additional real-time data was gathered for this query."}
    ---

    Based on the query and the data (if provided), please provide a comprehensive analysis. Consider potential implications, connections between data points, and answer the user's core question or request. If suggesting solutions or opportunities, be specific and justify your reasoning.
    """

    # Step 5: Call Gemini
    logger.info("Sending final prompt to Gemini for analysis.")
    analysis_result = await gemini_client.generate_analysis(prompt)

    # Step 6: Return result
    logger.info("Analysis complete.")
    return analysis_result
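
For reference, a minimal usage sketch of the orchestrator follows. It is not part of this commit: it assumes the src/ layout makes the chimera package importable (e.g., installed or on PYTHONPATH) and that the Gemini and SERP clients are already configured with valid API keys; the entry-point file name and query string are illustrative only.

# example_entrypoint.py (hypothetical caller, not part of the diff above)
import asyncio

from chimera.core.orchestrator import run_analysis

async def main() -> None:
    # The "news"/"search" keywords route the query through the SERP task before Gemini analysis.
    result = await run_analysis("Search for recent news on grid-scale battery storage")
    print(result)

if __name__ == "__main__":
    asyncio.run(main())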