mgbam committed · Commit b105f0b · verified · 1 Parent(s): cac3976

Update src/forge_agent.py

Files changed (1)
  1. src/forge_agent.py +83 -35
src/forge_agent.py CHANGED
@@ -1,5 +1,7 @@
 import asyncio
+import json
 from typing import List, Dict, Any
+from huggingface_hub import AsyncInferenceClient
 from .mcp_client import MCPClient
 
 class ToolRegistry:
@@ -30,33 +32,67 @@ class ToolRegistry:
         """Closes all client connections."""
         await asyncio.gather(*(client.close() for client in self.servers.values()))
 
-class AIAgent:
-    """A mock AI agent that generates a plan based on a goal."""
-    def __init__(self, model_name: str = "mock-planner"):
+class HuggingFaceAgent:
+    """An AI agent that uses a Hugging Face model to generate plans."""
+    def __init__(self, hf_token: str, model_name: str = "mistralai/Mixtral-8x7B-Instruct-v0.1"):
         self.model = model_name
+        self.client = AsyncInferenceClient(model=model_name, token=hf_token)
 
-    async def generate_plan(self, goal: str, available_tools: List[str]) -> List[Dict[str, Any]]:
+    def _construct_prompt(self, goal: str, available_tools: List[Dict[str, Any]], previous_steps: List = None, error: str = None) -> str:
+        """Constructs the detailed prompt for the LLM."""
+        tools_json_string = json.dumps(available_tools, indent=2)
+
+        prompt = f"""You are Forge, an autonomous AI agent. Your task is to create a step-by-step plan to achieve a goal.
+You must respond with a valid JSON array of objects, where each object represents a step in the plan.
+Each step must have 'step', 'thought', 'tool', and 'params' keys.
+The final step must always use the 'report_success' tool.
+
+Available Tools:
+{tools_json_string}
+
+Goal: "{goal}"
+"""
+        if previous_steps:
+            prompt += f"\nYou have already completed these steps:\n{json.dumps(previous_steps, indent=2)}\n"
+        if error:
+            prompt += f"\nAn error occurred during the last step: {error}\nAnalyze the error and create a new, corrected plan to achieve the original goal. Start the new plan from the current state."
+
+        prompt += "\nGenerate the JSON plan now:"
+        return prompt
+
+    async def _invoke_llm(self, prompt: str) -> List[Dict[str, Any]]:
+        """Invokes the LLM and parses the JSON response."""
+        try:
+            response = await self.client.text_generation(prompt, max_new_tokens=1024)
+            # The response might contain the JSON within backticks or other text.
+            json_response_str = response.strip().split('```json')[-1].split('```')[0].strip()
+            plan = json.loads(json_response_str)
+            if isinstance(plan, list):
+                return plan
+            else:
+                raise ValueError("LLM did not return a JSON list.")
+        except (json.JSONDecodeError, ValueError, IndexError) as e:
+            print(f"Error parsing LLM response: {e}\nRaw response:\n{response}")
+            # Fallback or re-try logic could be added here
+            return [{"step": 1, "thought": "Failed to generate a plan due to a parsing error.", "tool": "report_failure", "params": {"message": f"LLM response parsing failed: {e}"}}]
+
+    async def generate_plan(self, goal: str, available_tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
         """
         Generates a step-by-step plan.
-        In a real application, this would involve a call to a powerful LLM.
-        Here, we use a hardcoded plan for demonstration.
         """
-        await asyncio.sleep(1) # Simulate LLM thinking time
-
-        # This is a mock plan. A real LLM would generate this dynamically.
-        plan = [
-            {"step": 1, "thought": "I need a place to store the code. I'll use the `create_repo` tool.", "tool": "create_repo", "params": {"name": "my-awesome-blog", "private": False}},
-            {"step": 2, "thought": "Now I need to scaffold a new Next.js application inside a secure sandbox.", "tool": "execute_shell", "params": {"command": "npx create-next-app@latest my-awesome-blog --yes"}},
-            {"step": 3, "thought": "The project is created. I should commit the initial code.", "tool": "execute_shell", "params": {"command": "cd my-awesome-blog && git init && git add . && git commit -m 'Initial commit'"}},
-            {"step": 4, "thought": "The plan is complete. I will report success.", "tool": "report_success", "params": {"message": "Blog project scaffolded successfully."}}
-        ]
-        return plan
+        prompt = self._construct_prompt(goal, available_tools)
+        return await self._invoke_llm(prompt)
+
+    async def regenerate_plan_on_error(self, goal: str, available_tools: List[Dict[str, Any]], completed_steps: List, error_message: str) -> List[Dict[str, Any]]:
+        """Generates a new plan after an error occurred."""
+        prompt = self._construct_prompt(goal, available_tools, previous_steps=completed_steps, error=error_message)
+        return await self._invoke_llm(prompt)
 
 class ForgeApp:
     """The main orchestrator for the Forge application."""
-    def __init__(self, goal: str, mcp_server_urls: List[str]):
+    def __init__(self, goal: str, mcp_server_urls: List[str], hf_token: str):
         self.goal = goal
-        self.planner = AIAgent()
+        self.planner = HuggingFaceAgent(hf_token=hf_token)
         self.tool_registry = ToolRegistry(server_urls=mcp_server_urls)
 
     async def run(self):
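For reference, `_construct_prompt` asks the model for a JSON array of step objects with 'step', 'thought', 'tool', and 'params' keys, ending in a `report_success` step. A minimal sketch of what `_invoke_llm` would hand back after `json.loads`, reusing the tool names from the mock plan this commit removes (the thoughts and parameter values are illustrative only, not guaranteed model output):

# Illustrative parsed plan; tool names come from the old mock plan.
plan = [
    {"step": 1, "thought": "Create a repository to hold the code.",
     "tool": "create_repo", "params": {"name": "my-awesome-blog", "private": False}},
    {"step": 2, "thought": "Scaffold the Next.js app inside the sandbox.",
     "tool": "execute_shell", "params": {"command": "npx create-next-app@latest my-awesome-blog --yes"}},
    {"step": 3, "thought": "Everything is done; report success.",
     "tool": "report_success", "params": {"message": "Blog project scaffolded successfully."}},
]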
@@ -66,34 +102,46 @@ class ForgeApp:
         yield "🚀 **Starting Forge... Initializing systems.**"
         await self.tool_registry.discover_tools()
         yield f"✅ **Tool Discovery Complete.** Found {len(self.tool_registry.tools)} tools."
-
-        available_tool_names = list(self.tool_registry.tools.keys())
+
+        # Provide the LLM with full tool details, not just names
+        available_tools_details = [{"name": name, "description": data["description"]} for name, data in self.tool_registry.tools.items()]
+
         yield f"🧠 **Generating a plan for your goal:** '{self.goal}'"
-        plan = await self.planner.generate_plan(self.goal, available_tool_names)
+        plan = await self.planner.generate_plan(self.goal, available_tools_details)
         yield "📝 **Plan Generated!** Starting execution..."
 
-        for task in plan:
-            yield f"\n**[Step {task['step']}]** 🤔 **Thought:** {task['thought']}"
-
-            if task["tool"] == "report_success":
-                yield f"🎉 **Final Result:** {task['params']['message']}"
-                break
+        completed_steps = []
+        while plan:
+            task = plan.pop(0)
+            yield f"\n**[Step {task.get('step', '?')}]** 🤔 **Thought:** {task.get('thought', 'N/A')}"
+
+            tool_name = task.get("tool")
+            if tool_name in ["report_success", "report_failure"]:
+                emoji = "🎉" if tool_name == "report_success" else "🛑"
+                yield f"{emoji} **Final Result:** {task.get('params', {}).get('message', 'N/A')}"
+                plan = [] # End execution
+                continue
 
             try:
-                yield f"🛠️ **Action:** Executing tool `{task['tool']}` with params: `{task['params']}`"
-                result = await self.tool_registry.execute(task["tool"], task["params"])
+                yield f"🛠️ **Action:** Executing tool `{tool_name}` with params: `{task.get('params', {})}`"
+                result = await self.tool_registry.execute(tool_name, task.get("params", {}))
 
                 if result.get("status") == "error":
-                    yield f"❌ **Error:** {result.get('result', 'Unknown error')}"
-                    yield "🛑 **Execution Halted due to error.**"
-                    break
+                    error_message = result.get('result', 'Unknown error')
+                    yield f"❌ **Error:** {error_message}"
+                    yield "🧠 **Agent is re-evaluating the plan based on the error...**"
+                    completed_steps.append({"step": task, "outcome": "error", "details": error_message})
+                    plan = await self.planner.regenerate_plan_on_error(self.goal, available_tools_details, completed_steps, error_message)
+                    yield "📝 **New Plan Generated!** Resuming execution..."
                 else:
-                    yield f"✅ **Observation:** {result.get('result', 'Tool executed successfully.')}"
+                    observation = result.get('result', 'Tool executed successfully.')
+                    yield f"✅ **Observation:** {observation}"
+                    completed_steps.append({"step": task, "outcome": "success", "details": observation})
 
             except Exception as e:
-                yield f"❌ **Critical Error executing step {task['step']}:** {e}"
+                yield f"❌ **Critical Error executing step {task.get('step', '?')}:** {e}"
                 yield "🛑 **Execution Halted due to critical error.**"
-                break
+                plan = [] # End execution
 
         await self.tool_registry.close_all()
         yield "\n🏁 **Forge execution finished.**"
 