Admin Idiakhoa committed on
Commit
915668d
·
1 Parent(s): 15b7523

Add Hugging Face Space app files

README.md CHANGED
@@ -1,12 +1,38 @@
  ---
- title: Mcp
- emoji: ⚡
- colorFrom: yellow
- colorTo: yellow
+ title: Forge AI Agent
+ emoji: 🚀
+ colorFrom: blue
+ colorTo: green
  sdk: gradio
- sdk_version: 5.36.2
+ sdk_version: 4.28.3
+ python_version: 3.10
  app_file: app.py
- pinned: false
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Forge - The Autonomous AI Software Engineer
+
+ This Hugging Face Space demonstrates the "Forge" concept: an autonomous AI agent that can understand a high-level goal, create a plan, and execute it using a suite of tools exposed via the Model Context Protocol (MCP).
+
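+ The mock servers speak a minimal HTTP flavor of MCP: tools are discovered via `GET /mcp/tools` and invoked via `POST /mcp/tools/{tool_name}` with a `{"params": {...}}` body. As a quick illustration (assuming the mock GitHub server from this repo is running locally on port 8001), a manual call looks like this:
+
+ ```python
+ import httpx
+
+ # Discover the tools the mock GitHub server exposes.
+ print(httpx.get("http://127.0.0.1:8001/mcp/tools").json())
+
+ # Invoke a tool; the request body wraps the tool arguments under "params".
+ print(httpx.post(
+     "http://127.0.0.1:8001/mcp/tools/create_repo",
+     json={"params": {"name": "my-awesome-blog", "private": False}},
+ ).json())
+ ```
+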
+ ## How It Works
+
+ 1. **User Goal**: You provide a high-level software development task in the textbox.
+ 2. **Planning**: An AI agent (mocked in this demo) receives the goal and the list of available tools, then generates a step-by-step plan (an example plan is shown after this list).
+ 3. **Execution**: The Forge orchestrator executes each step of the plan by calling the appropriate tool on a mock MCP server.
+ 4. **Live Feedback**: The agent's thoughts, actions, and results are streamed to the UI in real time.
+
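+ A plan is a JSON array of steps, each with `step`, `thought`, `tool`, and `params` keys; the final step reports the outcome. An abbreviated example, in the same shape as the demo's hardcoded plan:
+
+ ```json
+ [
+   {"step": 1, "thought": "I need a place to store the code.", "tool": "create_repo", "params": {"name": "my-awesome-blog", "private": false}},
+   {"step": 2, "thought": "The plan is complete.", "tool": "report_success", "params": {"message": "Blog project scaffolded successfully."}}
+ ]
+ ```
+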
+ ## Running Locally
+
+ The app launches its mock MCP servers automatically on startup, but for local development you can also run them yourself in a separate process:
+
+ 1. **Install dependencies:**
+    ```bash
+    pip install -r requirements.txt
+    ```
+ 2. **Run the mock servers in a separate terminal:**
+    ```bash
+    python mock_mcp_servers/run_servers.py
+    ```
+ 3. **Run the Gradio app:**
+    ```bash
+    gradio app.py
+    ```
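+
+ To use the real planner (`HuggingFaceAgent` in `forge_agent.py`), provide a Hugging Face API token in the UI's "Advanced Settings"; the app will also pick it up from the `HF_TOKEN` environment variable (or a Space secret) if set, e.g.:
+
+ ```bash
+ export HF_TOKEN=hf_...  # replace with your own token
+ ```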
app.py ADDED
@@ -0,0 +1,83 @@
+ import gradio as gr
+ import asyncio
+ import os
+ import multiprocessing
+ import uvicorn
+ import time
+ from src.forge_agent import ForgeApp
+
+ def run_server(app_path: str, port: int):
+     """Helper function to run a uvicorn server."""
+     uvicorn.run(app_path, host="127.0.0.1", port=port, log_level="info")
+
+ # --- Launch background MCP servers ---
+ # This code runs once when the Gradio app starts.
+ servers_to_run = {
+     "mock_mcp_servers.github_server:app": 8001,
+     "mock_mcp_servers.sandbox_server:app": 8002,
+ }
+ for app_path, port in servers_to_run.items():
+     process = multiprocessing.Process(target=run_server, args=(app_path, port), daemon=True)
+     process.start()
+ time.sleep(2)  # Give servers a moment to start
+
+ async def run_forge_agent(goal: str, hf_token: str, progress=gr.Progress(track_tqdm=True)):
+     """
+     The main function to be called by the Gradio interface.
+     It instantiates and runs the ForgeApp, yielding updates to the UI.
+     """
+     if not goal:
+         yield [(None, "Please enter a goal.")]
+         return
+
+     if not hf_token:
+         yield [(None, "Please provide a Hugging Face API Token to use the planner agent.")]
+         return
+
+     # These are the URLs for our mock servers. In a real scenario,
+     # these could point to any deployed MCP server.
+     mcp_server_urls = [
+         "http://127.0.0.1:8001",  # Mock GitHub Server
+         "http://127.0.0.1:8002",  # Mock Sandbox Server
+     ]
+
+     app = ForgeApp(goal=goal, mcp_server_urls=mcp_server_urls, hf_token=hf_token)
+
+     # The chatbot history will store the conversation.
+     chatbot_history = []
+     full_log = ""
+
+     # The run method is a generator, yielding updates as it progresses.
+     async for update in app.run():
+         # Append the update to the full log and update the chatbot history.
+         full_log += update + "\n"
+         chatbot_history.append((None, update.strip()))
+         yield chatbot_history
+
+
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
+     gr.Markdown("# 🚀 Forge - The Autonomous AI Agent")
+     gr.Markdown("Enter a high-level goal, and watch the AI agent create and execute a plan to achieve it.")
+
+     chatbot = gr.Chatbot(label="Agent Log", height=500, show_copy_button=True)
+
+     with gr.Row():
+         goal_input = gr.Textbox(
+             label="Agent's Goal",
+             placeholder="e.g., Scaffold a new Next.js blog and create a GitHub repo for it.",
+             scale=4,
+         )
+         run_button = gr.Button("Start", variant="primary", scale=1)
+
+     with gr.Accordion("Advanced Settings", open=False):
+         hf_token_input = gr.Textbox(
+             label="Hugging Face API Token",
+             placeholder="hf_...",
+             type="password",
+             value=os.environ.get("HF_TOKEN", ""),  # Read from Space secrets if available
+         )
+
+     run_button.click(fn=run_forge_agent, inputs=[goal_input, hf_token_input], outputs=[chatbot])
+
+ if __name__ == "__main__":
+     demo.launch()
forge_agent.py ADDED
@@ -0,0 +1,147 @@
+ import asyncio
+ import json
+ from typing import List, Dict, Any
+ from huggingface_hub import AsyncInferenceClient
+ from src.mcp_client import MCPClient  # MCPClient lives in the src/ package
+
+ class ToolRegistry:
+     """Manages connections to all required MCP servers and their tools."""
+     def __init__(self, server_urls: List[str]):
+         self.servers: Dict[str, MCPClient] = {url: MCPClient(url) for url in server_urls}
+         self.tools: Dict[str, Dict[str, Any]] = {}
+
+     async def discover_tools(self):
+         """Discovers all available tools from all connected MCP servers."""
+         discovery_tasks = [client.list_tools() for client in self.servers.values()]
+         results = await asyncio.gather(*discovery_tasks, return_exceptions=True)
+
+         for i, client in enumerate(self.servers.values()):
+             server_tools = results[i]
+             if isinstance(server_tools, list):
+                 for tool in server_tools:
+                     self.tools[tool["name"]] = {"client": client, "description": tool["description"]}
+
+     async def execute(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
+         """Finds the correct MCP server and executes the tool."""
+         if tool_name not in self.tools:
+             raise ValueError(f"Tool '{tool_name}' not found in registry.")
+         tool_info = self.tools[tool_name]
+         return await tool_info["client"].execute_tool(tool_name, params)
+
+     async def close_all(self):
+         """Closes all client connections."""
+         await asyncio.gather(*(client.close() for client in self.servers.values()))
+
+ class HuggingFaceAgent:
+     """An AI agent that uses a Hugging Face model to generate plans."""
+     def __init__(self, hf_token: str, model_name: str = "mistralai/Mixtral-8x7B-Instruct-v0.1"):
+         self.model = model_name
+         self.client = AsyncInferenceClient(model=model_name, token=hf_token)
+
+     def _construct_prompt(self, goal: str, available_tools: List[Dict[str, Any]], previous_steps: List = None, error: str = None) -> str:
+         """Constructs the detailed prompt for the LLM."""
+         tools_json_string = json.dumps(available_tools, indent=2)
+
+         prompt = f"""You are Forge, an autonomous AI agent. Your task is to create a step-by-step plan to achieve a goal.
+ You must respond with a valid JSON array of objects, where each object represents a step in the plan.
+ Each step must have 'step', 'thought', 'tool', and 'params' keys.
+ The final step must always use the 'report_success' tool.
+
+ Available Tools:
+ {tools_json_string}
+
+ Goal: "{goal}"
+ """
+         if previous_steps:
+             prompt += f"\nYou have already completed these steps:\n{json.dumps(previous_steps, indent=2)}\n"
+         if error:
+             prompt += f"\nAn error occurred during the last step: {error}\nAnalyze the error and create a new, corrected plan to achieve the original goal. Start the new plan from the current state."
+
+         prompt += "\nGenerate the JSON plan now:"
+         return prompt
+
+     async def _invoke_llm(self, prompt: str) -> List[Dict[str, Any]]:
+         """Invokes the LLM and parses the JSON response."""
+         try:
+             response = await self.client.text_generation(prompt, max_new_tokens=1024)
+             # The response might contain the JSON within backticks or other text.
+             json_response_str = response.strip().split('```json')[-1].split('```')[0].strip()
+             plan = json.loads(json_response_str)
+             if isinstance(plan, list):
+                 return plan
+             else:
+                 raise ValueError("LLM did not return a JSON list.")
+         except (json.JSONDecodeError, ValueError, IndexError) as e:
+             print(f"Error parsing LLM response: {e}\nRaw response:\n{response}")
+             # Fallback or retry logic could be added here.
+             return [{"step": 1, "thought": "Failed to generate a plan due to a parsing error.", "tool": "report_failure", "params": {"message": f"LLM response parsing failed: {e}"}}]
+
+     async def generate_plan(self, goal: str, available_tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+         """
+         Generates a step-by-step plan.
+         """
+         prompt = self._construct_prompt(goal, available_tools)
+         return await self._invoke_llm(prompt)
+
+     async def regenerate_plan_on_error(self, goal: str, available_tools: List[Dict[str, Any]], completed_steps: List, error_message: str) -> List[Dict[str, Any]]:
+         """Generates a new plan after an error occurred."""
+         prompt = self._construct_prompt(goal, available_tools, previous_steps=completed_steps, error=error_message)
+         return await self._invoke_llm(prompt)
+
+ class ForgeApp:
+     """The main orchestrator for the Forge application."""
+     def __init__(self, goal: str, mcp_server_urls: List[str], hf_token: str):
+         self.goal = goal
+         self.planner = HuggingFaceAgent(hf_token=hf_token)
+         self.tool_registry = ToolRegistry(server_urls=mcp_server_urls)
+
+     async def run(self):
+         """
+         Runs the agent and yields status updates as a generator.
+         """
+         yield "🚀 **Starting Forge... Initializing systems.**"
+         await self.tool_registry.discover_tools()
+         yield f"✅ **Tool Discovery Complete.** Found {len(self.tool_registry.tools)} tools."
+
+         # Provide the LLM with full tool details, not just names.
+         available_tools_details = [{"name": name, "description": data["description"]} for name, data in self.tool_registry.tools.items()]
+
+         yield f"🧠 **Generating a plan for your goal:** '{self.goal}'"
+         plan = await self.planner.generate_plan(self.goal, available_tools_details)
+         yield "📝 **Plan Generated!** Starting execution..."
+
+         completed_steps = []
+         while plan:
+             task = plan.pop(0)
+             yield f"\n**[Step {task.get('step', '?')}]** 🤔 **Thought:** {task.get('thought', 'N/A')}"
+
+             tool_name = task.get("tool")
+             if tool_name in ["report_success", "report_failure"]:
+                 emoji = "🎉" if tool_name == "report_success" else "🛑"
+                 yield f"{emoji} **Final Result:** {task.get('params', {}).get('message', 'N/A')}"
+                 plan = []  # End execution
+                 continue
+
+             try:
+                 yield f"🛠️ **Action:** Executing tool `{tool_name}` with params: `{task.get('params', {})}`"
+                 result = await self.tool_registry.execute(tool_name, task.get("params", {}))
+
+                 if result.get("status") == "error":
+                     error_message = result.get('result', 'Unknown error')
+                     yield f"❌ **Error:** {error_message}"
+                     yield "🧠 **Agent is re-evaluating the plan based on the error...**"
+                     completed_steps.append({"step": task, "outcome": "error", "details": error_message})
+                     plan = await self.planner.regenerate_plan_on_error(self.goal, available_tools_details, completed_steps, error_message)
+                     yield "📝 **New Plan Generated!** Resuming execution..."
+                 else:
+                     observation = result.get('result', 'Tool executed successfully.')
+                     yield f"✅ **Observation:** {observation}"
+                     completed_steps.append({"step": task, "outcome": "success", "details": observation})
+
+             except Exception as e:
+                 yield f"❌ **Critical Error executing step {task.get('step', '?')}:** {e}"
+                 yield "🛑 **Execution Halted due to critical error.**"
+                 plan = []  # End execution
+
+         await self.tool_registry.close_all()
+         yield "\n🏁 **Forge execution finished.**"
mock_mcp_servers/github_server.py ADDED
@@ -0,0 +1,22 @@
+ from fastapi import FastAPI
+
+ app = FastAPI()
+
+ @app.get("/mcp/tools")
+ async def list_tools():
+     return {
+         "tools": [
+             {
+                 "name": "create_repo",
+                 "description": "Creates a new GitHub repository.",
+                 "input_schema": {"name": "string", "private": "boolean"},
+             }
+         ]
+     }
+
+ @app.post("/mcp/tools/create_repo")
+ async def create_repo(payload: dict):
+     params = payload.get("params", {})
+     repo_name = params.get("name", "unnamed-repo")
+     print(f"[GitHub Server] Received request to create repo: {repo_name}")
+     return {"status": "success", "result": f"Successfully created GitHub repository '{repo_name}'."}
mock_mcp_servers/run_servers.py ADDED
@@ -0,0 +1,18 @@
+ import uvicorn
+ import multiprocessing
+
+ def run_github_server():
+     uvicorn.run("mock_mcp_servers.github_server:app", host="127.0.0.1", port=8001, log_level="info")
+
+ def run_sandbox_server():
+     uvicorn.run("mock_mcp_servers.sandbox_server:app", host="127.0.0.1", port=8002, log_level="info")
+
+ if __name__ == "__main__":
+     p1 = multiprocessing.Process(target=run_github_server)
+     p2 = multiprocessing.Process(target=run_sandbox_server)
+
+     p1.start()
+     p2.start()
+
+     p1.join()
+     p2.join()
mock_mcp_servers/sandbox_server.py ADDED
@@ -0,0 +1,50 @@
+ from fastapi import FastAPI
+ import subprocess
+ import os
+
+ app = FastAPI()
+
+ @app.get("/mcp/tools")
+ async def list_tools():
+     return {
+         "tools": [
+             {
+                 "name": "execute_shell",
+                 "description": "Executes a shell command in a secure sandbox.",
+                 "input_schema": {"type": "object", "properties": {"command": {"type": "string"}}, "required": ["command"]},
+             },
+             {
+                 "name": "list_files",
+                 "description": "Lists files and directories in a given path within the sandbox.",
+                 "input_schema": {"type": "object", "properties": {"path": {"type": "string"}}, "required": ["path"]},
+             }
+         ]
+     }
+
+ @app.post("/mcp/tools/execute_shell")
+ async def execute_shell(payload: dict):
+     params = payload.get("params", {})
+     command = params.get("command")
+     if not command:
+         return {"status": "error", "result": "No command provided."}
+
+     print(f"[Sandbox Server] Executing: {command}")
+     try:
+         # In a real-world scenario, this would be a heavily secured, isolated container.
+         # For this demo, we use subprocess with a timeout.
+         result = subprocess.run(command, shell=True, capture_output=True, text=True, timeout=30, check=True)
+         output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
+         return {"status": "success", "result": output}
+     except subprocess.CalledProcessError as e:
+         return {"status": "error", "result": f"Command failed with exit code {e.returncode}.\nSTDOUT:\n{e.stdout}\nSTDERR:\n{e.stderr}"}
+     except subprocess.TimeoutExpired:
+         return {"status": "error", "result": "Command timed out after 30 seconds."}
+
+ @app.post("/mcp/tools/list_files")
+ async def list_files(payload: dict):
+     path = payload.get("params", {}).get("path", ".")
+     try:
+         files = os.listdir(path)
+         return {"status": "success", "result": f"Files in '{path}': {files}"}
+     except FileNotFoundError:
+         return {"status": "error", "result": f"Path not found: {path}"}
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ gradio==4.28.3
+ httpx==0.27.0
+ fastapi==0.111.0
+ uvicorn==0.29.0
+ aiohttp==3.9.5
+ huggingface_hub==0.22.2
src/forge_agent.py ADDED
@@ -0,0 +1,99 @@
+ import asyncio
+ from typing import List, Dict, Any
+ from .mcp_client import MCPClient
+
+ class ToolRegistry:
+     """Manages connections to all required MCP servers and their tools."""
+     def __init__(self, server_urls: List[str]):
+         self.servers: Dict[str, MCPClient] = {url: MCPClient(url) for url in server_urls}
+         self.tools: Dict[str, Dict[str, Any]] = {}
+
+     async def discover_tools(self):
+         """Discovers all available tools from all connected MCP servers."""
+         discovery_tasks = [client.list_tools() for client in self.servers.values()]
+         results = await asyncio.gather(*discovery_tasks, return_exceptions=True)
+
+         for i, client in enumerate(self.servers.values()):
+             server_tools = results[i]
+             if isinstance(server_tools, list):
+                 for tool in server_tools:
+                     self.tools[tool["name"]] = {"client": client, "description": tool["description"]}
+
+     async def execute(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
+         """Finds the correct MCP server and executes the tool."""
+         if tool_name not in self.tools:
+             raise ValueError(f"Tool '{tool_name}' not found in registry.")
+         tool_info = self.tools[tool_name]
+         return await tool_info["client"].execute_tool(tool_name, params)
+
+     async def close_all(self):
+         """Closes all client connections."""
+         await asyncio.gather(*(client.close() for client in self.servers.values()))
+
+ class AIAgent:
+     """A mock AI agent that generates a plan based on a goal."""
+     def __init__(self, model_name: str = "mock-planner"):
+         self.model = model_name
+
+     async def generate_plan(self, goal: str, available_tools: List[str]) -> List[Dict[str, Any]]:
+         """
+         Generates a step-by-step plan.
+         In a real application, this would involve a call to a powerful LLM.
+         Here, we use a hardcoded plan for demonstration.
+         """
+         await asyncio.sleep(1)  # Simulate LLM thinking time
+
+         # This is a mock plan. A real LLM would generate this dynamically.
+         plan = [
+             {"step": 1, "thought": "I need a place to store the code. I'll use the `create_repo` tool.", "tool": "create_repo", "params": {"name": "my-awesome-blog", "private": False}},
+             {"step": 2, "thought": "Now I need to scaffold a new Next.js application inside a secure sandbox.", "tool": "execute_shell", "params": {"command": "npx create-next-app@latest my-awesome-blog --yes"}},
+             {"step": 3, "thought": "The project is created. I should commit the initial code.", "tool": "execute_shell", "params": {"command": "cd my-awesome-blog && git init && git add . && git commit -m 'Initial commit'"}},
+             {"step": 4, "thought": "The plan is complete. I will report success.", "tool": "report_success", "params": {"message": "Blog project scaffolded successfully."}}
+         ]
+         return plan
+
+ class ForgeApp:
+     """The main orchestrator for the Forge application."""
+     def __init__(self, goal: str, mcp_server_urls: List[str], hf_token: str = None):  # hf_token is unused by the mock planner; accepted for interface parity with app.py
+         self.goal = goal
+         self.planner = AIAgent()
+         self.tool_registry = ToolRegistry(server_urls=mcp_server_urls)
+
+     async def run(self):
+         """
+         Runs the agent and yields status updates as a generator.
+         """
+         yield "🚀 **Starting Forge... Initializing systems.**"
+         await self.tool_registry.discover_tools()
+         yield f"✅ **Tool Discovery Complete.** Found {len(self.tool_registry.tools)} tools."
+
+         available_tool_names = list(self.tool_registry.tools.keys())
+         yield f"🧠 **Generating a plan for your goal:** '{self.goal}'"
+         plan = await self.planner.generate_plan(self.goal, available_tool_names)
+         yield "📝 **Plan Generated!** Starting execution..."
+
+         for task in plan:
+             yield f"\n**[Step {task['step']}]** 🤔 **Thought:** {task['thought']}"
+
+             if task["tool"] == "report_success":
+                 yield f"🎉 **Final Result:** {task['params']['message']}"
+                 break
+
+             try:
+                 yield f"🛠️ **Action:** Executing tool `{task['tool']}` with params: `{task['params']}`"
+                 result = await self.tool_registry.execute(task["tool"], task["params"])
+
+                 if result.get("status") == "error":
+                     yield f"❌ **Error:** {result.get('result', 'Unknown error')}"
+                     yield "🛑 **Execution Halted due to error.**"
+                     break
+                 else:
+                     yield f"✅ **Observation:** {result.get('result', 'Tool executed successfully.')}"
+
+             except Exception as e:
+                 yield f"❌ **Critical Error executing step {task['step']}:** {e}"
+                 yield "🛑 **Execution Halted due to critical error.**"
+                 break
+
+         await self.tool_registry.close_all()
+         yield "\n🏁 **Forge execution finished.**"
src/mcp_client.py ADDED
@@ -0,0 +1,45 @@
+ import httpx
+ from typing import List, Dict, Any
+
+ class MCPClient:
+     """
+     A client for interacting with a Model Context Protocol (MCP) server.
+     Handles listing and executing tools via HTTP requests.
+     """
+     def __init__(self, server_url: str):
+         self.server_url = server_url.rstrip('/')
+         self.http_client = httpx.AsyncClient(timeout=30.0)
+
+     async def list_tools(self) -> List[Dict[str, Any]]:
+         """Fetches the list of available tools from the MCP server."""
+         try:
+             response = await self.http_client.get(f"{self.server_url}/mcp/tools")
+             response.raise_for_status()
+             tools_response = response.json()
+             # Ensure the response is in the expected format.
+             if "tools" in tools_response and isinstance(tools_response["tools"], list):
+                 return tools_response["tools"]
+             return []
+         except (httpx.RequestError, httpx.HTTPStatusError) as e:
+             print(f"Error fetching tools from {self.server_url}: {e}")
+             return []
+
+     async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
+         """Executes a specific tool on the MCP server with the given parameters."""
+         try:
+             response = await self.http_client.post(
+                 f"{self.server_url}/mcp/tools/{tool_name}",
+                 json={"params": params}
+             )
+             response.raise_for_status()
+             return response.json()
+         except (httpx.RequestError, httpx.HTTPStatusError) as e:
+             print(f"Error executing tool '{tool_name}' on {self.server_url}: {e}")
+             return {
+                 "status": "error",
+                 "result": f"Failed to connect or execute tool on {self.server_url}. Error: {e}"
+             }
+
+     async def close(self):
+         """Closes the underlying HTTP client."""
+         await self.http_client.aclose()