import gradio as gr
import os
import threading
import uvicorn
import time
from src.forge_agent import ForgeApp

def run_server(app_path: str, port: int):
    """Helper function to run a uvicorn server."""
    uvicorn.run(app_path, host="127.0.0.1", port=port, log_level="info")

async def run_forge_agent(goal: str, hf_token: str, progress=gr.Progress(track_tqdm=True)):
    """
    The main function to be called by the Gradio interface.
    It instantiates and runs the ForgeApp, yielding updates to the UI.
    """
    if not goal:
        yield [(None, "Please enter a goal.")]
        return

    if not hf_token:
        yield [(None, "Please provide a Hugging Face API Token to use the planner agent.")]
        return

    # These are the URLs for our mock servers. In a real scenario,
    # these could point to any deployed MCP server.
    mcp_server_urls = [
        "http://127.0.0.1:8001",  # Mock GitHub Server
        "http://127.0.0.1:8002",  # Mock Sandbox Server
    ]

    app = ForgeApp(goal=goal, mcp_server_urls=mcp_server_urls, hf_token=hf_token)
    
    # The chatbot history will store the conversation
    chatbot_history = []
    full_log = ""

    # The run method is a generator, yielding updates as it progresses.
    async for update in app.run():
        # Append the update to the full log and update the chatbot history
        full_log += update + "\n"
        chatbot_history.append((None, update.strip()))
        yield chatbot_history


with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# πŸš€ Forge - The Autonomous AI Agent")
    gr.Markdown("Enter a high-level goal, and watch the AI agent create and execute a plan to achieve it.")

    chatbot = gr.Chatbot(label="Agent Log", height=500, show_copy_button=True)
    
    with gr.Row():
        goal_input = gr.Textbox(
            label="Agent's Goal",
            placeholder="e.g., Scaffold a new Next.js blog and create a GitHub repo for it.",
            scale=4,
        )
        run_button = gr.Button("Start", variant="primary", scale=1)
    
    with gr.Accordion("Advanced Settings", open=False):
        hf_token_input = gr.Textbox(
            label="Hugging Face API Token",
            placeholder="hf_...",
            type="password",
            value=os.environ.get("HF_TOKEN", ""), # Read from Space secrets if available
        )

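    # run_forge_agent is an async generator, so Gradio streams each yielded
    # chatbot history to the Chatbot component as the agent progresses.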
    run_button.click(fn=run_forge_agent, inputs=[goal_input, hf_token_input], outputs=[chatbot])

if __name__ == "__main__":
    # --- Launch background MCP servers ---
    # This code runs once when the Gradio app starts.
    servers_to_run = {
        "mock_mcp_servers.github_server:app": 8001,
        "mock_mcp_servers.sandbox_server:app": 8002,
    }
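    # daemon=True lets the interpreter exit without waiting for the server threads.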
    for app_path, port in servers_to_run.items():
        thread = threading.Thread(target=run_server, args=(app_path, port), daemon=True)
        thread.start()
    time.sleep(2) # Give servers a moment to start

    demo.launch()