mgbam committed
Commit cac3976 · verified · 1 Parent(s): 77a97b9

Update app.py

Files changed (1)
  1. app.py +82 -82
app.py CHANGED
@@ -1,83 +1,83 @@
import gradio as gr
import asyncio
import os
import threading
import uvicorn
import time
from src.forge_agent import ForgeApp


def run_server(app_path: str, port: int):
    """Helper function to run a uvicorn server."""
    uvicorn.run(app_path, host="127.0.0.1", port=port, log_level="info")


# --- Launch background MCP servers ---
# This code runs once when the Gradio app starts.
servers_to_run = {
    "mock_mcp_servers.github_server:app": 8001,
    "mock_mcp_servers.sandbox_server:app": 8002,
}
for app_path, port in servers_to_run.items():
    thread = threading.Thread(target=run_server, args=(app_path, port), daemon=True)
    thread.start()
time.sleep(2)  # Give the servers a moment to start


async def run_forge_agent(goal: str, hf_token: str, progress=gr.Progress(track_tqdm=True)):
    """
    The main function called by the Gradio interface.
    It instantiates and runs the ForgeApp, yielding updates to the UI.
    """
    # The handler has a single output (the Chatbot), so every yield must be a
    # list of (user, bot) message tuples.
    if not goal:
        yield [(None, "Please enter a goal.")]
        return

    if not hf_token:
        yield [(None, "Please provide a Hugging Face API Token to use the planner agent.")]
        return

    # These are the URLs for our mock servers. In a real scenario,
    # these could point to any deployed MCP server.
    mcp_server_urls = [
        "http://127.0.0.1:8001",  # Mock GitHub Server
        "http://127.0.0.1:8002",  # Mock Sandbox Server
    ]

    app = ForgeApp(goal=goal, mcp_server_urls=mcp_server_urls, hf_token=hf_token)

    # The chatbot history stores the conversation shown in the UI.
    chatbot_history = []
    full_log = ""

    # The run method is an async generator, yielding updates as it progresses.
    async for update in app.run():
        # Append the update to the full log and update the chatbot history.
        full_log += update + "\n"
        chatbot_history.append((None, update.strip()))
        yield chatbot_history


with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🚀 Forge - The Autonomous AI Agent")
    gr.Markdown("Enter a high-level goal, and watch the AI agent create and execute a plan to achieve it.")

    chatbot = gr.Chatbot(label="Agent Log", height=500, show_copy_button=True)

    with gr.Row():
        goal_input = gr.Textbox(
            label="Agent's Goal",
            placeholder="e.g., Scaffold a new Next.js blog and create a GitHub repo for it.",
            scale=4,
        )
        run_button = gr.Button("Start", variant="primary", scale=1)

    with gr.Accordion("Advanced Settings", open=False):
        hf_token_input = gr.Textbox(
            label="Hugging Face API Token",
            placeholder="hf_...",
            type="password",
            value=os.environ.get("HF_TOKEN", ""),  # Read from Space secrets if available
        )

    run_button.click(fn=run_forge_agent, inputs=[goal_input, hf_token_input], outputs=[chatbot])

if __name__ == "__main__":
    demo.launch()
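Note: the mock_mcp_servers.github_server and mock_mcp_servers.sandbox_server modules are not part of this commit. Each uvicorn import string only needs to resolve to an ASGI app, so a minimal FastAPI module would be enough for the background threads to start; the sketch below is purely illustrative, and the endpoint names and payload fields are made up.

# mock_mcp_servers/github_server.py (hypothetical sketch, not from this commit)
from fastapi import FastAPI

app = FastAPI(title="Mock GitHub MCP Server")

@app.get("/health")
def health() -> dict:
    # Lets callers confirm the background server actually came up.
    return {"status": "ok"}

@app.post("/tools/create_repo")
def create_repo(payload: dict) -> dict:
    # Pretend to create a repository and return a fake URL.
    name = payload.get("name", "unnamed-repo")
    return {"ok": True, "repo_url": f"https://github.com/example/{name}"}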
 
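Similarly, app.py only assumes that src.forge_agent.ForgeApp takes goal, mcp_server_urls, and hf_token, and that its run() method is an async generator of status strings. A stub with that shape (an illustrative placeholder, not the real planner/executor) would look roughly like this:

# src/forge_agent.py (illustrative stub of the interface app.py relies on)
import asyncio
from typing import AsyncIterator, List

class ForgeApp:
    """Stub ForgeApp: same constructor and run() shape as app.py expects."""

    def __init__(self, goal: str, mcp_server_urls: List[str], hf_token: str) -> None:
        self.goal = goal
        self.mcp_server_urls = mcp_server_urls
        self.hf_token = hf_token

    async def run(self) -> AsyncIterator[str]:
        # Yield progress messages; the Gradio handler streams each one into the Chatbot.
        yield f"Planning how to achieve: {self.goal}"
        for url in self.mcp_server_urls:
            await asyncio.sleep(0)  # hand control back to the event loop
            yield f"(stub) would call the MCP server at {url}"
        yield "Done."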