Update app.py
app.py
CHANGED
@@ -5,29 +5,57 @@ from mcp import StdioServerParameters
 from smolagents import InferenceClientModel, CodeAgent, ToolCollection, MCPClient
 from smolagents import LiteLLMModel
 
+# --- Configuration ---
+# Ensure you have your GOOGLE_API_KEY set as an environment variable
+# You can get one from Google AI Studio: https://aistudio.google.com/app/apikey
+API_KEY = os.getenv("GOOGLE_API_KEY")
 
+# This is the public URL of the MCP server we built.
+# It's derived from your Space name: https://huggingface.co/spaces/Agents-MCP-Hackathon/HuggingFaceDoc
+MCP_SERVER_URL = "https://agents-mcp-hackathon-huggingfacedoc.hf.space/gradio_api/mcp/sse"
+
+if not API_KEY:
+    raise ValueError("GOOGLE_API_KEY environment variable not set. Please set your API key to run this app.")
+
+# --- Main Application ---
 try:
+    print(f"Connecting to MCP Server: {MCP_SERVER_URL}")
     mcp_client = MCPClient(
-        {"url":
+        {"url": MCP_SERVER_URL}
     )
     tools = mcp_client.get_tools()
+    print(f"Successfully connected. Found {len(tools)} tools.")
 
     # model = InferenceClientModel(token=os.getenv("HUGGINGFACE_API_TOKEN"))
+
+
+    # We use LiteLLM to connect to the Gemini API
     model = LiteLLMModel(
-        model_id="gemini/gemini-
+        model_id="gemini/gemini-1.5-flash",
         temperature=0.2,
-        api_key=
+        api_key=API_KEY
     )
+
+    # The CodeAgent is effective at using tools
     agent = CodeAgent(tools=[*tools], model=model)
 
+    # Create the Gradio ChatInterface
     demo = gr.ChatInterface(
         fn=lambda message, history: str(agent.run(message)),
-
-
-
-
+        title="Hugging Face Research Agent",
+        description="This agent uses the Hugging Face Information Server to answer questions about models, datasets, and documentation.",
+        examples=[
+            "What is a Hugging Face pipeline?",
+            "Find 3 popular models for text classification",
+            "Get the info for the 'squad' dataset",
+            "What is PEFT?"
+        ],
     )
 
     demo.launch()
+
 finally:
-
+    # Ensure the connection is closed when the app stops
+    if 'mcp_client' in locals() and mcp_client.is_connected:
+        print("Disconnecting from MCP Server...")
+        mcp_client.disconnect()
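
As a quick sanity check outside the Gradio app, a minimal sketch along these lines can confirm that the Space's MCP endpoint is reachable and advertises its tools. It reuses only the MCPClient calls shown in the diff above; the assumption that each returned tool exposes a `name` attribute is for illustration.

from smolagents import MCPClient

MCP_SERVER_URL = "https://agents-mcp-hackathon-huggingfacedoc.hf.space/gradio_api/mcp/sse"

# Connect, list whatever tools the server advertises, then always disconnect.
mcp_client = MCPClient({"url": MCP_SERVER_URL})
try:
    tools = mcp_client.get_tools()
    print(f"Found {len(tools)} tools:")
    for tool in tools:
        # Assumes smolagents tool objects carry a human-readable name.
        print(f"  - {tool.name}")
finally:
    mcp_client.disconnect()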