Upload folder using huggingface_hub
README.md
CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
 colorFrom: indigo
 colorTo: indigo
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.10.0
 app_file: run.py
 pinned: false
 hf_oauth: true
requirements.txt
CHANGED
@@ -1 +1 @@
-
+transformers>=4.47.0
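
Not part of the commit: the transformers>=4.47.0 pin is what provides the transformers.agents module that run.py imports below. A minimal sanity check, assuming a fresh environment with this requirement installed:

# Sanity check (not part of the commit): confirm the installed transformers
# build exposes the agents API that run.py relies on.
import transformers
from transformers.agents import HfApiEngine, stream_to_gradio  # noqa: F401

print(transformers.__version__)  # expect a version >= 4.47.0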
run.ipynb
CHANGED
@@ -1 +1 @@
-{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: agent_chatbot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio
+{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: agent_chatbot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio transformers>=4.47.0"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from dataclasses import asdict\n", "from transformers import Tool, ReactCodeAgent # type: ignore\n", "from transformers.agents import stream_to_gradio, HfApiEngine # type: ignore\n", "\n", "# Import tool from Hub\n", "image_generation_tool = Tool.from_space( # type: ignore\n", " space_id=\"black-forest-labs/FLUX.1-schnell\",\n", " name=\"image_generator\",\n", " description=\"Generates an image following your prompt. Returns a PIL Image.\",\n", " api_name=\"/infer\",\n", ")\n", "\n", "llm_engine = HfApiEngine(\"Qwen/Qwen2.5-Coder-32B-Instruct\")\n", "# Initialize the agent with both tools and engine\n", "agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)\n", "\n", "\n", "def interact_with_agent(prompt, history):\n", " messages = []\n", " yield messages\n", " for msg in stream_to_gradio(agent, prompt):\n", " messages.append(asdict(msg)) # type: ignore\n", " yield messages\n", " yield messages\n", "\n", "\n", "demo = gr.ChatInterface(\n", " interact_with_agent,\n", " chatbot= gr.Chatbot(\n", " label=\"Agent\",\n", " type=\"messages\",\n", " avatar_images=(\n", " None,\n", " \"https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png\",\n", " ),\n", " ),\n", " examples=[\n", " [\"Generate an image of an astronaut riding an alligator\"],\n", " [\"I am writing a children's book for my daughter. Can you help me with some illustrations?\"],\n", " ],\n", " type=\"messages\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py
CHANGED
@@ -1,30 +1,46 @@
 import gradio as gr
-from
-from transformers import
-from
+from dataclasses import asdict
+from transformers import Tool, ReactCodeAgent # type: ignore
+from transformers.agents import stream_to_gradio, HfApiEngine # type: ignore
 
 # Import tool from Hub
-image_generation_tool =
+image_generation_tool = Tool.from_space( # type: ignore
+    space_id="black-forest-labs/FLUX.1-schnell",
+    name="image_generator",
+    description="Generates an image following your prompt. Returns a PIL Image.",
+    api_name="/infer",
+)
 
-llm_engine =
-# Initialize the agent with both tools
+llm_engine = HfApiEngine("Qwen/Qwen2.5-Coder-32B-Instruct")
+# Initialize the agent with both tools and engine
 agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)
 
-
-
+
+def interact_with_agent(prompt, history):
+    messages = []
     yield messages
-    for msg in
-        messages.append(msg)
+    for msg in stream_to_gradio(agent, prompt):
+        messages.append(asdict(msg)) # type: ignore
         yield messages
     yield messages
 
-
-
-
-
-
-
-
+
+demo = gr.ChatInterface(
+    interact_with_agent,
+    chatbot= gr.Chatbot(
+        label="Agent",
+        type="messages",
+        avatar_images=(
+            None,
+            "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
+        ),
+    ),
+    examples=[
+        ["Generate an image of an astronaut riding an alligator"],
+        ["I am writing a children's book for my daughter. Can you help me with some illustrations?"],
+    ],
+    type="messages",
+)
 
 if __name__ == "__main__":
     demo.launch()
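
Not part of the commit: a minimal sketch for exercising the same tool and engine wiring locally without the Gradio UI. It reuses the calls already present in run.py plus agent.run() for one-shot execution; treat the exact return value as an assumption.

# Headless sketch (assumption: agent.run() is used for a single end-to-end prompt).
from transformers import Tool, ReactCodeAgent  # type: ignore
from transformers.agents import HfApiEngine  # type: ignore

# Same tool and engine setup as run.py above.
image_generation_tool = Tool.from_space(  # type: ignore
    space_id="black-forest-labs/FLUX.1-schnell",
    name="image_generator",
    description="Generates an image following your prompt. Returns a PIL Image.",
    api_name="/infer",
)
llm_engine = HfApiEngine("Qwen/Qwen2.5-Coder-32B-Instruct")
agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)

# Run one prompt and print the agent's final answer.
result = agent.run("Generate an image of an astronaut riding an alligator")
print(result)

The Gradio path in run.py differs only in that stream_to_gradio converts each intermediate agent step into a chat message, so the ChatInterface can stream the agent's reasoning as it happens.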