freddyaboulton (HF staff) committed
Commit 262a4e1 · verified · 1 Parent(s): f56b566

Upload folder using huggingface_hub

Files changed (4)
  1. README.md +1 -1
  2. requirements.txt +1 -1
  3. run.ipynb +1 -1
  4. run.py +33 -17
README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
  colorFrom: indigo
  colorTo: indigo
  sdk: gradio
- sdk_version: 5.9.1
+ sdk_version: 5.10.0
  app_file: run.py
  pinned: false
  hf_oauth: true
requirements.txt CHANGED
@@ -1 +1 @@
- git+https://github.com/huggingface/transformers.git#egg=transformers[agents]
+ transformers>=4.47.0
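The git install with the agents extra is replaced by a pinned release: the agents API that run.py uses ships in transformers>=4.47.0. A minimal sanity-check sketch, assuming only that these modules are importable from the released package:

# Sanity-check sketch (not part of the commit): confirm the released
# transformers package exposes the agents API imported by run.py.
from transformers import Tool, ReactCodeAgent  # type: ignore
from transformers.agents import stream_to_gradio, HfApiEngine  # type: ignore

print("transformers agents API is importable")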
run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: agent_chatbot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers.git#egg=transformers[agents]"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/agent_chatbot/utils.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "from transformers import load_tool, ReactCodeAgent, HfEngine # type: ignore\n", "from utils import stream_from_transformers_agent\n", "\n", "# Import tool from Hub\n", "image_generation_tool = load_tool(\"m-ric/text-to-image\")\n", "\n", "llm_engine = HfEngine(\"meta-llama/Meta-Llama-3-70B-Instruct\")\n", "# Initialize the agent with both tools\n", "agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)\n", "\n", "def interact_with_agent(prompt, messages):\n", " messages.append(ChatMessage(role=\"user\", content=prompt))\n", " yield messages\n", " for msg in stream_from_transformers_agent(agent, prompt):\n", " messages.append(msg)\n", " yield messages\n", " yield messages\n", "\n", "with gr.Blocks() as demo:\n", " stored_message = gr.State([])\n", " chatbot = gr.Chatbot(label=\"Agent\",\n", " type=\"messages\",\n", " avatar_images=(None, \"https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png\"))\n", " text_input = gr.Textbox(lines=1, label=\"Chat Message\")\n", " text_input.submit(lambda s: (s, \"\"), [text_input], [stored_message, text_input]).then(interact_with_agent, [stored_message, chatbot], [chatbot])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: agent_chatbot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio transformers>=4.47.0"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from dataclasses import asdict\n", "from transformers import Tool, ReactCodeAgent # type: ignore\n", "from transformers.agents import stream_to_gradio, HfApiEngine # type: ignore\n", "\n", "# Import tool from Hub\n", "image_generation_tool = Tool.from_space( # type: ignore\n", " space_id=\"black-forest-labs/FLUX.1-schnell\",\n", " name=\"image_generator\",\n", " description=\"Generates an image following your prompt. Returns a PIL Image.\",\n", " api_name=\"/infer\",\n", ")\n", "\n", "llm_engine = HfApiEngine(\"Qwen/Qwen2.5-Coder-32B-Instruct\")\n", "# Initialize the agent with both tools and engine\n", "agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)\n", "\n", "\n", "def interact_with_agent(prompt, history):\n", " messages = []\n", " yield messages\n", " for msg in stream_to_gradio(agent, prompt):\n", " messages.append(asdict(msg)) # type: ignore\n", " yield messages\n", " yield messages\n", "\n", "\n", "demo = gr.ChatInterface(\n", " interact_with_agent,\n", " chatbot= gr.Chatbot(\n", " label=\"Agent\",\n", " type=\"messages\",\n", " avatar_images=(\n", " None,\n", " \"https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png\",\n", " ),\n", " ),\n", " examples=[\n", " [\"Generate an image of an astronaut riding an alligator\"],\n", " [\"I am writing a children's book for my daughter. Can you help me with some illustrations?\"],\n", " ],\n", " type=\"messages\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py CHANGED
@@ -1,30 +1,46 @@
  import gradio as gr
- from gradio import ChatMessage
- from transformers import load_tool, ReactCodeAgent, HfEngine  # type: ignore
- from utils import stream_from_transformers_agent
+ from dataclasses import asdict
+ from transformers import Tool, ReactCodeAgent  # type: ignore
+ from transformers.agents import stream_to_gradio, HfApiEngine  # type: ignore

  # Import tool from Hub
- image_generation_tool = load_tool("m-ric/text-to-image")
+ image_generation_tool = Tool.from_space(  # type: ignore
+     space_id="black-forest-labs/FLUX.1-schnell",
+     name="image_generator",
+     description="Generates an image following your prompt. Returns a PIL Image.",
+     api_name="/infer",
+ )

- llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct")
- # Initialize the agent with both tools
+ llm_engine = HfApiEngine("Qwen/Qwen2.5-Coder-32B-Instruct")
+ # Initialize the agent with both tools and engine
  agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)

- def interact_with_agent(prompt, messages):
-     messages.append(ChatMessage(role="user", content=prompt))
+
+ def interact_with_agent(prompt, history):
+     messages = []
      yield messages
-     for msg in stream_from_transformers_agent(agent, prompt):
-         messages.append(msg)
+     for msg in stream_to_gradio(agent, prompt):
+         messages.append(asdict(msg))  # type: ignore
          yield messages
      yield messages

- with gr.Blocks() as demo:
-     stored_message = gr.State([])
-     chatbot = gr.Chatbot(label="Agent",
-                          type="messages",
-                          avatar_images=(None, "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png"))
-     text_input = gr.Textbox(lines=1, label="Chat Message")
-     text_input.submit(lambda s: (s, ""), [text_input], [stored_message, text_input]).then(interact_with_agent, [stored_message, chatbot], [chatbot])
+
+ demo = gr.ChatInterface(
+     interact_with_agent,
+     chatbot= gr.Chatbot(
+         label="Agent",
+         type="messages",
+         avatar_images=(
+             None,
+             "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
+         ),
+     ),
+     examples=[
+         ["Generate an image of an astronaut riding an alligator"],
+         ["I am writing a children's book for my daughter. Can you help me with some illustrations?"],
+     ],
+     type="messages",
+ )

  if __name__ == "__main__":
      demo.launch()
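For reference, the updated pieces can also be exercised outside the Gradio UI. A minimal sketch, assuming transformers>=4.47.0, a valid Hugging Face token for the Inference API, and that ReactCodeAgent.run returns the agent's final answer (here, presumably a PIL image produced by the image_generator tool):

# Standalone sketch (not part of the commit): run the same agent once
# without gr.ChatInterface and inspect the final answer.
from transformers import Tool, ReactCodeAgent  # type: ignore
from transformers.agents import HfApiEngine  # type: ignore

image_generation_tool = Tool.from_space(  # wraps the FLUX.1-schnell Space as a tool
    space_id="black-forest-labs/FLUX.1-schnell",
    name="image_generator",
    description="Generates an image following your prompt. Returns a PIL Image.",
    api_name="/infer",
)
llm_engine = HfApiEngine("Qwen/Qwen2.5-Coder-32B-Instruct")
agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)

# run() drives the ReAct loop to completion and returns the final answer.
result = agent.run("Generate an image of an astronaut riding an alligator")
print(type(result))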