Spaces:
Runtime error
Runtime error
Dacho688
committed on
Add files via upload
Browse files- ImgageGenerator/README.md.txt +14 -0
- ImgageGenerator/app.py +85 -0
- ImgageGenerator/requirements.txt +5 -0
- ImgageGenerator/streaming.py +32 -0
ImgageGenerator/README.md.txt
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
---
title: Image Generator
emoji: 🤔📊
colorFrom: yellow
colorTo: red
sdk: gradio
sdk_version: 4.38.1
app_file: app.py
pinned: false
license: apache-2.0
short_description: Image generator agent.
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
ImgageGenerator/app.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
"""
Image Generator Space: module-level setup for the agent-driven app.

Created on Thu Aug 1 19:04:11 2024

@author: rkram
"""

import os
import gradio as gr
from transformers import HfEngine, Tool,CodeAgent,load_tool
from gradio_tools import StableDiffusionPromptGeneratorTool
from streaming import stream_to_gradio
from huggingface_hub import login

# turn caching off
#client.headers["x-use-cache"] = "0"

# Log in to the Hugging Face Hub; the token is expected in the
# HUGGINGFACEHUB_API_TOKEN environment variable (a Space secret).
login(os.getenv("HUGGINGFACEHUB_API_TOKEN"))

# LLM engine that powers the agent (remote inference endpoint).
llm_engine = HfEngine("meta-llama/Meta-Llama-3.1-70B-Instruct")

# Tools available to the agent:
#  - a text-to-image generation tool from the Hub, and
#  - a Stable Diffusion prompt-improvement tool (a gradio_tools tool
#    wrapped into a transformers-agents Tool via Tool.from_gradio).
image_gen_tool = load_tool("huggingface-tools/text-to-image")
gradio_pg_tool = StableDiffusionPromptGeneratorTool()
pg_tool = Tool.from_gradio(gradio_pg_tool)

# Code-writing agent that can call both tools; no extra Python imports
# are authorized for its generated code, and it stops after 10 steps.
agent = CodeAgent(
    tools=[pg_tool,image_gen_tool],
    llm_engine=llm_engine,
    additional_authorized_imports=[],
    max_iterations=10,
)

# Instruction prefix prepended to every user-supplied prompt.
base_prompt = """Improve the following prompt and generate an image.
Prompt:"""
|
| 36 |
+
def interact_with_agent(add_prompt):
    """Run the agent for one user request, yielding chat-history updates.

    Yields lists of gr.ChatMessage so the Chatbot first shows a
    placeholder while the agent works, then each streamed agent message
    with a trailing "still processing" marker, and finally the history
    without the marker.
    """
    if add_prompt and len(add_prompt) > 0:
        prompt = base_prompt + add_prompt
    else:
        # No user input: instruct the agent to emit a fixed error reply.
        prompt = "There is no prompt made. Reply exactly with:'***ERROR: Please input a prompt.***'"

    history = [gr.ChatMessage(role="assistant", content="⏳ _Generating image..._")]
    yield history

    for agent_message in stream_to_gradio(agent, prompt):
        history.append(agent_message)
        yield history + [
            gr.ChatMessage(role="assistant", content="⏳ _Still processing..._")
        ]
    yield history
|
| 53 |
+
|
| 54 |
+
# ---- Gradio UI -------------------------------------------------------
with gr.Blocks(
    theme=gr.themes.Soft(
        primary_hue=gr.themes.colors.blue,
        secondary_hue=gr.themes.colors.yellow,
    )
) as demo:
    gr.Markdown("""# Image Generator""")

    # Conversation pane showing the agent's progress and final image.
    chatbot = gr.Chatbot(
        label="ImageBot",
        type="messages",
        avatar_images=(
            None,
            "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png"
        ),
    )

    # Free-text prompt input and the button that kicks off the agent.
    text_input = gr.Textbox(
        label="What image would you like to generate?"
    )
    submit = gr.Button("Run", variant="primary")

    # Stream agent output into the chatbot as it is produced.
    submit.click(interact_with_agent, [text_input], [chatbot])

if __name__ == "__main__":
    demo.launch()
|
ImgageGenerator/requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Agents API is taken from transformers main, with the [agents] extras.
git+https://github.com/huggingface/transformers.git#egg=transformers[agents]
# Imported by app.py (StableDiffusionPromptGeneratorTool) but was not
# declared; the Space cannot start without it.
gradio_tools
matplotlib
seaborn
scikit-learn
scipy
|
ImgageGenerator/streaming.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from transformers.agents.agent_types import AgentAudio, AgentImage, AgentText, AgentType
|
| 2 |
+
from transformers.agents import ReactAgent,CodeAgent
|
| 3 |
+
import spaces
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@spaces.GPU
def stream_to_gradio(agent: CodeAgent, task: str, **kwargs):
    """Run *agent* on *task* and yield the result as gradio ChatMessages.

    The agent's final output is wrapped in the ChatMessage content form
    matching its type: plain text, an image file path, or an audio file
    path. Anything else is passed through unchanged.
    """
    try:
        from gradio import ChatMessage
    except ImportError:
        raise ImportError("Gradio should be installed in order to launch a gradio demo.")

    result = agent.run(task, **kwargs)

    if isinstance(result, AgentText):
        yield ChatMessage(role="assistant", content=f"{result}")
        return
    if isinstance(result, AgentImage):
        yield ChatMessage(
            role="assistant",
            content={"path": result.to_string(), "mime_type": "image/png"},
        )
        return
    if isinstance(result, AgentAudio):
        yield ChatMessage(
            role="assistant",
            content={"path": result.to_string(), "mime_type": "audio/wav"},
        )
        return
    # Fallback: raw string or an unrecognized AgentType, forwarded as-is.
    yield ChatMessage(role="assistant", content=result)
|