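"""TimeMetamorphy: an object evolution generator.

A Gradio app that uses a transformers ReactCodeAgent, equipped with a
text-to-image tool and a DuckDuckGo search tool, to generate images of an
object in the past, present, and future, and assembles them into a GIF.
"""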
from transformers import load_tool, ReactCodeAgent, HfApiEngine
from PIL import Image, ImageDraw, ImageFont
import gradio as gr
#%% Methods
# Function to add a label to an image
def add_label_to_image(image, label):
    # Create a drawing context in "RGBA" mode so the label background below
    # can be drawn with real transparency over the image
    draw = ImageDraw.Draw(image, "RGBA")

    # Define font size and path (adjust the font path for your environment)
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"  # Example font path
    font_size = 30  # Larger font size for better visibility
    try:
        font = ImageFont.truetype(font_path, font_size)
    except OSError:
        # Fall back to the default bitmap font if the TrueType font is unavailable
        font = ImageFont.load_default()
    # Calculate the size and position of the text (bottom-right corner with a margin)
    text_bbox = draw.textbbox((0, 0), label, font=font)
    text_width, text_height = text_bbox[2] - text_bbox[0], text_bbox[3] - text_bbox[1]
    position = (image.width - text_width - 20, image.height - text_height - 20)

    # Add a semi-transparent rectangle behind the text for better visibility
    rect_margin = 10
    rect_position = [
        position[0] - rect_margin,
        position[1] - rect_margin,
        position[0] + text_width + rect_margin,
        position[1] + text_height + rect_margin,
    ]
    draw.rectangle(rect_position, fill=(0, 0, 0, 128))  # Semi-transparent black
    draw.text(position, label, fill="white", font=font)
    return image
# Function to plot, label, and save an image
def plot_and_save_agent_image(agent_image, label, save_path=None):
    # Convert AgentImage to a raw PIL Image
    pil_image = agent_image.to_raw()

    # Add a label to the image
    labeled_image = add_label_to_image(pil_image, label)

    # Plot the image using PIL's show method
    labeled_image.show()

    # If save_path is provided, save the image
    if save_path:
        labeled_image.save(save_path)
        print(f"Image saved to {save_path}")
    else:
        print("No save path provided. Image not saved.")
# Function to generate prompts for an object
def generate_prompts_for_object(object_name):
    prompts = {
        "past": f"Show an old version of a {object_name} from its early days.",
        "present": f"Show a {object_name} with current features/design/technology.",
        "future": f"Show a futuristic version of a {object_name}, by predicting advanced features and futuristic design."
    }
    return prompts
# Function to generate the object's history images and GIF
def generate_object_history(object_name):
    images = []

    # Get prompts for the object
    prompts = generate_prompts_for_object(object_name)
    labels = {
        "past": f"{object_name} - Past",
        "present": f"{object_name} - Present",
        "future": f"{object_name} - Future"
    }

    # Generate sequential images and display them
    for time_period, frame in prompts.items():
        print(f"Generating {time_period} frame: {frame}")
        result = agent.run(frame)  # The tool generates the image

        # Append the image to the list for GIF creation
        images.append(result.to_raw())  # Ensure we're using the raw image for the GIF

        # Save each image with the appropriate name and label
        image_filename = f"{object_name}_{time_period}.png"
        plot_and_save_agent_image(result, labels[time_period], save_path=image_filename)

    # Create GIF from images
    gif_path = f"{object_name}_evolution.gif"
    images[0].save(
        gif_path,
        save_all=True,
        append_images=images[1:],
        duration=1000,  # Duration in milliseconds for each frame
        loop=0  # Infinite loop
    )

    # Return images and GIF path
    return images, gif_path
#%% Initialization of tools and AI_Agent
# Import text-to-image tool from Hub
image_generation_tool = load_tool("m-ric/text-to-image", cache=False)
# Import the DuckDuckGo search tool from transformers.agents
from transformers.agents.search import DuckDuckGoSearchTool
search_tool = DuckDuckGoSearchTool()
# Load the LLM engine
llm_engine = HfApiEngine("Qwen/Qwen2.5-72B-Instruct")
# Initialize the agent with both tools
agent = ReactCodeAgent(tools=[image_generation_tool, search_tool], llm_engine=llm_engine)
# Gradio interface
def create_gradio_interface():
    with gr.Blocks() as demo:
gr.Markdown("# TimeMetamorphy: an object Evolution Generator")
# Add a section for instructions
gr.Markdown("""
## Unlocking the secrets of time!
This app unveils these mysteries by offering a unique/magic lens that allows us "time travel".
Powered by AI agents equipped with cutting-edge tools, it provides the superpower to explore the past, witness the present, and dream up the future like never before.
This system allows you to generate visualizations of how an object/concept, like a bicycle or a car, may have evolved over time.
It generates images of the object in the past, present, and future based on your input.
### Default Example: Evolution of a Car
Below, you can see a precomputed example of a "car" evolution. Enter another object to generate its evolution.
""")
        # Paths to the precomputed files
        default_images = [
            ("car_past.png", "Car - Past"),
            ("car_present.png", "Car - Present"),
            ("car_future.png", "Car - Future")
        ]
        default_gif_path = "car_evolution.gif"

        with gr.Row():
            with gr.Column():
                # Textbox for user to input an object name
                object_name_input = gr.Textbox(label="Enter an object name (e.g., bicycle, phone)",
                                               placeholder="Enter an object name",
                                               lines=1)

                # Button to trigger the generation of images and GIF
                generate_button = gr.Button("Generate Evolution")

                # Gradio Gallery component to display the images
                image_gallery = gr.Gallery(label="Generated Images", show_label=True, columns=3, rows=1, value=default_images)

                # Output for the generated GIF
                gif_output = gr.Image(label="Generated GIF", show_label=True, value=default_gif_path)

        # Set the action when the button is clicked
        generate_button.click(fn=generate_object_history, inputs=[object_name_input], outputs=[image_gallery, gif_output])

    return demo
# Launch the Gradio app
demo = create_gradio_interface()
demo.launch(share=True)