import gradio as gr
from PIL import Image, ImageDraw, ImageFont
import random


# Placeholder function for image generation (replace with actual model inference if desired)
def generate_image(text_description):
    # Create a blank image with a random background color
    img = Image.new(
        'RGB',
        (512, 512),
        color=(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
    )
    draw = ImageDraw.Draw(img)

    # Add text to the image (simulating a generated image based on the description)
    try:
        font = ImageFont.truetype("arial.ttf", 40)
    except OSError:
        # Fall back to PIL's built-in font if Arial is not available
        font = ImageFont.load_default()

    # Truncate the description if it is too long to fit on the image
    text = text_description if len(text_description) < 30 else text_description[:27] + "..."
    draw.text((20, 20), f"Generated: {text}", font=font, fill=(255, 255, 255))

    # Here you could integrate a real model like Stable Diffusion
    # Example (commented out):
    # from diffusers import StableDiffusionPipeline
    # pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    # img = pipe(text_description).images[0]

    return img


# Gradio interface
with gr.Blocks(title="Text-to-Image Generator") as demo:
    gr.Markdown("# Text-to-Image Generator")
    gr.Markdown("Enter a description below and generate an image!")

    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(label="Description", placeholder="Type your image description here...")
            generate_btn = gr.Button("Generate Image")
        with gr.Column():
            output_image = gr.Image(label="Generated Image")

    # Connect the button to the function
    generate_btn.click(fn=generate_image, inputs=text_input, outputs=output_image)

# Launch the app
demo.launch()
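
# ---------------------------------------------------------------------------
# Optional: swapping in a real diffusion model
# ---------------------------------------------------------------------------
# A minimal sketch (assuming the `diffusers` and `torch` packages are installed
# and a CUDA GPU is available) of how generate_image() could be replaced with
# actual Stable Diffusion inference. The pipeline is loaded once at module
# level so the weights are not reloaded on every button click; the function
# still returns a PIL.Image, so it plugs directly into gr.Image above.
#
#     import torch
#     from diffusers import StableDiffusionPipeline
#
#     pipe = StableDiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
#     ).to("cuda")
#
#     def generate_image(text_description):
#         return pipe(text_description).images[0]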