import gradio as gr
import torch
from diffusers import StableDiffusionXLPipeline
print("Starting to load the model to memory")
# Load the diffusion model and image processor
model = DiffusionModel.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
processor = DiffusionImageProcessor.from_model(model)
tokenizer = AutoTokenizer.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
print("Successfully loaded the model to memory")
def generate_image(text):
    # Generate an image from the given text prompt by running the diffusion pipeline
    with torch.no_grad():
        result = pipe(prompt=text)
    # Return the first generated image (a PIL.Image)
    return result.images[0]
# Define a function to handle user input and generate images
def image_generator(text):
    generated_image = generate_image(text)
    return generated_image
# Create a Gradio interface for the image generation
interface = gr.Interface(
    fn=image_generator,
    inputs="text",
    outputs="image",
    title="Image Generation from Text",
    description="Enter a text prompt to generate an image.",
    examples=["a cat sitting on a couch"],
)
# Launch the interface
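# share=True additionally exposes the app through a temporary public Gradio link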
interface.launch(share=True)