# Rick Generator — Hugging Face Spaces app: text-to-image demo built on
# a Stable Diffusion pipeline and served through a Gradio interface.
import gradio as gr
from PIL import Image
import torch
from diffusers import StableDiffusionPipeline

# Load the diffusion pipeline from the Hugging Face Hub.
model_name = "Yaquv/rickthenpc"
# Prefer the GPU when one is available; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
try:
    pipe = StableDiffusionPipeline.from_pretrained(model_name)
    pipe = pipe.to(device)
except Exception as e:
    # Deliberate best-effort: keep the app importable even if the model
    # fails to download/load. generate_image() reports the failure to the
    # user at request time instead.
    print(f"Error loading the model: {e}")
    pipe = None
def generate_image(prompt):
    """
    Generate an image from the given prompt using the loaded pipeline.

    Args:
        prompt: Text description of the image to generate.

    Returns:
        PIL.Image.Image: The generated image, converted to RGB.

    Raises:
        ValueError: If the model failed to load at startup, if the
            pipeline returns no images, or if generation fails.
    """
    if pipe is None:
        raise ValueError("The model couldn't be loaded.")
    try:
        # Run the diffusion pipeline on the prompt.
        result = pipe(prompt)
        # Defensive check: the pipeline is expected to return an object
        # with a non-empty `images` attribute.
        if not hasattr(result, 'images') or len(result.images) == 0:
            raise ValueError("The model couldn't generate an image.")
        image = result.images[0]
        # Normalize to a PIL image in RGB for the Gradio Image output.
        if not isinstance(image, Image.Image):
            image = Image.fromarray(image)
        return image.convert("RGB")
    except ValueError:
        # Our own empty-result error above: re-raise as-is instead of
        # re-wrapping it with a redundant prefix.
        raise
    except Exception as e:
        # Surface any other failure to Gradio, preserving the cause chain.
        raise ValueError(f"Error during image generation: {str(e)}") from e
# Define the Gradio interface: one text prompt in, one PIL image out.
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(
        label="Prompt",
        lines=2,
        placeholder="Enter your prompt here...",
    ),
    outputs=gr.Image(
        label="Generated Image",
        type="pil",  # Ensure the output is handled as a PIL Image
    ),
    title="Rick Generator",
    description="Enter a prompt to generate an image with the Rick Generator model.",
)

# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()