Spaces:
Sleeping
Sleeping
File size: 1,770 Bytes
9448c74 a63e5fb cb18fce 9448c74 d0e188e a63e5fb cb18fce d0e188e cb18fce a63e5fb d0e188e a63e5fb cb18fce 9b1f863 a63e5fb cb18fce d0e188e 9b1f863 d0e188e 9b1f863 d0e188e 9b1f863 a63e5fb d0e188e a63e5fb d0e188e a63e5fb d0e188e a63e5fb d0e188e 9b1f863 a63e5fb d0e188e a63e5fb cb18fce |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 |
import gradio as gr
from PIL import Image
import torch
from diffusers import StableDiffusionPipeline
# Load the Stable Diffusion pipeline once at startup; on any failure we
# record `pipe = None` so the UI can surface a clear error instead of crashing.
model_name = "Yaquv/rickthenpc"
device = "cuda" if torch.cuda.is_available() else "cpu"
try:
    # Chain the device move directly onto the loaded pipeline.
    pipe = StableDiffusionPipeline.from_pretrained(model_name).to(device)
except Exception as e:
    print(f"Error loading the model: {e}")
    pipe = None
def generate_image(prompt):
    """
    Generate an image from the given prompt using the Hugging Face model.

    Args:
        prompt: Text description of the image to generate.

    Returns:
        PIL.Image.Image: The generated image, converted to RGB mode.

    Raises:
        ValueError: If the model failed to load at startup, produced no
            images, or any other error occurred during generation.
    """
    if pipe is None:
        raise ValueError("The model couldn't be loaded.")
    try:
        # Generate the image
        result = pipe(prompt)
        # Check that the result contains images
        if not hasattr(result, 'images') or len(result.images) == 0:
            raise ValueError("The model couldn't generate an image.")
        image = result.images[0]
        # Ensure the image is in PIL.Image format and convert to RGB
        if not isinstance(image, Image.Image):
            image = Image.fromarray(image)
        image = image.convert("RGB")
        return image
    except ValueError:
        # Bug fix: don't re-wrap our own ValueError — previously the broad
        # handler below caught it and garbled the diagnostic message.
        raise
    except Exception as e:
        # Chain the cause so the original traceback survives for debugging,
        # while still raising a ValueError for Gradio to display.
        raise ValueError(f"Error during image generation: {str(e)}") from e
# Define the Gradio interface: a text prompt in, a PIL image out.
prompt_input = gr.Textbox(
    label="Prompt",
    lines=2,
    placeholder="Enter your prompt here...",
)
image_output = gr.Image(
    label="Generated Image",
    type="pil",  # Ensure the output is a PIL Image
)
iface = gr.Interface(
    fn=generate_image,
    inputs=prompt_input,
    outputs=image_output,
    title="Rick Generator",
    description="Enter a prompt to generate an image with the Rick Generator model.",
)
# Launch the Gradio app when run as a script.
if __name__ == "__main__":
    # Bug fix: removed a stray " |" scrape artifact after this call that
    # made the file a syntax error.
    iface.launch()