import gradio as gr
from diffusers import StableDiffusionPipeline
import torch

from transformers import logging
logging.set_verbosity_error()  # Suppress transformers warnings (e.g. cache-migration notices)

# Check if CUDA is available and pick the device accordingly
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the fine-tuned model (float16 on GPU; float32 on CPU, where half precision is unsupported)
model_id = "MostafaAly/stable-diffusion-finetuned"
dtype = torch.float16 if device == "cuda" else torch.float32
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype)
pipe.to(device)
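
# Optional: attention slicing reduces GPU memory use at a small speed cost
# pipe.enable_attention_slicing()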

print(f"Using device: {device}")


# Define the function for text-to-image generation
def generate_image(prompt):
    image = pipe(prompt).images[0]
    return image
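
# Note: pipe() also accepts optional arguments such as num_inference_steps and
# guidance_scale, e.g. pipe(prompt, num_inference_steps=30, guidance_scale=7.5),
# to trade generation speed against image quality.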

# Create a Gradio interface
interface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Enter your prompt"),
    outputs=gr.Image(label="Generated Image"),
)

# Launch the interface
interface.launch()
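
# When running outside Hugging Face Spaces, a temporary public link can be created with:
# interface.launch(share=True)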