# text2img/app.py
import gradio as gr
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
import torch


def load_model():
    # Load the Stable Diffusion 2.1 text-to-image pipeline in half precision
    pipeline = StableDiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1",
        torch_dtype=torch.float16,
        variant="fp16",  # fp16 weights variant; replaces the deprecated revision="fp16"
        safety_checker=None,  # Disable the safety checker if necessary
    )

    # Use the DPM-Solver++ multistep scheduler (optional but recommended)
    pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)

    # Move the pipeline to the GPU (see the ZeroGPU note below)
    pipeline = pipeline.to("cuda")
    return pipeline
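
# Note: on a Hugging Face ZeroGPU Space, the GPU is allocated per request rather than
# held from startup. A minimal sketch of that setup (assuming the `spaces` package that
# ZeroGPU Spaces provide) keeps the .to("cuda") call above but decorates the inference
# function, e.g.:
#
#   import spaces
#
#   @spaces.GPU
#   def generate(prompt, guidance_scale=7.5, num_inference_steps=50):
#       ...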


# Initialize the model once at startup
model = None
try:
    model = load_model()
except Exception as e:
    print(f"Error loading the model: {e}")


# Inference function wired to the Gradio interface
def generate(prompt, guidance_scale=7.5, num_inference_steps=50):
    if model is None:
        raise gr.Error("The model failed to load; check the logs for details.")
    # Run the pipeline and return the first generated image
    images = model(prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images
    return images[0]
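
# Reproducibility note: the pipeline also accepts a `generator` argument for seeded
# generation; a minimal sketch (the `seed` parameter is hypothetical, not part of the
# interface above):
#
#   def generate(prompt, guidance_scale=7.5, num_inference_steps=50, seed=42):
#       generator = torch.Generator(device="cuda").manual_seed(seed)
#       images = model(prompt, guidance_scale=guidance_scale,
#                      num_inference_steps=num_inference_steps, generator=generator).images
#       return images[0]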


# Gradio Interface
with gr.Blocks() as demo:
    with gr.Row():
        prompt = gr.Textbox(label="Enter your prompt")
        guidance_scale = gr.Slider(1.0, 10.0, value=7.5, label="Guidance Scale")
        steps = gr.Slider(10, 100, value=50, label="Number of Inference Steps")
    with gr.Row():
        submit = gr.Button("Generate")
    with gr.Row():
        output = gr.Image()
    submit.click(generate, inputs=[prompt, guidance_scale, steps], outputs=output)

demo.launch()