import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
import torch
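
# Run on the GPU when one is available; otherwise fall back to the CPU.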
device = "cuda" if torch.cuda.is_available() else "cpu"
MAX_SEED = np.iinfo(np.int32).max  # largest value the seed slider can take
MAX_IMAGE_SIZE = 1024  # upper bound for the width/height sliders


def load_pipeline(model_id):
    # fp16 halves memory use and speeds up GPU inference; CPU needs full fp32
    if torch.cuda.is_available():
        torch_dtype = torch.float16
    else:
        torch_dtype = torch.float32
    pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype)
    return pipe.to(device)
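

# An optional tweak, not part of the original app: on GPUs with little VRAM,
# attention slicing trades some speed for a lower memory peak.
# enable_attention_slicing() is a standard diffusers pipeline method; call it
# inside load_pipeline before returning if you hit out-of-memory errors:
#
#     pipe.enable_attention_slicing()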

# Initialize with the default model and remember which checkpoint is loaded,
# so infer() only reloads the pipeline when the user switches models.
pipe = load_pipeline("CompVis/stable-diffusion-v1-4")
current_model_id = "CompVis/stable-diffusion-v1-4"

available_models = [
    "CompVis/stable-diffusion-v1-4",
    "runwayml/stable-diffusion-v1-5",
    "stabilityai/stable-diffusion-2-1",
    "prompthero/openjourney",
]
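
# Any text-to-image checkpoint that DiffusionPipeline.from_pretrained can load
# from the Hugging Face Hub should work if added to the list above.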


def infer(
    model_id,
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    global pipe, current_model_id
    # Reload the pipeline only when a different model is selected
    if model_id and model_id != current_model_id:
        pipe = load_pipeline(model_id)
        current_model_id = model_id
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Stable Diffusion requires dimensions divisible by 8
    width = max(256, (width // 8) * 8)
    height = max(256, (height // 8) * 8)
    # Fall back to a sensible default if no step count is given
    if num_inference_steps is None:
        num_inference_steps = 20
    # A CPU generator keeps results reproducible across devices
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=int(num_inference_steps),  # sliders may pass floats
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed
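
# A sketch of headless use (hypothetical, not part of the UI): infer() can be
# called directly for quick testing; it returns a PIL image and the seed used.
#
#     img, used_seed = infer(
#         "CompVis/stable-diffusion-v1-4",
#         "An astronaut riding a green horse",
#         "",               # no negative prompt
#         seed=0,
#         randomize_seed=True,
#         width=512,
#         height=512,
#         guidance_scale=7.0,
#         num_inference_steps=20,
#     )
#     img.save("out.png")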

examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]
css = """
#col-container {
margin: 0 auto;
max-width: 640px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Text-to-Image Gradio Template with Model Selection")
        model_id = gr.Dropdown(
            label="Model Selection",
            choices=available_models,
            value="CompVis/stable-diffusion-v1-4",
        )
        prompt = gr.Text(
            label="Prompt",
            show_label=True,
            max_lines=1,
            placeholder="Enter your prompt",
        )
        negative_prompt = gr.Text(
            label="Negative prompt",
            max_lines=1,
            placeholder="Enter a negative prompt",
        )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=42,
        )
        # Draw a fresh random seed on every run when checked
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        guidance_scale = gr.Slider(
            label="Guidance scale",
            minimum=0.0,
            maximum=20.0,
            step=0.1,
            value=7.0,
        )
        num_inference_steps = gr.Slider(
            label="Number of inference steps",
            minimum=1,
            maximum=100,
            step=1,
            value=20,
        )
        with gr.Row():
            width = gr.Slider(
                label="Width",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=8,
                value=512,
            )
            height = gr.Slider(
                label="Height",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=8,
                value=512,
            )
        run_button = gr.Button("Run", scale=0, variant="primary")
        result = gr.Image(label="Result", show_label=False)
        gr.Examples(examples=examples, inputs=[prompt])
    # Wire both the Run button and pressing Enter in the prompt box to infer;
    # the inputs are listed in the same order as infer's parameters.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            model_id,
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )

if __name__ == "__main__":
    demo.launch()
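    # demo.launch(share=True) is a standard Gradio option that would also
    # expose a temporary public URL, handy when testing from another machine.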