# sdxl-turbo / app.py
import gradio as gr
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
import torch
# Pick the best available device
if torch.cuda.is_available():
    device = "cuda"
elif torch.backends.mps.is_available():
    device = "mps"
else:
    device = "cpu"
# Create the pipelines. float16 is not well supported on CPU, so fall back
# to float32 (and the default weight variant) there.
dtype = torch.float16 if device != "cpu" else torch.float32
variant = "fp16" if dtype == torch.float16 else None
pipes = {
    "txt2img": AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=dtype, variant=variant).to(device),
    "img2img": AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=dtype, variant=variant).to(device),
}
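
# Note: the two pipelines above load the SDXL-Turbo weights twice. A
# memory-saving sketch (assuming a diffusers version with the AutoPipeline
# from_pipe API) would share the already-loaded components instead:
#
#   pipes = {"txt2img": AutoPipelineForText2Image.from_pretrained(
#       "stabilityai/sdxl-turbo", torch_dtype=dtype, variant=variant).to(device)}
#   pipes["img2img"] = AutoPipelineForImage2Image.from_pipe(pipes["txt2img"])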
# Optional: on CUDA, model CPU offload lowers VRAM use at some speed cost.
# It has no effect on a CPU-only machine, where everything already lives in
# system memory. Uncomment to enable:
# if device == "cuda":
#     pipes["txt2img"].enable_model_cpu_offload()
#     pipes["img2img"].enable_model_cpu_offload()
# If no init image is given, run text-to-image; otherwise run image-to-image.
def run(prompt, image):
    print(f"prompt={prompt}, image={image}")
    if image is None:
        # SDXL-Turbo is distilled for single-step, guidance-free sampling.
        return pipes["txt2img"](prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
    else:
        # SDXL-Turbo works at 512x512; for img2img, num_inference_steps * strength
        # must be >= 1 so at least one denoising step runs (2 * 0.5 = 1 here).
        image = image.resize((512, 512))
        print(f"img2img image={image}")
        return pipes["img2img"](prompt=prompt, image=image, num_inference_steps=2, strength=0.5, guidance_scale=0.0).images[0]
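
# For reproducible outputs while testing, both pipelines accept a seeded
# torch.Generator via the standard diffusers `generator` argument, e.g.:
#
#   gen = torch.Generator(device=device).manual_seed(42)
#   img = pipes["txt2img"](prompt="a cat", num_inference_steps=1,
#                          guidance_scale=0.0, generator=gen).images[0]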
# Build the Gradio interface: a prompt plus an optional init image.
demo = gr.Interface(
    run,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Image(type="pil"),
    ],
    outputs=gr.Image(width=512, height=512),
    live=True,
    theme="ParityError/Interstellar",
)
# Start the app
demo.launch()
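
# With live=True every input change triggers a generation; on shared hardware
# a request queue smooths this out. A possible variant (standard Gradio API):
# demo.queue().launch()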