import gradio as gr
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
import torch
# Check which device is available: CUDA GPU, Apple Silicon (MPS), or CPU
if torch.cuda.is_available():
    device = "cuda"
elif torch.backends.mps.is_available():
    device = "mps"
else:
    device = "cpu"
# Create the pipelines. Half precision is poorly supported on CPU, so fall
# back to float32 there; variant="fp16" only selects which weight files
# are downloaded from the Hub.
dtype = torch.float16 if device != "cpu" else torch.float32
pipes = {
    "txt2img": AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=dtype, variant="fp16").to(device),
    "img2img": AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=dtype, variant="fp16").to(device)
}
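
# Note: SDXL-Turbo is a distilled version of SDXL 1.0 that produces usable
# images in 1-4 denoising steps, which is what makes a live interface practical.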
# On CUDA machines, model offloading can reduce VRAM usage: submodels stay in
# CPU RAM and are moved to the GPU only while they run (requires accelerate).
# Offloading targets a CUDA execution device, so it does not apply on CPU or MPS.
if device == "cuda":
    pipes["txt2img"].enable_model_cpu_offload()
    pipes["img2img"].enable_model_cpu_offload()
# Define the run function: text-to-image when no input image is given,
# image-to-image otherwise
def run(prompt, image):
    print(f"prompt={prompt}, image={image}")
    if image is None:
        # SDXL-Turbo was trained without classifier-free guidance, so
        # guidance_scale is disabled and a single step suffices
        return pipes["txt2img"](prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
    else:
        image = image.resize((512, 512))
        print(f"img2img image={image}")
        # For img2img, num_inference_steps * strength must be >= 1, so
        # 2 steps at strength 0.5 amount to a single denoising step
        return pipes["img2img"](prompt, image=image, num_inference_steps=2, strength=0.5, guidance_scale=0.0).images[0]
# Create the Gradio interface
demo = gr.Interface(
    run,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Image(type="pil")
    ],
    outputs=gr.Image(width=512, height=512),
    live=True,
    theme="ParityError/Interstellar"
)
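
# live=True re-runs the model on every input change instead of waiting for a
# submit click; SDXL-Turbo's one- to two-step inference keeps this responsive.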
# Start the application
demo.launch()
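
# For a quick smoke test without the UI, one could comment out demo.launch()
# above and call the function directly (hypothetical example):
#     image = run("a cat wearing sunglasses", None)
#     image.save("test.png")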