TogetherAI committed
Commit a4a0991 · 1 Parent(s): 3c87bd8

Update app.py

Files changed (1)
  1. app.py +26 -22
app.py CHANGED
@@ -1,43 +1,47 @@
import gradio as gr
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
- from diffusers.utils import load_image
import torch

if torch.cuda.is_available():
-     device = "cuda"
elif torch.backends.mps.is_available():
-     device = "mps"
else:
-     device = "cpu"
-

pipes = {
-     "txt2img": AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16").to(device),
-     "img2img": AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16").to(device)
}

if device == "cpu":
-     pipes["txt2img"].enable_model_cpu_offload()
-     pipes["img2img"].enable_model_cpu_offload()
-

def run(prompt, image):
-     print(f"prompt={prompt}, image={image}")
-     if image is None:
-         return pipes["txt2img"](prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
-     else:
-         image = image.resize((512,512))
-         print(f"img2img image={image}")
-         return pipes["img2img"](prompt, image=image, num_inference_steps=2, strength=0.5, guidance_scale=0.0).images[0]

demo = gr.Interface(
    run,
    inputs=[
-         gr.Textbox(label="Prompt"),
-         gr.Image(type="pil")
    ],
-     outputs=gr.Image(width=512,height=512),
-     live=True
)
- #demo.dependencies[0]["show_progress"] = "minimal"
demo.launch()
 
import gradio as gr
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
import torch

+ # Check which device is available
if torch.cuda.is_available():
+     device = "cuda"
elif torch.backends.mps.is_available():
+     device = "mps"
else:
+     device = "cpu"

+ # Create the pipelines
pipes = {
+     "txt2img": AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16").to(device),
+     "img2img": AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16").to(device)
}

+ # Enable CPU offloading if needed
if device == "cpu":
+     pipes["txt2img"].enable_model_cpu_offload()
+     pipes["img2img"].enable_model_cpu_offload()

+ # Define the run function
def run(prompt, image):
+     print(f"prompt={prompt}, image={image}")
+     if image is None:
+         return pipes["txt2img"](prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
+     else:
+         image = image.resize((512,512))
+         print(f"img2img image={image}")
+         return pipes["img2img"](prompt, image=image, num_inference_steps=2, strength=0.5, guidance_scale=0.0).images[0]

+ # Create the Gradio interface
demo = gr.Interface(
    run,
    inputs=[
+         gr.Textbox(label="Prompt"),
+         gr.Image(type="pil")
    ],
+     outputs=gr.Image(width=512, height=512),
+     live=True,
+     theme="ParityError/Interstellar"
)
+
+ # Launch the application
demo.launch()
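
For a quick local sanity check of the updated script, a minimal sketch along these lines exercises the same text-to-image path outside of Gradio (assuming diffusers and torch are installed, the stabilityai/sdxl-turbo weights can be downloaded, and a CUDA GPU is available; the prompt string and output filename are placeholders):

import torch
from diffusers import AutoPipelineForText2Image

# Mirrors the pipeline settings used in app.py (assumes a CUDA GPU)
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

# SDXL-Turbo is run with a single inference step and no guidance, as in app.py
image = pipe(prompt="a scenic mountain lake at sunrise", num_inference_steps=1, guidance_scale=0.0).images[0]
image.save("sample.png")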