Anonymous committed on
Commit f91119a · 1 parent: 5d4e5d9

load in advance

Files changed (2):
  1. app.py  +23 -20
  2. pipeline_freescale_turbo.py  +1 -1
app.py CHANGED

@@ -1,14 +1,21 @@
 import gradio as gr
+
 import spaces
 
-import os
 import torch
-from PIL import Image
 
 from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
 
+from pipeline_freescale import StableDiffusionXLPipeline
+from pipeline_freescale_turbo import StableDiffusionXLPipeline_Turbo
+model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
+model_ckpt_turbo = "stabilityai/sdxl-turbo"
+
+pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
+pipe_turbo = StableDiffusionXLPipeline_Turbo.from_pretrained(model_ckpt_turbo, torch_dtype=torch.float16)
+
 @spaces.GPU(duration=120)
-def infer_gpu_normal(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps):
+def infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps):
     pipe = pipe.to("cuda")
     generator = torch.Generator(device='cuda')
     generator = generator.manual_seed(seed)

@@ -22,8 +29,8 @@ def infer_gpu_normal(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_s
     ).images[0]
     return result
 
-@spaces.GPU(duration=30)
-def infer_gpu_turbo(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps):
+@spaces.GPU(duration=40)
+def infer_gpu_part_turbo(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps):
     pipe = pipe.to("cuda")
     generator = torch.Generator(device='cuda')
     generator = generator.manual_seed(seed)

@@ -39,12 +46,13 @@ def infer_gpu_turbo(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_sc
 
 def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, options, negative_prompt):
 
+    print(prompt)
+    print(negative_prompt)
+
     disable_turbo = 'Disable Turbo' in options
     disable_freeu = 'Disable FreeU' in options
 
     if disable_turbo:
-        from pipeline_freescale import StableDiffusionXLPipeline
-        model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
         fast_mode = True
         if output_size == "2048 x 2048":
             resolutions_list = [[1024, 1024],

@@ -55,12 +63,13 @@ def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, o
         elif output_size == "2048 x 1024":
             resolutions_list = [[1024, 512],
                                 [2048, 1024]]
-        infer_gpu_part = infer_gpu_normal
         restart_steps = [int(ddim_steps * 0.3)] * len(resolutions_list)
 
+        # print('GPU starts')
+        result = infer_gpu_part_turbo(pipe_turbo, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps)
+        # print('GPU ends')
+
     else:
-        from pipeline_freescale_turbo import StableDiffusionXLPipeline
-        model_ckpt = "stabilityai/sdxl-turbo"
         fast_mode = False
         if output_size == "2048 x 2048":
             resolutions_list = [[512, 512],

@@ -74,19 +83,13 @@ def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, o
             resolutions_list = [[512, 256],
                                 [1024, 512],
                                 [2048, 1024]]
-        infer_gpu_part = infer_gpu_turbo
         restart_steps = [int(ddim_steps * 0.5)] * len(resolutions_list)
 
-    pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
-
-    print('GPU starts')
-    result = infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps)
-    print('GPU ends')
+        # print('GPU starts')
+        result = infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu, restart_steps)
+        # print('GPU ends')
 
-    save_path = 'output.png'
-    result.save(save_path)
-
-    return save_path
+    return result
 
 
 examples = [
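
The app.py change amounts to the usual ZeroGPU "load in advance" pattern: both pipelines are now built once at import time (on CPU, in fp16) instead of inside infer(), and are only moved to CUDA inside the @spaces.GPU-decorated functions that actually get a GPU attached. Below is a minimal sketch of that pattern; it uses the stock diffusers StableDiffusionXLPipeline as a stand-in for this repo's custom FreeScale pipelines, whose call signatures take additional FreeScale-specific arguments not shown here.

import spaces
import torch
from diffusers import StableDiffusionXLPipeline

# Loaded once at import time, before any request arrives; the weights stay
# on CPU so that startup does not need a GPU.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)

@spaces.GPU(duration=120)
def generate(prompt, seed):
    # A GPU is only attached for the duration of this call, so the move to
    # CUDA happens here rather than at import time.
    pipe_gpu = pipe.to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(seed)
    return pipe_gpu(prompt, generator=generator).images[0]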
pipeline_freescale_turbo.py CHANGED

@@ -191,7 +191,7 @@ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
     return noise_cfg
 
 
-class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
+class StableDiffusionXLPipeline_Turbo(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
     r"""
     Pipeline for text-to-image generation using Stable Diffusion XL.
 
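
The only change in this file is the class rename, presumably so the turbo pipeline no longer reuses the same class name as the base pipeline now that app.py imports both at module level, as in the app.py hunk above:

from pipeline_freescale import StableDiffusionXLPipeline
from pipeline_freescale_turbo import StableDiffusionXLPipeline_Turbo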