eienmojiki committed on
Commit
0d344cd
·
verified ·
1 Parent(s): eee1512

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -2
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import os
 
2
  import random
3
  from typing import Callable, Dict, Optional, Tuple
4
 
@@ -9,7 +10,7 @@ import spaces
9
  import torch
10
 
11
  from transformers import CLIPTextModel
12
- from diffusers import AutoencoderKL, StableDiffusionXLPipeline, DDIMScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
13
 
14
  MODEL = "eienmojiki/Starry-XL-v5.2"
15
  HF_TOKEN = os.getenv("HF_TOKEN")
@@ -93,6 +94,28 @@ def load_pipeline(model_name):
93
  pipe.to(device)
94
  return pipe
95
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
  @spaces.GPU(enable_queue=False)
97
  def generate(
98
  prompt: str,
@@ -118,9 +141,11 @@ def generate(
118
 
119
  pipe.to(device)
120
 
 
 
121
  try:
122
 
123
- img = pipe(
124
  prompt = prompt,
125
  negative_prompt = negative_prompt,
126
  width = width,
@@ -129,6 +154,19 @@ def generate(
129
  num_inference_steps = num_inference_steps,
130
  generator = generator,
131
  num_images_per_prompt=1,
 
 
 
 
 
 
 
 
 
 
 
 
 
132
  output_type="pil",
133
  ).images[0]
134
 
@@ -136,6 +174,9 @@ def generate(
136
 
137
  except Exception as e:
138
  print(f"An error occurred: {e}")
 
 
 
139
 
140
  if torch.cuda.is_available():
141
  pipe = load_pipeline(MODEL)
 
1
  import os
2
+ import gc
3
  import random
4
  from typing import Callable, Dict, Optional, Tuple
5
 
 
10
  import torch
11
 
12
  from transformers import CLIPTextModel
13
+ from diffusers import AutoencoderKL, StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, DDIMScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
14
 
15
  MODEL = "eienmojiki/Starry-XL-v5.2"
16
  HF_TOKEN = os.getenv("HF_TOKEN")
 
94
  pipe.to(device)
95
  return pipe
96
 
97
def common_upscale(
    samples: torch.Tensor,
    width: int,
    height: int,
    upscale_method: str,
) -> torch.Tensor:
    """Resize a 4-D (N, C, H, W) batch to exactly ``(height, width)``.

    ``upscale_method`` is forwarded verbatim as the ``mode`` argument of
    ``torch.nn.functional.interpolate`` (e.g. "nearest-exact", "bilinear").
    """
    target_size = (height, width)
    return torch.nn.functional.interpolate(
        samples,
        size=target_size,
        mode=upscale_method,
    )
106
+
107
+
108
def upscale(
    samples: torch.Tensor, upscale_method: str, scale_by: float
) -> torch.Tensor:
    """Grow a 4-D (N, C, H, W) batch by ``scale_by`` along both spatial axes.

    Target sizes are rounded to the nearest integer and the actual resize is
    delegated to :func:`common_upscale` with the given ``upscale_method``.
    """
    _, _, src_h, src_w = samples.shape
    target_w = round(src_w * scale_by)
    target_h = round(src_h * scale_by)
    return common_upscale(samples, target_w, target_h, upscale_method)
114
+
115
def free_memory() -> None:
    """Release accumulated GPU and Python memory between generations.

    Runs ``gc.collect()`` *before* emptying the CUDA cache: tensors kept
    alive only by unreachable Python objects (e.g. the deleted upscaler
    pipeline) must be garbage-collected first so their cached blocks can
    actually be returned by ``torch.cuda.empty_cache()``. The CUDA call is
    skipped entirely on CPU-only hosts.
    """
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
118
+
119
  @spaces.GPU(enable_queue=False)
120
  def generate(
121
  prompt: str,
 
141
 
142
  pipe.to(device)
143
 
144
+ upscaler_pipe = StableDiffusionXLImg2ImgPipeline(**pipe.components)
145
+
146
  try:
147
 
148
+ latents = pipe(
149
  prompt = prompt,
150
  negative_prompt = negative_prompt,
151
  width = width,
 
154
  num_inference_steps = num_inference_steps,
155
  generator = generator,
156
  num_images_per_prompt=1,
157
+ output_type="latents",
158
+ ).images
159
+
160
+ upscaled_latents = upscale(latents, "nearest-exact", 2.0)
161
+
162
+ img = upscaler_pipe(
163
+ prompt=prompt,
164
+ negative_prompt=negative_prompt,
165
+ image=upscaled_latents,
166
+ guidance_scale=guidance_scale,
167
+ num_inference_steps=num_inference_steps,
168
+ strength=0.55,
169
+ generator=generator,
170
  output_type="pil",
171
  ).images[0]
172
 
 
174
 
175
  except Exception as e:
176
  print(f"An error occurred: {e}")
177
+ finally:
178
+ del upscaler_pipe
179
+ free_memory()
180
 
181
  if torch.cuda.is_available():
182
  pipe = load_pipeline(MODEL)