eienmojiki committed on
Commit
1db6719
·
verified ·
1 Parent(s): 501dcd0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -18
app.py CHANGED
@@ -141,11 +141,9 @@ def generate(
141
 
142
  pipe.to(device)
143
 
144
- upscaler_pipe = StableDiffusionXLImg2ImgPipeline(**pipe.components)
145
-
146
  try:
147
  gr.Info("Generating image...")
148
- latents = pipe(
149
  prompt = prompt,
150
  negative_prompt = negative_prompt,
151
  width = width,
@@ -154,20 +152,6 @@ def generate(
154
  num_inference_steps = num_inference_steps,
155
  generator = generator,
156
  num_images_per_prompt=1,
157
- output_type="latent",
158
- ).images
159
-
160
- upscaled_latents = upscale(latents, "nearest-exact", 2.0)
161
-
162
- gr.Info("Upscaling...")
163
- img = upscaler_pipe(
164
- prompt=prompt,
165
- negative_prompt=negative_prompt,
166
- image=upscaled_latents,
167
- guidance_scale=guidance_scale,
168
- num_inference_steps=num_inference_steps,
169
- strength=0.55,
170
- generator=generator,
171
  output_type="pil",
172
  ).images[0]
173
 
@@ -176,7 +160,6 @@ def generate(
176
  except Exception as e:
177
  print(f"An error occurred: {e}")
178
  finally:
179
- del upscaler_pipe
180
  free_memory()
181
 
182
  if torch.cuda.is_available():
 
141
 
142
  pipe.to(device)
143
 
 
 
144
  try:
145
  gr.Info("Generating image...")
146
+ img = pipe(
147
  prompt = prompt,
148
  negative_prompt = negative_prompt,
149
  width = width,
 
152
  num_inference_steps = num_inference_steps,
153
  generator = generator,
154
  num_images_per_prompt=1,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
155
  output_type="pil",
156
  ).images[0]
157
 
 
160
  except Exception as e:
161
  print(f"An error occurred: {e}")
162
  finally:
 
163
  free_memory()
164
 
165
  if torch.cuda.is_available():