Manjushri committed on
Commit
00da6c1
1 Parent(s): 9fc2f5e

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +369 -0
app.py ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import numpy as np
4
+ import modin.pandas as pd
5
+ from PIL import Image
6
+ from diffusers import DiffusionPipeline, StableDiffusionLatentUpscalePipeline
7
+ from huggingface_hub import hf_hub_download
8
+
9
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
10
+ torch.cuda.max_memory_allocated(device=device)
11
+ torch.cuda.empty_cache()
12
+
13
# Hub repo ids for each model choice exposed in the UI radio button.
_BASE_MODELS = {
    "PhotoReal": "circulus/canvers-real-v3.8.1",
    "Anime": "circulus/canvers-anime-v3.8.1",
    "Disney": "circulus/canvers-disney-v3.8.1",
    "StoryBook": "circulus/canvers-story-v3.8.1",
    "SemiReal": "circulus/canvers-semi-v3.8.1",
    "Animagine XL 3.0": "cagliostrolab/animagine-xl-3.0",
    "SDXL 1.0": "stabilityai/stable-diffusion-xl-base-1.0",
    "FusionXL": "circulus/canvers-fusionXL-v1",
}

# SDXL-family bases hand the refiner a latent (output_type="latent" in the
# original code); the SD-1.5-family bases hand it a decoded image.
_LATENT_REFINE = {"Animagine XL 3.0", "SDXL 1.0", "FusionXL"}


def _load_base(model_name):
    """Load, optimize, and move to `device` the base pipeline for `model_name`.

    fp16 weights are used on GPU; full precision on CPU.  SDXL 1.0 always ships
    an fp16 safetensors variant, so it is loaded that way (as in the original).
    """
    repo = _BASE_MODELS[model_name]
    if model_name == "SDXL 1.0":
        pipe = DiffusionPipeline.from_pretrained(
            repo, torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
    elif torch.cuda.is_available():
        pipe = DiffusionPipeline.from_pretrained(
            repo, torch_dtype=torch.float16, safety_checker=None)
    else:
        # NOTE(review): the original FusionXL CPU branch fell back to
        # canvers-real-v3.8.1 — a copy-paste slip; we load the selected repo.
        pipe = DiffusionPipeline.from_pretrained(repo)
    # NOTE(review): requires the xformers package, as in the original.
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
    torch.cuda.empty_cache()
    return pipe


def _load_refiner():
    """Load the SDXL 1.0 refiner pipeline onto `device`."""
    if torch.cuda.is_available():
        refiner = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-refiner-1.0",
            use_safetensors=True, torch_dtype=torch.float16, variant="fp16")
    else:
        refiner = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-refiner-1.0")
    refiner.enable_xformers_memory_efficient_attention()
    refiner = refiner.to(device)
    torch.cuda.empty_cache()
    return refiner


def _upscale(image, prompt, negative_prompt):
    """2x-upscale `image` with the SD X2 latent upscaler and return the result."""
    upscaler = DiffusionPipeline.from_pretrained(
        "stabilityai/sd-x2-latent-upscaler",
        torch_dtype=torch.float16, use_safetensors=True)
    upscaler.enable_xformers_memory_efficient_attention()
    upscaler = upscaler.to(device)
    torch.cuda.empty_cache()
    upscaled = upscaler(prompt=prompt, negative_prompt=negative_prompt,
                        image=image, num_inference_steps=15,
                        guidance_scale=0).images[0]
    torch.cuda.empty_cache()
    return upscaled


def genie(Model, Prompt, negative_prompt, height, width, scale, steps, seed,
          refine, high_noise_frac, upscale):
    """Generate an image with the selected model, optionally refined/upscaled.

    Parameters mirror the Gradio inputs:
      Model            -- one of the keys of _BASE_MODELS.
      Prompt / negative_prompt -- text conditioning.
      height, width    -- output resolution in pixels.
      scale, steps     -- guidance scale and inference step count.
      seed             -- 0 means "random"; any other value is reproducible.
      refine           -- "Yes" to run the SDXL refiner from high_noise_frac.
      upscale          -- "Yes" to run the SD X2 latent upscaler afterwards.

    Returns a PIL image; raises ValueError for an unknown model name (the
    original fell through to an undefined variable and raised NameError).
    """
    if Model not in _BASE_MODELS:
        raise ValueError(f"Unknown model choice: {Model!r}")

    # Bug fix: the original built a generator but never passed it to any
    # pipeline call, so the seed slider had no effect on the output.
    generator = (None if seed == 0
                 else torch.Generator(device=device).manual_seed(int(seed)))

    pipe = _load_base(Model)

    if refine == "Yes":
        # XL bases pass latents to the refiner; others pass decoded images.
        extra = {"output_type": "latent"} if Model in _LATENT_REFINE else {}
        int_image = pipe(Prompt, negative_prompt=negative_prompt,
                         height=height, width=width, num_inference_steps=steps,
                         guidance_scale=scale, generator=generator,
                         **extra).images
        torch.cuda.empty_cache()
        # Load the refiner only after the base run so both never need to
        # coexist in VRAM (matching the original XL branches).
        refiner = _load_refiner()
        image = refiner(Prompt, negative_prompt=negative_prompt,
                        image=int_image,
                        denoising_start=high_noise_frac).images[0]
    else:
        image = pipe(Prompt, negative_prompt=negative_prompt, height=height,
                     width=width, num_inference_steps=steps,
                     guidance_scale=scale, generator=generator).images[0]
    torch.cuda.empty_cache()

    if upscale == "Yes":
        return _upscale(image, Prompt, negative_prompt)
    return image
354
+
355
# UI definition: one input widget per `genie` parameter, in signature order.
_MODEL_CHOICES = ['PhotoReal', 'Anime', 'Disney', 'StoryBook', 'SemiReal',
                  'Animagine XL 3.0', 'SDXL 1.0', 'FusionXL']

_INPUTS = [
    gr.Radio(_MODEL_CHOICES, value='PhotoReal', label='Choose Model'),
    gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
    gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
    gr.Slider(512, 1536, 768, step=128, label='Height'),
    gr.Slider(512, 1536, 768, step=128, label='Width'),
    gr.Slider(1, maximum=15, value=5, step=.25, label='Guidance Scale'),
    gr.Slider(5, maximum=100, value=50, step=5, label='Number of Iterations'),
    gr.Slider(minimum=0, step=1, maximum=9999999999999999, randomize=True,
              label='Seed: 0 is Random'),
    gr.Radio(["Yes", "No"],
             label='SDXL 1.0 Refiner: Use if the Image has too much Noise',
             value='No'),
    gr.Slider(minimum=.9, maximum=.99, value=.95, step=.01,
              label='Refiner Denoise Start %'),
    gr.Radio(["Yes", "No"], label='SD X2 Latent Upscaler?', value="No"),
]

_DEMO = gr.Interface(
    fn=genie,
    inputs=_INPUTS,
    outputs=gr.Image(label='Generated Image'),
    title="Manju Dream Booth V1.8 with SDXL 1.0 Refiner and SD X2 Latent Upscaler - GPU",
    description="<br><br><b/>Warning: This Demo is capable of producing NSFW content.",
    article="If You Enjoyed this Demo and would like to Donate, you can send any amount to any of these Wallets. <br><br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>DOGE: D9QdVPtcU1EFH8jDC8jhU9uBcSTqUiA8h6<br><br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>",
)

_DEMO.launch(debug=True, max_threads=80)