ameerazam08 committed
Commit 7b7c0d1 · verified · 1 parent: c02afec

Revert to the previous setup; will set up for the next.

Files changed (1)
  1. app.py +72 -178
app.py CHANGED
@@ -1,4 +1,3 @@
-# pip install diffusers, transformers, accelerate, safetensors, huggingface_hub
 
 
 import os
@@ -12,273 +11,168 @@ import PIL.Image
 
 import spaces
 import torch
-from diffusers import (
-    StableDiffusionXLPipeline,
-    UNet2DConditionModel,
-    EulerDiscreteScheduler,
-)
+from diffusers import AutoPipelineForText2Image, DPMSolverMultistepScheduler
+
+
+
+
 from huggingface_hub import hf_hub_download
-from safetensors.torch import load_file
+from diffusers.models.attention_processor import AttnProcessor2_0
 
 DESCRIPTION = """
 # Res-Adapter :Domain Consistent Resolution Adapter for Diffusion Models
 **Demo by [ameer azam] - [Twitter](https://twitter.com/Ameerazam18) - [GitHub](https://github.com/AMEERAZAM08)) - [Hugging Face](https://huggingface.co/ameerazam08)**
-This is a demo of https://huggingface.co/jiaxiangc/res-adapter ResAdapter by ByteDance.
+This is a demo of https://huggingface.co/jiaxiangc/res-adapter LORAs by ByteDance
+
 
-ByteDance provide a demo of [ResAdapter](https://huggingface.co/jiaxiangc/res-adapter) with [SDXL-Lightning-Step4](https://huggingface.co/ByteDance/SDXL-Lightning) to expand resolution range from 1024-only to 256~1024.
 """
 if not torch.cuda.is_available():
-    DESCRIPTION += (
-        "\n<h1>Running on CPU 🥶 This demo does not work on CPU.</a> instead</h1>"
-    )
+    DESCRIPTION += "\n<h1>Running on CPU 🥶 This demo does not work on CPU.</a> instead</h1>"
+
+
 
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
-MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
-USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+pipe = AutoPipelineForText2Image.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0',use_safetensors=True)# torch_dtype=torch.float16, variant="safetensors")
+pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++")
+
+
+
+
+
+
+
+
+
 
-base = "stabilityai/stable-diffusion-xl-base-1.0"
-repo = "ByteDance/SDXL-Lightning"
-ckpt = "sdxl_lightning_4step_unet.safetensors"  # Use the correct ckpt for your step setting!
-
-unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
-unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
-pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16")
-pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
-pipe = pipe.to(device)
-
-# Load resadapter
 pipe.load_lora_weights(
     hf_hub_download(
-        repo_id="jiaxiangc/res-adapter",
-        subfolder="sdxl-i",
+        repo_id="jiaxiangc/res-adapter",
+        subfolder="sdxl-i",
         filename="resolution_lora.safetensors",
     ),
     adapter_name="res_adapter",
 )
+pipe.set_adapters(["res_adapter"], adapter_weights=[1.0])
+pipe = pipe.to(device)
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-    return seed
-
-
-@spaces.GPU(enable_queue=True)
-def generate(
-    prompt: str,
-    negative_prompt: str = "",
-    prompt_2: str = "",
-    negative_prompt_2: str = "",
-    use_negative_prompt: bool = False,
-    use_prompt_2: bool = False,
-    use_negative_prompt_2: bool = False,
     seed: int = 0,
     width: int = 1024,
     height: int = 1024,
-    guidance_scale: float = 0,
-    num_inference_steps: int = 4,
+    guidance_scale_base: float = 5.0,
+    num_inference_steps_base: int = 20,
     progress=gr.Progress(track_tqdm=True),
 ) -> PIL.Image.Image:
-    print(f'** Generating image for: "{prompt}" **')
+    print(f"** Generating image for: \"{prompt}\" **")
     generator = torch.Generator().manual_seed(seed)
 
     if not use_negative_prompt:
-        negative_prompt = None  # type: ignore
-    if not use_prompt_2:
         prompt_2 = None  # type: ignore
     if not use_negative_prompt_2:
         negative_prompt_2 = None  # type: ignore
-
-    pipe.set_adapters(["res_adapter"], adapter_weights=[0.0])
-    base_image = pipe(
+    res_adapt=pipe(
+
 
         prompt=prompt,
         negative_prompt=negative_prompt,
         prompt_2=prompt_2,
         negative_prompt_2=negative_prompt_2,
         width=width,
        height=height,
-        num_inference_steps=num_inference_steps,
-        guidance_scale=guidance_scale,
-        output_type="pil",
+        guidance_scale=guidance_scale_base,
+        num_inference_steps=num_inference_steps_base,
        generator=generator,
+        output_type="pil",
+
     ).images[0]
 
-    pipe.set_adapters(["res_adapter"], adapter_weights=[1.0])
-    res_adapt = pipe(
+    pipe.unet.set_attn_processor(AttnProcessor2_0())
+    base_image = pipe(
+
         prompt=prompt,
         negative_prompt=negative_prompt,
         prompt_2=prompt_2,
         negative_prompt_2=negative_prompt_2,
         width=width,
         height=height,
-        num_inference_steps=num_inference_steps,
-        guidance_scale=guidance_scale,
-        output_type="pil",
+        guidance_scale=guidance_scale_base,
+        num_inference_steps=num_inference_steps_base,
+
         generator=generator,
-    ).images[0]
+        output_type="pil").images[0]
 
-    return [res_adapt, base_image]
+
+
+
+    return [res_adapt,base_image]
 
 
 examples = [
-    "A girl smiling",
-    "A boy smiling",
+    "A realistic photograph of an astronaut in a jungle, cold color palette, detailed, 8k",
+    "An astronaut riding a green horse",
+    "cinematic film still, photo of a girl, cyberpunk, neonpunk, headset, city at night, sony fe 12-24mm f/2.8 gm, close up, 32k uhd, wallpaper, analog film grain, SONY headset"
 ]
 
 theme = gr.themes.Base(
-    font=[
-        gr.themes.GoogleFont("Libre Franklin"),
-        gr.themes.GoogleFont("Public Sans"),
-        "system-ui",
-        "sans-serif",
-    ],
+    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
+
+
+
+
+
 )
 with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(
-        value="Duplicate Space for private use",
-        elem_id="duplicate-button",
-        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
-    )
-    with gr.Group():
-        prompt = gr.Text(
-            label="Prompt",
-            show_label=False,
-            max_lines=1,
-            container=False,
-            placeholder="Enter your prompt",
-        )
-        run_button = gr.Button("Generate")
     # result = gr.Gallery(label="Right is Res-Adapt-LORA and Left is Base"),
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
-            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
+            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
             use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
-            use_negative_prompt_2 = gr.Checkbox(
-                label="Use negative prompt 2", value=False
-            )
+            use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
+
+
         negative_prompt = gr.Text(
             label="Negative prompt",
             max_lines=1,
-            placeholder="Enter your prompt",
+            placeholder="ugly, deformed, noisy, blurry, nsfw, low contrast, text, BadDream, 3d, cgi, render, fake, anime, open mouth, big forehead, long neck",
             visible=True,
         )
         prompt_2 = gr.Text(
-            label="Prompt 2",
-            max_lines=1,
-            placeholder="Enter your prompt",
-            visible=False,
-        )
-        negative_prompt_2 = gr.Text(
-            label="Negative prompt 2",
-            max_lines=1,
-            placeholder="Enter a negative prompt",
-            visible=False,
-        )
-
-        seed = gr.Slider(
-            label="Seed",
-            minimum=0,
-            maximum=MAX_SEED,
-            step=1,
-            value=0,
-        )
-        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-        with gr.Row():
-            width = gr.Slider(
-                label="Width",
-                minimum=256,
-                maximum=MAX_IMAGE_SIZE,
-                step=32,
-                value=512,
-            )
-            height = gr.Slider(
-                label="Height",
-                minimum=256,
-                maximum=MAX_IMAGE_SIZE,
-                step=32,
                 value=512,
             )
         with gr.Row():
-            guidance_scale = gr.Slider(
-                label="Guidance scale",
-                minimum=0,
+            guidance_scale_base = gr.Slider(
+                label="Guidance scale for base",
+                minimum=1,
                 maximum=20,
                 step=0.1,
-                value=0,
+                value=9.5,
             )
-            num_inference_steps = gr.Slider(
-                label="Number of inference steps",
-                minimum=1,
-                maximum=50,
+            num_inference_steps_base = gr.Slider(
+                label="Number of inference steps for base",
+                minimum=10,
+                maximum=100,
                 step=1,
-                value=4,
+                value=25,
             )
     gr.Examples(
         examples=examples,
-        inputs=prompt,
-        outputs=None,
-        fn=generate,
-        cache_examples=CACHE_EXAMPLES,
-    )
-
-    use_negative_prompt.change(
-        fn=lambda x: gr.update(visible=x),
-        inputs=use_negative_prompt,
-        outputs=negative_prompt,
-        queue=False,
-        api_name=False,
-    )
-    use_prompt_2.change(
-        fn=lambda x: gr.update(visible=x),
-        inputs=use_prompt_2,
-        outputs=prompt_2,
-        queue=False,
-        api_name=False,
-    )
-    use_negative_prompt_2.change(
-        fn=lambda x: gr.update(visible=x),
-        inputs=use_negative_prompt_2,
-        outputs=negative_prompt_2,
-        queue=False,
-        api_name=False,
-    )
-    gr.on(
-        triggers=[
-            prompt.submit,
-            negative_prompt.submit,
-            prompt_2.submit,
-            negative_prompt_2.submit,
-            run_button.click,
-        ],
-        fn=randomize_seed_fn,
-        inputs=[seed, randomize_seed],
-        outputs=seed,
-        queue=False,
-        api_name=False,
-    ).then(
-        fn=generate,
-        inputs=[
-            prompt,
-            negative_prompt,
-            prompt_2,
-            negative_prompt_2,
-            use_negative_prompt,
-            use_prompt_2,
-            use_negative_prompt_2,
             seed,
             width,
             height,
-            guidance_scale,
-            num_inference_steps,
+            guidance_scale_base,
+            num_inference_steps_base,
         ],
-        outputs=gr.Gallery(label="Left is ResAdapter and Right is Base"),
+        outputs=gr.Gallery(label="Left is Res-Adapt-LORA and Right is Base"),
         api_name="run",
     )
 
 if __name__ == "__main__":
-    demo.queue(max_size=20, api_open=False).launch(show_api=False)
+    demo.queue(max_size=20, api_open=False).launch(show_api=False)
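For reference, the configuration this commit reverts to can be exercised outside the Space. The following is a minimal sketch assembled from the `+` side of the diff; it assumes `torch`, `diffusers`, `huggingface_hub`, and `peft` (needed for `set_adapters`) are installed on a CUDA machine, and the prompt, seed, image size, and output filename are illustrative rather than taken from the Space.

```python
# Minimal sketch of the reverted-to ResAdapter pipeline, per the + side of the diff.
import torch
from diffusers import AutoPipelineForText2Image, DPMSolverMultistepScheduler
from huggingface_hub import hf_hub_download

device = "cuda" if torch.cuda.is_available() else "cpu"

# SDXL base with the SDE-DPM++ Karras scheduler, as in the new app.py.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", use_safetensors=True
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"
)

# ResAdapter resolution LoRA, which lets SDXL generate well below its native 1024px.
pipe.load_lora_weights(
    hf_hub_download(
        repo_id="jiaxiangc/res-adapter",
        subfolder="sdxl-i",
        filename="resolution_lora.safetensors",
    ),
    adapter_name="res_adapter",
)
pipe.set_adapters(["res_adapter"], adapter_weights=[1.0])  # requires peft
pipe = pipe.to(device)

# One 512x512 sample with the UI defaults from the diff (guidance 9.5, 25 steps).
image = pipe(
    prompt="An astronaut riding a green horse",
    width=512,
    height=512,
    guidance_scale=9.5,
    num_inference_steps=25,
    generator=torch.Generator().manual_seed(0),
).images[0]
image.save("res_adapter_512.png")
```

With `adapter_weights=[1.0]` the resolution LoRA is active; the app's `generate` function then calls `pipe.unet.set_attn_processor(AttnProcessor2_0())`, which replaces the UNet's attention processors with plain ones and so appears intended to drop the LoRA when rendering the base-model comparison image.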