hysts (HF Staff) committed
Commit 7795c61 · Parent(s): 7b511c8
Files changed (2):
  1. app.py +22 -37
  2. style.css +0 -7
app.py CHANGED
@@ -11,47 +11,29 @@ import torch
 from diffusers import AutoencoderKL, DiffusionPipeline
 
 DESCRIPTION = "# SDXL"
-if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
-USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
-ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 ENABLE_REFINER = os.getenv("ENABLE_REFINER", "1") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-if torch.cuda.is_available():
-    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-    pipe = DiffusionPipeline.from_pretrained(
-        "stabilityai/stable-diffusion-xl-base-1.0",
+
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+pipe = DiffusionPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    vae=vae,
+    torch_dtype=torch.float16,
+    use_safetensors=True,
+    variant="fp16",
+).to(device)
+if ENABLE_REFINER:
+    refiner = DiffusionPipeline.from_pretrained(
+        "stabilityai/stable-diffusion-xl-refiner-1.0",
         vae=vae,
         torch_dtype=torch.float16,
         use_safetensors=True,
         variant="fp16",
-    )
-    if ENABLE_REFINER:
-        refiner = DiffusionPipeline.from_pretrained(
-            "stabilityai/stable-diffusion-xl-refiner-1.0",
-            vae=vae,
-            torch_dtype=torch.float16,
-            use_safetensors=True,
-            variant="fp16",
-        )
-
-    if ENABLE_CPU_OFFLOAD:
-        pipe.enable_model_cpu_offload()
-        if ENABLE_REFINER:
-            refiner.enable_model_cpu_offload()
-    else:
-        pipe.to(device)
-        if ENABLE_REFINER:
-            refiner.to(device)
-
-    if USE_TORCH_COMPILE:
-        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-        if ENABLE_REFINER:
-            refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
+    ).to(device)
 
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
@@ -136,7 +118,7 @@ with gr.Blocks(css_paths="style.css") as demo:
 
     with gr.Group():
         with gr.Row():
-            prompt = gr.Text(
+            prompt = gr.Textbox(
                 label="Prompt",
                 show_label=False,
                 max_lines=1,
@@ -149,23 +131,26 @@ with gr.Blocks(css_paths="style.css") as demo:
             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
             use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
             use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
-        negative_prompt = gr.Text(
+        negative_prompt = gr.Textbox(
            label="Negative prompt",
            max_lines=1,
            placeholder="Enter a negative prompt",
            visible=False,
+           value="",
        )
-        prompt_2 = gr.Text(
+        prompt_2 = gr.Textbox(
            label="Prompt 2",
            max_lines=1,
            placeholder="Enter your prompt",
            visible=False,
+           value="",
        )
-        negative_prompt_2 = gr.Text(
+        negative_prompt_2 = gr.Textbox(
            label="Negative prompt 2",
            max_lines=1,
            placeholder="Enter a negative prompt",
            visible=False,
+           value="",
        )
 
        seed = gr.Slider(
@@ -291,8 +276,8 @@ with gr.Blocks(css_paths="style.css") as demo:
             apply_refiner,
         ],
         outputs=result,
-        api_name="run",
+        api_name="predict",
     )
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
+    demo.launch()
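Note: the rewrite assumes a GPU host. The CUDA guard is gone along with the USE_TORCH_COMPILE and ENABLE_CPU_OFFLOAD switches, and both pipelines now load eagerly and move straight to device. If the offload path is ever needed again (for example on a low-VRAM GPU), a minimal sketch using the standard diffusers API; pipe, refiner, and ENABLE_REFINER are the objects defined in app.py above:

    import torch

    # Minimal sketch: restore the CPU-offload behavior this commit removes.
    # pipe, refiner, and ENABLE_REFINER come from app.py above.
    # enable_model_cpu_offload() keeps each sub-model (UNet, VAE, text
    # encoders) on the CPU and moves it to the GPU only for its forward pass;
    # diffusers recommends calling it in place of .to(device), not after it.
    if torch.cuda.is_available():
        pipe.enable_model_cpu_offload()
        if ENABLE_REFINER:
            refiner.enable_model_cpu_offload()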
 
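Note: api_name changes from "run" to "predict", which moves the public API endpoint from /run to /predict, so any client pinned to the old path must be updated. A minimal sketch for inspecting the exposed endpoints with gradio_client; the Space ID hysts/SDXL is an assumption here, substitute the actual Space:

    from gradio_client import Client

    # "hysts/SDXL" is an assumed Space ID; substitute the real one.
    client = Client("hysts/SDXL")
    client.view_api()  # after this commit the endpoint is /predict, not /run

Dropping queue(max_size=20) also removes the explicit cap on pending requests; recent Gradio releases enable queueing by default, so demo.launch() alone still serves events through the queue.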
style.css CHANGED
@@ -3,13 +3,6 @@ h1 {
   display: block;
 }
 
-#duplicate-button {
-  margin: auto;
-  color: #fff;
-  background: #1565c0;
-  border-radius: 100vh;
-}
-
 .contain {
   max-width: 730px;
   margin: auto;