Update app.py
app.py CHANGED
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+#!/usr/bin/env python
+
 import os
 import random
 import uuid
@@ -27,30 +29,29 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
 
 MAX_SEED = np.iinfo(np.int32).max
 
-if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
-
-MAX_SEED = np.iinfo(np.int32).max
-
 USE_TORCH_COMPILE = 0
 ENABLE_CPU_OFFLOAD = 0
 
-
 if torch.cuda.is_available():
-    [… 8 removed lines not shown in this view …]
+    DESCRIPTION += "\n<p>Running on GPU 🚀</p>"
+    device = "cuda"
+else:
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may be slower on CPU.</p>"
+    device = "cpu"
+
+pipe = StableDiffusionXLPipeline.from_pretrained(
+    "fluently/Fluently-XL-Final",
+    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+    use_safetensors=True,
+)
+pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
+if device == "cuda":
     pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
     pipe.set_adapters("dalle")
-    [… 4 removed lines not shown in this view …]
+    pipe.to(device)
+else:
+    pipe.to("cpu") # Ensure compatibility with CPU
 
 @spaces.GPU(enable_queue=True)
 def generate(
@@ -64,13 +65,13 @@ def generate(
     randomize_seed: bool = False,
     progress=gr.Progress(track_tqdm=True),
 ):
-    [… 2 removed lines not shown in this view …]
     seed = int(randomize_seed_fn(seed, randomize_seed))
 
     if not use_negative_prompt:
         negative_prompt = ""  # type: ignore
 
+    generator = torch.manual_seed(seed)
+
     images = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
@@ -81,9 +82,10 @@ def generate(
         num_images_per_prompt=1,
         cross_attention_kwargs={"scale": 0.65},
         output_type="pil",
+        generator=generator,
     ).images
+
     image_paths = [save_image(img) for img in images]
-    print(image_paths)
     return image_paths, seed
 
 examples = [
@@ -102,6 +104,7 @@ footer {
     visibility: hidden
 }
 '''
+
 with gr.Blocks(css=css, theme="pseudolab/huggingface-korea-theme") as demo:
     gr.Markdown(DESCRIPTION)
     gr.DuplicateButton(
@@ -178,7 +181,6 @@ with gr.Blocks(css=css, theme="pseudolab/huggingface-korea-theme") as demo:
         outputs=negative_prompt,
         api_name=False,
     )
-
 
     gr.on(
         triggers=[
@@ -200,6 +202,6 @@ with gr.Blocks(css=css, theme="pseudolab/huggingface-korea-theme") as demo:
         outputs=[result, seed],
         api_name="run",
     )
-
+
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch(show_api=False, debug=False)
+    demo.queue(max_size=20).launch(show_api=False, debug=False)
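A note on the seeding pattern this commit introduces: torch.manual_seed(seed) seeds PyTorch's default CPU generator and returns it, and diffusers accepts a CPU generator even when the pipeline runs on CUDA (initial latents are drawn on the generator's device, then moved). Below is a minimal standalone sketch of that pattern; the model ID and scheduler come from the diff, while the prompt, seed, and output path are purely illustrative.

import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Same setup as in the commit: fp16 weights on GPU, fp32 on CPU.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "fluently/Fluently-XL-Final",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    use_safetensors=True,
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(device)

seed = 42  # illustrative; the app derives this via randomize_seed_fn()
# torch.manual_seed() returns the default CPU generator; a device-local
# torch.Generator(device).manual_seed(seed) would also work here.
generator = torch.manual_seed(seed)

image = pipe(
    prompt="an astronaut riding a horse",  # illustrative prompt
    generator=generator,
    num_images_per_prompt=1,
    output_type="pil",
).images[0]
image.save("sample.png")

Re-running with the same seed should reproduce the same image for a given model, scheduler, and device, which is why generate() now returns seed alongside the image paths.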