Update app.py
app.py CHANGED
@@ -1,5 +1,6 @@
 import gradio as gr
 import spaces
+import random
 
 import torch
 from diffusers import StableDiffusionXLPipeline
@@ -12,9 +13,14 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 good_vae = AutoencoderKL.from_pretrained("bobber/bigasp2", subfolder="vae", torch_dtype=dtype).to(device)
 pipeline = StableDiffusionXLPipeline.from_pretrained("bobber/bigasp2", torch_dtype=dtype, vae=good_vae).to(device)
 
+MAX_SEED = np.iinfo(np.int32).max
+
 @spaces.GPU
-def generate(prompt, negative_prompt, width, height, sample_steps, guidance_scale):
-    return pipeline(prompt=prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=sample_steps).images[0]
+def generate(prompt, negative_prompt, width, height, sample_steps, guidance_scale, seed):
+    if seed ==0:
+        seed = random.randint(0, MAX_SEED)
+    generator = torch.Generator().manual_seed(seed)
+    return pipeline(prompt=prompt, generator=generator, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=sample_steps).images[0]
 
 with gr.Blocks() as interface:
     with gr.Column():
@@ -33,10 +39,11 @@ with gr.Blocks() as interface:
             height = gr.Slider(label="Height", info="The height in pixels of the generated image.", value=1024, minimum=128, maximum=4096, step=64, interactive=True)
         with gr.Column():
             sampling_steps = gr.Slider(label="Sampling Steps", info="The number of denoising steps.", value=40, minimum=4, maximum=50, step=1, interactive=True)
+            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0,)
         with gr.Column():
             guidance_scale = gr.Slider(label="Guidance Scale", info="Guidance scale.", value=2.5, minimum=1, maximum=10, step=0.1, interactive=True)
 
-    generate_button.click(fn=generate, inputs=[prompt, negative_prompt, width, height, sampling_steps, guidance_scale], outputs=[output])
+    generate_button.click(fn=generate, inputs=[prompt, negative_prompt, width, height, sampling_steps, guidance_scale, seed], outputs=[output])
 
 if __name__ == "__main__":
     interface.launch()
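
Note: the added code reads np.iinfo(np.int32).max, but this commit only adds import random; unless numpy is already imported as np elsewhere in app.py (it does not appear in the diff context), the Space will fail at startup with a NameError. Below is a minimal, self-contained sketch of the seed handling under that assumption, with the missing import added; the helper name resolve_generator is illustrative and not part of the commit.

# Minimal sketch of the seed logic from this commit, assuming numpy is not
# imported elsewhere in app.py; resolve_generator is an illustrative helper.
import random

import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max  # 2147483647

def resolve_generator(seed: int) -> torch.Generator:
    # A seed of 0 means "pick a random seed", matching the slider's default value.
    if seed == 0:
        seed = random.randint(0, MAX_SEED)
    return torch.Generator().manual_seed(seed)

# The resulting generator is then passed to the pipeline call,
# e.g. pipeline(..., generator=resolve_generator(seed), ...).images[0]

Alternatively, defining MAX_SEED = 2**31 - 1 would give the same bound without the numpy dependency.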