Spaces: Running on Zero
Update app.py
from bobber/bigasp2 to John6666/biglove-ponyv20-sdxl
app.py CHANGED
@@ -11,8 +11,10 @@ dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 #taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 #good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
-
-
+
+# from bobber/bigasp2 to John6666/biglove-ponyv20-sdxl
+good_vae = AutoencoderKL.from_pretrained("John6666/biglove-ponyv20-sdxl", subfolder="vae", torch_dtype=dtype).to(device)
+pipeline = StableDiffusionXLPipeline.from_pretrained("John6666/biglove-ponyv20-sdxl", torch_dtype=dtype, vae=good_vae).to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 
@@ -27,8 +29,8 @@ with gr.Blocks() as interface:
     with gr.Column():
         with gr.Row():
             with gr.Column():
-                prompt = gr.Textbox(label="Prompt", info="What do you want?", value="
-                negative_prompt = gr.Textbox(label="Negative Prompt", info="What do you want to exclude from the image?", value="
+                prompt = gr.Textbox(label="Prompt", info="What do you want?", value="selfie, holding phone, 18 years old, red and blonde hair, (tattoos), messy long hair, stockings, wet pussy, toned body, oni tattoo, spread pussy, basement bath room, vibrant colors, ", lines=4, interactive=True)
+                negative_prompt = gr.Textbox(label="Negative Prompt", info="What do you want to exclude from the image?", value="monochrome", lines=4, interactive=True)
             with gr.Column():
                 generate_button = gr.Button("Generate")
                 output = gr.Image()
@@ -36,13 +38,13 @@ with gr.Blocks() as interface:
     with gr.Accordion(label="Advanced Settings", open=False):
         with gr.Row():
             with gr.Column():
-                width = gr.Slider(label="Width", info="The width in pixels of the generated image.", value=
-                height = gr.Slider(label="Height", info="The height in pixels of the generated image.", value=
+                width = gr.Slider(label="Width", info="The width in pixels of the generated image.", value=1248, minimum=128, maximum=4096, step=64, interactive=True)
+                height = gr.Slider(label="Height", info="The height in pixels of the generated image.", value=1824, minimum=128, maximum=4096, step=64, interactive=True)
             with gr.Column():
-                sampling_steps = gr.Slider(label="Sampling Steps", info="The number of denoising steps.", value=
+                sampling_steps = gr.Slider(label="Sampling Steps", info="The number of denoising steps.", value=8, minimum=4, maximum=50, step=1, interactive=True)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=163829704,)
             with gr.Column():
-                guidance_scale = gr.Slider(label="Guidance Scale", info="Guidance scale.", value=
+                guidance_scale = gr.Slider(label="Guidance Scale", info="Guidance scale.", value=2.5, minimum=1, maximum=10, step=0.1, interactive=True)
 
     generate_button.click(fn=generate, inputs=[prompt, negative_prompt, width, height, sampling_steps, guidance_scale, seed], outputs=[output])
 
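For context, here is a sketch of how the loading section reads after this change. Only the good_vae and pipeline lines come from the diff; the imports, dtype, and device setup around them are assumed from the surrounding hunk context and may differ from the actual app.py.

import torch
import numpy as np
from diffusers import AutoencoderKL, StableDiffusionXLPipeline  # assumed imports, not shown in the diff

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# from bobber/bigasp2 to John6666/biglove-ponyv20-sdxl
good_vae = AutoencoderKL.from_pretrained("John6666/biglove-ponyv20-sdxl", subfolder="vae", torch_dtype=dtype).to(device)
pipeline = StableDiffusionXLPipeline.from_pretrained("John6666/biglove-ponyv20-sdxl", torch_dtype=dtype, vae=good_vae).to(device)

MAX_SEED = np.iinfo(np.int32).max

Passing the VAE explicitly via vae= makes the pipeline reuse the already-loaded good_vae instead of loading the repo's VAE a second time, and leaves room to swap in a different VAE later without touching the pipeline call.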
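The click handler wires seven inputs to a generate function that never appears in the changed hunks. Since the Space runs on ZeroGPU, a minimal sketch of what such a function could look like follows; the @spaces.GPU decorator, the seeding, and the mapping of sampling_steps to num_inference_steps are assumptions, not code from this commit.

import spaces
import torch

@spaces.GPU  # ZeroGPU: a GPU is attached only for the duration of this call
def generate(prompt, negative_prompt, width, height, sampling_steps, guidance_scale, seed):
    # Hypothetical body; the real generate() is not part of this diff.
    generator = torch.Generator(device=device).manual_seed(int(seed))
    result = pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=int(width),
        height=int(height),
        num_inference_steps=int(sampling_steps),
        guidance_scale=float(guidance_scale),
        generator=generator,
    )
    return result.images[0]

The parameter order matches the inputs list in generate_button.click, which is how Gradio maps the UI components to the function's arguments.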