Spaces: Running on Zero
File size: 3,206 Bytes
import gradio as gr
import spaces
import random
import numpy as np
import torch
from diffusers import StableDiffusionXLPipeline
from diffusers import AutoencoderTiny, AutoencoderKL
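
# bfloat16 halves weight memory versus float32; fall back to CPU only when no CUDA device is visible
# (assumption: this Space runs on ZeroGPU hardware, where bfloat16 is supported).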
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
#taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
#good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
# note: possible checkpoint swap, from bobber/bigasp2 to John6666/biglove-ponyv20-sdxl (the code below still loads bobber/bigasp2)
good_vae = AutoencoderKL.from_pretrained("bobber/bigasp2", subfolder="vae", torch_dtype=dtype).to(device)
pipeline = StableDiffusionXLPipeline.from_pretrained("bobber/bigasp2", torch_dtype=dtype, vae=good_vae).to(device)
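
# Largest 32-bit signed integer; upper bound for seeds chosen by the UI slider and by random.randint below.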
MAX_SEED = np.iinfo(np.int32).max
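
# @spaces.GPU lets Hugging Face ZeroGPU attach a GPU to this function for the duration of each call.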
@spaces.GPU
def generate(prompt, negative_prompt, width, height, sample_steps, guidance_scale, seed):
    # A seed of 0 means "randomize": draw a fresh seed for this call.
    if seed == 0:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    return pipeline(prompt=prompt, generator=generator, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=sample_steps).images[0]

with gr.Blocks() as interface:
    with gr.Column():
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(label="Prompt", info="What do you want?", value="selfie, holding phone, 18 years old, red and blonde hair, (tattoos), messy long hair, stockings, wet pussy, toned body, oni tattoo, spread pussy, basement bath room, vibrant colors, ", lines=4, interactive=True)
                negative_prompt = gr.Textbox(label="Negative Prompt", info="What do you want to exclude from the image?", value="monochrome", lines=4, interactive=True)
            with gr.Column():
                generate_button = gr.Button("Generate")
                output = gr.Image()
        with gr.Row():
            with gr.Accordion(label="Advanced Settings", open=False):
                with gr.Row():
                    with gr.Column():
                        width = gr.Slider(label="Width", info="The width in pixels of the generated image.", value=1248, minimum=128, maximum=4096, step=64, interactive=True)
                        height = gr.Slider(label="Height", info="The height in pixels of the generated image.", value=1824, minimum=128, maximum=4096, step=64, interactive=True)
                    with gr.Column():
                        sampling_steps = gr.Slider(label="Sampling Steps", info="The number of denoising steps.", value=8, minimum=4, maximum=50, step=1, interactive=True)
                        seed = gr.Slider(label="Seed", info="Set to 0 to use a random seed.", minimum=0, maximum=MAX_SEED, step=1, value=163829704)
                    with gr.Column():
                        guidance_scale = gr.Slider(label="Guidance Scale", info="How strongly the output follows the prompt (classifier-free guidance).", value=2.5, minimum=1, maximum=10, step=0.1, interactive=True)

    generate_button.click(fn=generate, inputs=[prompt, negative_prompt, width, height, sampling_steps, guidance_scale, seed], outputs=[output])

if __name__ == "__main__":
    interface.launch()