# Author: Samuel L Meyers
# MEOW
# rev: b3c0af8
import base64
import io

import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image, AutoencoderKL, DiffusionPipeline
# Earlier SDXL fp16 configuration, kept disabled for reference:
# vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
# pipeline_text2image = AutoPipelineForText2Image.from_pretrained(
# "stabilityai/stable-diffusion-xl-base-1.0", use_safetensors=True, revision="fp16", torch_dtype=torch.float16, vae=vae
# )
# Load the fine-tuned Stable Diffusion 2.1 pipeline once at import time
# (downloads weights on first run).
# NOTE(review): no torch_dtype or device is specified, so this loads in the
# default dtype on CPU — confirm whether .to("cuda") / fp16 was intended.
pipeline_text2image = DiffusionPipeline.from_pretrained(
    "ARDICAI/stable-diffusion-2-1-finetuned"
)
def gradio_txt2img(prompt):
    """Run the text-to-image pipeline on *prompt* and return the first image."""
    result = pipeline_text2image(prompt=prompt)
    return result.images[0]
def api_txt2base64(prompt):
    """Generate an image for *prompt* and return it as a base64-encoded PNG string.

    Fixes the original ``gradio_txt2img(prompt).to_base64()`` call: PIL images
    have no ``to_base64`` method, so the old code raised AttributeError on
    every invocation. The image is serialized to PNG in memory and encoded.
    """
    image = gradio_txt2img(prompt)
    buffer = io.BytesIO()
    # PNG is lossless and universally decodable on the client side.
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("ascii")
# Build the two-panel gradio UI: one row for plain text-to-image, one row
# for the base64 API variant, then start the server.
with gr.Blocks() as demo:
    gr.Markdown("## Stable Diffusion XL - Modded to Hell Edition")
    gr.Markdown("## Text to Image\n\nConverts a prompt into an image.")
    # Row 1: prompt -> rendered image.
    with gr.Row():
        with gr.Column():
            txt2img_prompt = gr.Textbox(label="Input", lines=2, max_lines=2)
            txt2img_btn = gr.Button("Generate")
        with gr.Column():
            txt2img_out = gr.Image()
    # Row 2: prompt -> base64-encoded image (exposed as an API endpoint).
    with gr.Row():
        with gr.Column():
            txt2b64_prompt = gr.Textbox(label="Input", lines=2, max_lines=2)
            txt2b64_btn = gr.Button("Generate")
        with gr.Column():
            txt2b64_out = gr.Image()
    # Wire buttons to handlers; api_name exposes each as a named API route.
    txt2img_btn.click(fn=gradio_txt2img, inputs=txt2img_prompt, outputs=txt2img_out, queue=True, api_name="gradio_txt2img")
    # NOTE(review): api_txt2base64 returns a base64 string while its output
    # component is gr.Image — confirm gradio renders this as intended.
    txt2b64_btn.click(fn=api_txt2base64, inputs=txt2b64_prompt, outputs=txt2b64_out, queue=True, api_name="gradio_txt2b64")
# Bind to all interfaces on port 7860 (standard gradio/Spaces port).
demo.launch(server_name="0.0.0.0", server_port=7860)