# Snapshot of a Hugging Face Space (commit fd47757, 3,651 bytes); the Space page reported "Runtime error".
# --- Imports (grouped stdlib / third-party per PEP 8; previously scattered) ---
import os

import gradio as gr
import torch
from diffusers import AutoencoderKL, DiffusionPipeline
from huggingface_hub import login
from PIL import Image, ImageDraw, ImageFont

# NOTE(review): unused import kept from the original file — looks like an
# accidental editor auto-import; safe to delete once confirmed.
from email import generator

# --- Configuration ---
model = "stabilityai/stable-diffusion-xl-base-1.0"
finetuningLayer = "bbsgp/10xFWDLora"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# fp16 only on GPU — half-precision ops are not generally supported on CPU.
torch_dtype = torch.float16 if device.type == 'cuda' else torch.float32

# Authenticate with the Hugging Face Hub; the LoRA repo may be gated.
# Guarded so a missing env var doesn't trigger an interactive login prompt.
HF_API_TOKEN = os.getenv("HF_API_TOKEN")
if HF_API_TOKEN:
    login(token=HF_API_TOKEN)

# madebyollin/sdxl-vae-fp16-fix: SDXL VAE patched to run safely in fp16
# (the stock SDXL VAE produces NaNs in half precision).
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch_dtype)
pipe = DiffusionPipeline.from_pretrained(
    model,
    vae=vae,
    torch_dtype=torch_dtype,
    use_safetensors=True,
)
pipe.load_lora_weights(finetuningLayer)
pipe = pipe.to(device)
def create_error_image(message, width=512, height=512):
    """Render *message* as black text centered on a white RGB image.

    Used as a stand-in output when inference cannot run (e.g. empty prompt).

    Args:
        message: Text to draw on the image.
        width: Image width in pixels (default 512, matching the original).
        height: Image height in pixels (default 512).

    Returns:
        A ``PIL.Image.Image`` of the requested size.
    """
    image = Image.new('RGB', (width, height), 'white')
    draw = ImageDraw.Draw(image)
    # PIL's built-in bitmap font — always available, no font file needed.
    # (The original comment claimed a truetype font was loaded; it wasn't.)
    font = ImageFont.load_default()
    # Center the text; the original hard-coded (127, 251) offset did not
    # actually center arbitrary-length messages.
    left, top, right, bottom = draw.textbbox((0, 0), message, font=font)
    x = (width - (right - left)) // 2
    y = (height - (bottom - top)) // 2
    draw.text((x, y), message, font=font, fill="black")
    return image
def inference(model, finetuningLayer, prompt, guidance, steps, seed):
    """Run the SDXL + LoRA pipeline for *prompt* and return a PIL image.

    Args:
        model: Base-model id from the UI dropdown (unused — the module-level
            ``pipe`` is already loaded; kept for callback-signature
            compatibility with the Gradio inputs list).
        finetuningLayer: LoRA id from the UI dropdown (unused, see above).
        prompt: Text prompt; if empty/falsy an error card is returned instead.
        guidance: Classifier-free guidance scale.
        steps: Number of denoising steps (sliders may deliver floats; cast).
        seed: RNG seed; 0 means "pick a random seed", matching the UI label
            "Seed (0 = random)" — the original always called ``manual_seed``,
            so 0 was silently deterministic.

    Returns:
        A ``PIL.Image.Image`` — the generated sample or an error card.
    """
    if not prompt:
        return create_error_image("Sorry, add your text prompt and try again!!")

    generator = torch.Generator(device)
    if seed:
        # Deterministic generation for a user-chosen non-zero seed.
        generator = generator.manual_seed(int(seed))
    else:
        # Seed 0 = random, as the slider label advertises.
        generator.seed()

    image = pipe(
        prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        generator=generator,
    ).images[0]
    return image
# Stylesheet for gr.Blocks(css=...). Gradio injects this string into the
# page's own <style> element, so it must be raw CSS — the original wrapped
# it in literal <style>...</style> tags, which become invalid stylesheet
# text and disable every rule below.
css = """
.finetuned-diffusion-div {
    text-align: center;
    max-width: 700px;
    margin: 0 auto;
}
.finetuned-diffusion-div div {
    display: inline-flex;
    align-items: center;
    gap: 0.8rem;
    font-size: 1.75rem;
}
.finetuned-diffusion-div div h1 {
    font-weight: 900;
    margin-bottom: 7px;
}
.finetuned-diffusion-div p {
    margin-bottom: 10px;
    font-size: 94%;
}
.finetuned-diffusion-div p a {
    text-decoration: underline;
}
"""
# --- UI layout and app entry point ---
with gr.Blocks(css=css) as demo:
    gr.HTML(
        """
        <div class="finetuned-diffusion-div">
          <div>
            <h1>Finetuned Diffusion</h1>
          </div>
        </div>
        """
    )
    with gr.Row():
        with gr.Column():
            # `value=` (not `default=`) is the Gradio keyword for a
            # component's initial value; `default=` raises a TypeError at
            # startup in Gradio 3.x+ — the probable cause of the Space's
            # "Runtime error". Note these names shadow the module-level
            # config strings, which is fine: `inference` ignores them.
            model = gr.Dropdown(label="baseModel", choices=[model], value=model)
            finetuningLayer = gr.Dropdown(label="Finetuning Layer", choices=[finetuningLayer], value=finetuningLayer)
            prompt = gr.Textbox(label="Prompt", placeholder="photo of McDBigMac - it is unique identifier need to be used to identify burgers")

            with gr.Accordion("Advanced options", open=True):
                guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
                steps = gr.Slider(label="Steps", value=50, minimum=2, maximum=100)
                seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)

            run = gr.Button(value="Run")
            gr.Markdown(f"Running on: {device}")
        with gr.Column():
            image_out = gr.Image()

    # Click the Run button to generate an image.
    run.click(inference, inputs=[model, finetuningLayer, prompt, guidance, steps, seed], outputs=image_out)

demo.queue()
demo.launch()