bbsgp committed
Commit fd47757 · 1 Parent(s): 342153c

Create app.py

Files changed (1): app.py (+138 −0)
app.py ADDED
@@ -0,0 +1,138 @@
+ import os
+ 
+ import gradio as gr
+ import torch
+ from PIL import Image, ImageDraw, ImageFont
+ ## VAE - special VAE used for training: madebyollin/sdxl-vae-fp16-fix.
+ from diffusers import AutoencoderKL, DiffusionPipeline
+ from huggingface_hub import login
+ 
+ 
+ model = "stabilityai/stable-diffusion-xl-base-1.0"
+ finetuningLayer = "bbsgp/10xFWDLora"
+ 
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch_dtype = torch.float16 if device.type == 'cuda' else torch.float32
+ 
+ # Authenticate with the Hugging Face Hub (needed if the LoRA repo is private)
+ HF_API_TOKEN = os.getenv("HF_API_TOKEN")
+ login(token=HF_API_TOKEN)
+ 
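+ # madebyollin/sdxl-vae-fp16-fix is a drop-in SDXL VAE patched so that
+ # decoding stays numerically stable in float16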
+ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch_dtype)
+ pipe = DiffusionPipeline.from_pretrained(
+     model,
+     vae=vae,
+     torch_dtype=torch_dtype,
+     use_safetensors=True
+ )
+ pipe.load_lora_weights(finetuningLayer)
+ pipe = pipe.to(device)
+ 
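+ # Placeholder image shown in the UI when the user submits an empty prompt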
+ def create_error_image(message):
+     # Create a blank 512x512 image with a white background
+     width, height = 512, 512
+     image = Image.new('RGB', (width, height), 'white')
+     draw = ImageDraw.Draw(image)
+ 
+     # Use PIL's built-in default bitmap font
+     font = ImageFont.load_default()
+ 
+     # Draw the message near the middle of the canvas
+     draw.text((127, 251), message, font=font, fill="black")
+ 
+     return image
+ 
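+ # model and finetuningLayer come in from the UI dropdowns; the pipeline is already
+ # loaded globally, so they are accepted here only so the dropdowns can be wired as inputs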
+ def inference(model, finetuningLayer, prompt, guidance, steps, seed):
+     if not prompt:
+         return create_error_image("Sorry, add your text prompt and try again!!")
+     else:
+         # Honor the "0 = random" seed label: only seed the generator when nonzero
+         generator = torch.Generator(device).manual_seed(seed) if seed else None
+         image = pipe(
+             prompt,
+             num_inference_steps=int(steps),
+             guidance_scale=guidance,
+             generator=generator).images[0]
+ 
+         return image
+ 
+ # Raw CSS for gr.Blocks; Gradio injects it itself, so no <style> wrapper is needed
+ css = """
+ .finetuned-diffusion-div {
+     text-align: center;
+     max-width: 700px;
+     margin: 0 auto;
+ }
+ .finetuned-diffusion-div div {
+     display: inline-flex;
+     align-items: center;
+     gap: 0.8rem;
+     font-size: 1.75rem;
+ }
+ .finetuned-diffusion-div div h1 {
+     font-weight: 900;
+     margin-bottom: 7px;
+ }
+ .finetuned-diffusion-div p {
+     margin-bottom: 10px;
+     font-size: 94%;
+ }
+ .finetuned-diffusion-div p a {
+     text-decoration: underline;
+ }
+ """
+ with gr.Blocks(css=css) as demo:
+     gr.HTML(
+         """
+         <div class="finetuned-diffusion-div">
+             <div>
+                 <h1>Finetuned Diffusion</h1>
+             </div>
+         </div>
+         """
+     )
+     with gr.Row():
+         with gr.Column():
+             model = gr.Dropdown(label="Base model", choices=[model], value=model)
+             finetuningLayer = gr.Dropdown(label="Finetuning layer", choices=[finetuningLayer], value=finetuningLayer)
+             prompt = gr.Textbox(label="Prompt", placeholder="photo of McDBigMac - the unique identifier used to identify the burger")
+ 
+             with gr.Accordion("Advanced options", open=True):
+                 guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
+                 steps = gr.Slider(label="Steps", value=50, minimum=2, maximum=100)
+                 seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
+ 
+             run = gr.Button(value="Run")
+             gr.Markdown(f"Running on: {device}")
+         with gr.Column():
+             image_out = gr.Image()
+ 
+     ## Submit the prompt with Enter to run:
+     ## prompt.submit(inference, inputs=[model, finetuningLayer, prompt, guidance, steps, seed], outputs=image_out)
+ 
+     ## Click the Run button to run:
+     run.click(inference, inputs=[model, finetuningLayer, prompt, guidance, steps, seed], outputs=image_out)
+ 
+ demo.queue()
+ demo.launch()
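
Once the Space is running, it can also be driven programmatically. Below is a minimal sketch using the gradio_client package; the Space id ("<owner>/<space-name>") and the api_name ("/predict", Gradio's default name for the first registered event) are assumptions, not taken from this commit.

from gradio_client import Client

# Hypothetical Space id - replace with the actual owner/name of this Space
client = Client("<owner>/<space-name>")

result = client.predict(
    "stabilityai/stable-diffusion-xl-base-1.0",  # base model dropdown
    "bbsgp/10xFWDLora",                          # finetuning layer dropdown
    "photo of McDBigMac",                        # prompt
    7.5,                                         # guidance scale
    50,                                          # steps
    0,                                           # seed (0 = random)
    api_name="/predict"                          # assumed default event name
)
print(result)  # path to the generated image file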