Gauri Kishor Damle committed on
Commit 1fd4e85
1 Parent(s): 695f834
Files changed (4)
  1. app.py +136 -0
  2. requirements.txt +11 -0
  3. style.css +24 -0
  4. utils.py +6 -0
app.py ADDED
@@ -0,0 +1,136 @@
+ from diffusers import DiffusionPipeline
+ from diffusers import AutoencoderKL
+ import gradio as gr
+ import torch
+ from PIL import Image, ImageDraw, ImageFont
+ import os
+ from huggingface_hub import login
+
+
+ model = "stabilityai/stable-diffusion-xl-base-1.0"
+ finetuningLayer = "Gauri54damle/sd-multi-object-model2"
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch_dtype = torch.float16 if device.type == 'cuda' else torch.float32
+
+
+ HF_API_TOKEN = os.getenv("HF_API_TOKEN")
+ login(token=HF_API_TOKEN)
+
+
+ ## VAE - Special VAE used for training: madebyollin/sdxl-vae-fp16-fix.
+ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch_dtype)
+ pipe = DiffusionPipeline.from_pretrained(
+     model,
+     vae=vae,
+     torch_dtype=torch_dtype,
+     use_safetensors=True
+ )
+ pipe.load_lora_weights(finetuningLayer)
+
+ pipe = pipe.to(device)
+
+
+ def create_error_image(message):
+     # Create a blank image with a white background
+     width, height = 512, 512
+     image = Image.new('RGB', (width, height), 'white')
+     draw = ImageDraw.Draw(image)
+
+     # Use the default bitmap font
+     font = ImageFont.load_default()
+
+     # Draw the message near the centre of the image
+     draw.text((127, 251), message, font=font, fill="black")
+
+     return image
+
+
+ def inference(model, finetuningLayer, prompt, guidance, steps, seed):
+     if not prompt:
+         return create_error_image("Sorry, add your text prompt and try again!")
+     else:
+         generator = torch.Generator(device).manual_seed(seed)
+         image = pipe(
+             prompt,
+             num_inference_steps=int(steps),
+             guidance_scale=guidance,
+             generator=generator).images[0]
+
+         return image
+
+
+ # Raw CSS for gr.Blocks - no <style> tags are needed here.
+ css = """
+ .finetuned-diffusion-div {
+     text-align: center;
+     max-width: 700px;
+     margin: 0 auto;
+ }
+ .finetuned-diffusion-div div {
+     display: inline-flex;
+     align-items: center;
+     gap: 0.8rem;
+     font-size: 1.75rem;
+ }
+ .finetuned-diffusion-div div h1 {
+     font-weight: 900;
+     margin-bottom: 7px;
+ }
+ .finetuned-diffusion-div p {
+     margin-bottom: 10px;
+     font-size: 94%;
+ }
+ .finetuned-diffusion-div p a {
+     text-decoration: underline;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     gr.HTML(
+         """
+         <div class="finetuned-diffusion-div">
+             <div>
+                 <h1>Finetuned Diffusion</h1>
+             </div>
+         </div>
+         """
+     )
+     with gr.Row():
+
+         with gr.Column():
+
+             model = gr.Dropdown(label="baseModel", choices=[model], value=model)
+             finetuningLayer = gr.Dropdown(label="finetuningLayer", choices=[finetuningLayer], value=finetuningLayer)
+             prompt = gr.Textbox(label="Prompt", placeholder="photo of McDCoke - McDCoke is the unique identifier used for the drink")
+
+             with gr.Accordion("Advanced options", open=True):
+                 guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
+                 steps = gr.Slider(label="Steps", value=50, maximum=100, minimum=2)
+                 seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
+
+             run = gr.Button(value="Run")
+             gr.Markdown(f"Running on: {device}")
+         with gr.Column():
+             image_out = gr.Image()
+
+     ## Add the prompt and press Enter to run
+     ## prompt.submit(inference, inputs=[model, finetuningLayer, prompt, guidance, steps, seed], outputs=image_out)
+
+     ## Click the Run button to run
+     run.click(inference, inputs=[model, finetuningLayer, prompt, guidance, steps, seed], outputs=image_out)
+
+
+ demo.queue()
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ Pillow
+ diffusers
+ transformers==4.28.1
+ peft
+ trl
+ xformers
+ torch
+ scipy
+ ftfy
+ psutil
+ triton
style.css ADDED
@@ -0,0 +1,24 @@
+ .finetuned-diffusion-div div{
+     display:inline-flex;
+     align-items:center;
+     gap:.8rem;
+     font-size:1.75rem
+ }
+ .finetuned-diffusion-div div h1{
+     font-weight:900;
+     margin-bottom:7px
+ }
+ .finetuned-diffusion-div p{
+     margin-bottom:10px;
+     font-size:94%
+ }
+ a{
+     text-decoration:underline
+ }
+ .tabs{
+     margin-top:0;
+     margin-bottom:0
+ }
+ #gallery{
+     min-height:20rem
+ }
utils.py ADDED
@@ -0,0 +1,6 @@
+ def is_google_colab():
+     # Detect whether this code is running inside Google Colab.
+     try:
+         import google.colab
+         return True
+     except ImportError:
+         return False