SemaSci committed on
Commit 769b4b8 · verified · 1 Parent(s): a71afed

Create app_only_lora.py

Restore the app.py file with LoRA only, without ControlNet or IP-Adapter.

Files changed (1)
  1. app_only_lora.py +293 -0

app_only_lora.py ADDED
@@ -0,0 +1,293 @@
+
+ import gradio as gr
+ import numpy as np
+ import random
+
+ # import spaces  # [uncomment to use ZeroGPU]
+ from diffusers import DiffusionPipeline
+ import torch
+
+ from peft import PeftModel, LoraConfig
+ import os
+
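+ # Build a Stable Diffusion pipeline whose UNet (and, if present, text encoder)
+ # is wrapped with PEFT LoRA adapters loaded from ckpt_dir. The directory is
+ # expected to contain "unet" and optionally "text_encoder" subfolders produced
+ # by a PEFT LoRA training run.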
+ def get_lora_sd_pipeline(
+     ckpt_dir='./lora_logos',
+     base_model_name_or_path=None,
+     dtype=torch.float16,
+     adapter_name="default"
+ ):
+
+     unet_sub_dir = os.path.join(ckpt_dir, "unet")
+     text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
+
+     if os.path.exists(text_encoder_sub_dir) and base_model_name_or_path is None:
+         config = LoraConfig.from_pretrained(text_encoder_sub_dir)
+         base_model_name_or_path = config.base_model_name_or_path
+
+     if base_model_name_or_path is None:
+         raise ValueError("Please specify the base model name or path")
+
+     pipe = DiffusionPipeline.from_pretrained(base_model_name_or_path, torch_dtype=dtype)
+     before_params = [p.clone() for p in pipe.unet.parameters()]
+     pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
+     pipe.unet.set_adapter(adapter_name)
+     after_params = list(pipe.unet.parameters())
+     # Sanity check: injecting LoRA layers should change the parameter list.
+     print("Parameters changed:", any(b.shape != a.shape or bool(torch.any(b != a)) for b, a in zip(before_params, after_params)))
+
+     if os.path.exists(text_encoder_sub_dir):
+         pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name)
+
+     if dtype in (torch.float16, torch.bfloat16):
+         pipe.unet.half()
+         pipe.text_encoder.half()
+
+     return pipe
+
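+ # CLIP's text encoder accepts at most 77 tokens, so to support longer prompts
+ # the token sequence is split into 77-token chunks, each chunk is encoded
+ # separately, and the embeddings are concatenated along the sequence dimension.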
+ def process_prompt(prompt, tokenizer, text_encoder, max_length=77):
+     tokens = tokenizer(prompt, truncation=False, return_tensors="pt")["input_ids"]
+     chunks = [tokens[:, i:i + max_length] for i in range(0, tokens.shape[1], max_length)]
+
+     with torch.no_grad():
+         embeds = [text_encoder(chunk.to(text_encoder.device))[0] for chunk in chunks]
+
+     return torch.cat(embeds, dim=1)
+
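+ # Zero-pad the shorter of the two embedding tensors so the prompt and negative
+ # prompt have equal sequence length, as classifier-free guidance requires.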
+ def align_embeddings(prompt_embeds, negative_prompt_embeds):
+     max_length = max(prompt_embeds.shape[1], negative_prompt_embeds.shape[1])
+     return torch.nn.functional.pad(prompt_embeds, (0, 0, 0, max_length - prompt_embeds.shape[1])), \
+            torch.nn.functional.pad(negative_prompt_embeds, (0, 0, 0, max_length - negative_prompt_embeds.shape[1]))
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ # model_repo_id = "stabilityai/sdxl-turbo"  # Replace with the model you would like to use
+ model_id_default = "sd-legacy/stable-diffusion-v1-5"
+ model_dropdown = ['stabilityai/sdxl-turbo', 'CompVis/stable-diffusion-v1-4', 'sd-legacy/stable-diffusion-v1-5']
+
+ model_lora_default = "lora_pussinboots_logos"
+ model_lora_dropdown = ['lora_lady_and_cats_logos', 'lora_pussinboots_logos']
+
+ if torch.cuda.is_available():
+     torch_dtype = torch.float16
+ else:
+     torch_dtype = torch.float32
+
+ # pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+ # pipe = pipe.to(device)
+
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 1024
+
+
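+ # Main generation entry point for the Gradio UI below: a non-default base model
+ # is loaded as a plain pipeline, while the default base model is loaded through
+ # get_lora_sd_pipeline() with the selected LoRA adapter applied.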
+ # @spaces.GPU  # [uncomment to use ZeroGPU]
+ def infer(
+     prompt,
+     negative_prompt,
+     randomize_seed,
+     width=512,
+     height=512,
+     model_repo_id=model_id_default,
+     seed=42,
+     guidance_scale=7,
+     num_inference_steps=20,
+     model_lora_id=model_lora_default,
+     lora_scale=0.5,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+
+     generator = torch.Generator().manual_seed(seed)
+
+     # The pipeline used to be reloaded unconditionally:
+     # pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+     # pipe = pipe.to(device)
+
+     # Now it is reloaded only when a non-default base model is selected:
+     if model_repo_id != model_id_default:
+         pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype).to(device)
+         prompt_embeds = process_prompt(prompt, pipe.tokenizer, pipe.text_encoder)
+         negative_prompt_embeds = process_prompt(negative_prompt, pipe.tokenizer, pipe.text_encoder)
+         prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
+     else:
+         # Load the default base model with the selected LoRA adapter.
+         # pipe = get_lora_sd_pipeline(ckpt_dir='./lora_lady_and_cats_logos', base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)
+         pipe = get_lora_sd_pipeline(ckpt_dir='./' + model_lora_id, base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)
+         prompt_embeds = process_prompt(prompt, pipe.tokenizer, pipe.text_encoder)
+         negative_prompt_embeds = process_prompt(negative_prompt, pipe.tokenizer, pipe.text_encoder)
+         prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
+         print(f"LoRA adapter loaded: {pipe.unet.active_adapters}")
+         print(f"LoRA scale applied: {lora_scale}")
+         pipe.fuse_lora(lora_scale=lora_scale)
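+         # fuse_lora() above is meant to bake the adapter into the base weights
+         # at the given scale, so later calls run without the LoRA indirection.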
+
+
+     # The plain prompt-based call:
+     # image = pipe(
+     #     prompt=prompt,
+     #     negative_prompt=negative_prompt,
+     #     guidance_scale=guidance_scale,
+     #     num_inference_steps=num_inference_steps,
+     #     width=width,
+     #     height=height,
+     #     generator=generator,
+     # ).images[0]
+
+
+     # is replaced with a call that passes the precomputed embeddings:
+     params = {
+         'prompt_embeds': prompt_embeds,
+         'negative_prompt_embeds': negative_prompt_embeds,
+         'guidance_scale': guidance_scale,
+         'num_inference_steps': num_inference_steps,
+         'width': width,
+         'height': height,
+         'generator': generator,
+     }
+
+     return pipe(**params).images[0], seed
+
+     # return image, seed
+
+
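+ # A quick local smoke test of infer() might look like this (hypothetical call;
+ # the LoRA folders and base model must be available locally):
+ #
+ #     image, used_seed = infer("A cat logo", "", randomize_seed=False)
+ #     image.save("out.png")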
+ examples = [
+     "Puss in Boots wearing a sombrero crosses the Grand Canyon on a tightrope with a guitar.",
+     'A cat is playing a song called "About the Cat" on an accordion by the sea at sunset. The sun is quickly setting behind the horizon, and the light is fading.',
+     "A cat walks through the grass on the streets of an abandoned city. The camera view is always focused on the cat's face.",
+     "A young lady in a Russian embroidered kaftan is sitting on a beautiful carved veranda, holding a cup to her mouth and drinking tea from the cup. With her other hand, the girl holds a saucer. The cup and saucer are painted with gzhel. Next to the girl on the table stands a samovar, and steam can be seen above it.",
+     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+     "An astronaut riding a green horse",
+     "A delicious ceviche cheesecake slice",
+ ]
+
+ css = """
+ #col-container {
+     margin: 0 auto;
+     max-width: 640px;
+ }
+ """
+
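+ # Gradio UI: a prompt box with a Run button, the result image, and an
+ # "Advanced Settings" accordion with model/LoRA selection and sampling controls.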
+ with gr.Blocks(css=css) as demo:
+     with gr.Column(elem_id="col-container"):
+         gr.Markdown(" # Text-to-Image SemaSci Template")
+
+         with gr.Row():
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your prompt",
+                 container=False,
+             )
+
+             run_button = gr.Button("Run", scale=0, variant="primary")
+
+         result = gr.Image(label="Result", show_label=False)
+
+         with gr.Accordion("Advanced Settings", open=False):
+             # model_repo_id = gr.Text(
+             #     label="Model Id",
+             #     max_lines=1,
+             #     placeholder="Choose model",
+             #     visible=True,
+             #     value=model_repo_id,
+             # )
+             model_repo_id = gr.Dropdown(
+                 label="Model Id",
+                 choices=model_dropdown,
+                 info="Choose model",
+                 visible=True,
+                 allow_custom_value=True,
+                 # value=model_repo_id,
+                 value=model_id_default,
+             )
+
+             negative_prompt = gr.Text(
+                 label="Negative prompt",
+                 max_lines=1,
+                 placeholder="Enter a negative prompt",
+                 visible=True,
+             )
+
+             seed = gr.Slider(
+                 label="Seed",
+                 minimum=0,
+                 maximum=MAX_SEED,
+                 step=1,
+                 value=42,
+             )
+
+             randomize_seed = gr.Checkbox(label="Randomize seed", value=False)
+
+             with gr.Row():
+                 width = gr.Slider(
+                     label="Width",
+                     minimum=256,
+                     maximum=MAX_IMAGE_SIZE,
+                     step=32,
+                     value=512,  # Replace with defaults that work for your model
+                 )
+
+                 height = gr.Slider(
+                     label="Height",
+                     minimum=256,
+                     maximum=MAX_IMAGE_SIZE,
+                     step=32,
+                     value=512,  # Replace with defaults that work for your model
+                 )
+
+             with gr.Row():
+                 guidance_scale = gr.Slider(
+                     label="Guidance scale",
+                     minimum=0.0,
+                     maximum=10.0,
+                     step=0.1,
+                     value=7.0,  # Replace with defaults that work for your model
+                 )
+
+                 num_inference_steps = gr.Slider(
+                     label="Number of inference steps",
+                     minimum=1,
+                     maximum=50,
+                     step=1,
+                     value=20,  # Replace with defaults that work for your model
+                 )
+
+             with gr.Row():
+                 model_lora_id = gr.Dropdown(
+                     label="Lora Id",
+                     choices=model_lora_dropdown,
+                     info="Choose LoRA model",
+                     visible=True,
+                     allow_custom_value=True,
+                     value=model_lora_default,
+                 )
+
+                 lora_scale = gr.Slider(
+                     label="LoRA scale",
+                     minimum=0.0,
+                     maximum=1.0,
+                     step=0.1,
+                     value=0.5,
+                 )
+
+         gr.Examples(examples=examples, inputs=[prompt])
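+     # Run inference on both the Run button click and Enter in the prompt box;
+     # the inputs list must match the positional order of infer()'s parameters.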
+     gr.on(
+         triggers=[run_button.click, prompt.submit],
+         fn=infer,
+         inputs=[
+             prompt,
+             negative_prompt,
+             randomize_seed,
+             width,
+             height,
+             model_repo_id,
+             seed,
+             guidance_scale,
+             num_inference_steps,
+             model_lora_id,
+             lora_scale,
+         ],
+         outputs=[result, seed],
+     )
+
+ if __name__ == "__main__":
+     demo.launch()