SemaSci committed
Commit a4d9b2f · verified · 1 Parent(s): d6c9e09

Update app.py

Files changed (1): app.py +34 -56
app.py CHANGED
@@ -11,16 +11,13 @@ import torch
 from peft import PeftModel, LoraConfig
 import os
 
-# Add a global cache for pipelines
-pipe_cache = {}
-
 def get_lora_sd_pipeline(
     ckpt_dir='./lora_logos',
     base_model_name_or_path=None,
     dtype=torch.float16,
     adapter_name="default"
 ):
-
+
     unet_sub_dir = os.path.join(ckpt_dir, "unet")
     text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
 
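The path handling above assumes a PEFT-style checkpoint directory with one subfolder per model component. A sketch of the expected layout, inferred from the os.path.join calls rather than verified against the repo:

# Assumed layout of ckpt_dir (default ./lora_logos); PEFT typically saves
# an adapter_config.json plus the adapter weights in each subfolder.
# lora_logos/
#   unet/
#     adapter_config.json
#     adapter_model.safetensors   (or adapter_model.bin)
#   text_encoder/
#     adapter_config.json
#     adapter_model.safetensors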
 
@@ -33,28 +30,17 @@ def get_lora_sd_pipeline(
 
     pipe = DiffusionPipeline.from_pretrained(base_model_name_or_path, torch_dtype=dtype)
     before_params = pipe.unet.parameters()
-    # pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
-    # Fix the config loading
-    config = LoraConfig.from_pretrained(unet_sub_dir)
-
-    pipe.unet = PeftModel.from_pretrained(
-        pipe.unet,
-        unet_sub_dir,
-        adapter_name=adapter_name,
-        config=config  # pass the config explicitly
-    )
-
+    pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
     pipe.unet.set_adapter(adapter_name)
     after_params = pipe.unet.parameters()
-    print("UNet Parameters changed:", any(torch.any(b != a) for b, a in zip(before_params, after_params)))
+    print("Parameters changed:", any(torch.any(b != a) for b, a in zip(before_params, after_params)))
 
     if os.path.exists(text_encoder_sub_dir):
         pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name)
 
     if dtype in (torch.float16, torch.bfloat16):
         pipe.unet.half()
-        if pipe.text_encoder is not None:
-            pipe.text_encoder.half()
+        pipe.text_encoder.half()
 
     return pipe
 
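For context, a minimal usage sketch of get_lora_sd_pipeline as it stands after this commit. The base model id below is a hypothetical placeholder; the app's real default (model_id_default) is defined outside these hunks:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = get_lora_sd_pipeline(
    ckpt_dir="./lora_logos",            # folder holding the unet/ and text_encoder/ adapters
    base_model_name_or_path="stable-diffusion-v1-5/stable-diffusion-v1-5",  # hypothetical base model
    dtype=torch.float16,
    adapter_name="default",
).to(device)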
 
@@ -108,51 +94,43 @@ def infer(
     progress=gr.Progress(track_tqdm=True),
 ):
 
-    global pipe_cache
-
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
 
-    # Pipeline caching
-    cache_key = f"{model_repo_id}_{model_lora_id}"
-    if cache_key not in pipe_cache:
-        if model_repo_id != model_id_default:
-            pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype).to(device)
-            prompt_embeds = process_prompt(prompt, pipe.tokenizer, pipe.text_encoder)
-            negative_prompt_embeds = process_prompt(negative_prompt, pipe.tokenizer, pipe.text_encoder)
-            prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
-        else:
-            pipe = get_lora_sd_pipeline(
-                ckpt_dir='./'+model_lora_id,
-                base_model_name_or_path=model_id_default,
-                dtype=torch_dtype
-            ).to(device)
-
-        pipe_cache[cache_key] = pipe
+    # drop the unconditional pipe reload
+    #pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+    #pipe = pipe.to(device)
+
+    # reload the pipe conditionally instead
+    if model_repo_id != model_id_default:
+        pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype).to(device)
+        prompt_embeds = process_prompt(prompt, pipe.tokenizer, pipe.text_encoder)
+        negative_prompt_embeds = process_prompt(negative_prompt, pipe.tokenizer, pipe.text_encoder)
+        prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
     else:
-        pipe = pipe_cache[cache_key]
-
-    # Apply the LoRA scale dynamically
-    if model_repo_id == model_id_default:
-        # Remove fuse_lora()
-        # pipe.fuse_lora(lora_scale=lora_scale)  # the problematic line, commented out
-
-        # Set the adapters dynamically instead
-        pipe.unet.set_adapters(
-            [model_lora_id],
-            adapter_weights=[lora_scale]
-        )
-        if hasattr(pipe, 'text_encoder') and pipe.text_encoder is not None:
-            pipe.text_encoder.set_adapters(
-                [model_lora_id],
-                adapter_weights=[lora_scale]
-            )
-
-    print(f"Active adapters - UNet: {pipe.unet.active_adapters}, Text Encoder: {pipe.text_encoder.active_adapters if hasattr(pipe, 'text_encoder') else None}")
-    print("UNet first layer weights:", pipe.unet.base_model.model[0].weight.data[0,0,:5])
+        # add the LoRA
+        #pipe = get_lora_sd_pipeline(ckpt_dir='./lora_lady_and_cats_logos', base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)
+        pipe = get_lora_sd_pipeline(ckpt_dir='./'+model_lora_id, base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)
+        prompt_embeds = process_prompt(prompt, pipe.tokenizer, pipe.text_encoder)
+        negative_prompt_embeds = process_prompt(negative_prompt, pipe.tokenizer, pipe.text_encoder)
+        prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
+        print(f"LoRA adapter loaded: {pipe.unet.active_adapters}")
     print(f"LoRA scale applied: {lora_scale}")
+    pipe.fuse_lora(lora_scale=lora_scale)
+
+
+    # replace the plain prompt-based pipe call
+    #image = pipe(
+    #    prompt=prompt,
+    #    negative_prompt=negative_prompt,
+    #    guidance_scale=guidance_scale,
+    #    num_inference_steps=num_inference_steps,
+    #    width=width,
+    #    height=height,
+    #    generator=generator,
+    #).images[0]
 
 
     # with the embeddings-based pipe call
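The closing comment pair ("replace the plain prompt-based pipe call ... with the embeddings-based pipe call") points at code just below this hunk. A sketch of that call, assuming the standard diffusers keyword names for precomputed embeddings:

image = pipe(
    prompt_embeds=prompt_embeds,                      # built by process_prompt/align_embeddings above
    negative_prompt_embeds=negative_prompt_embeds,
    guidance_scale=guidance_scale,
    num_inference_steps=num_inference_steps,
    width=width,
    height=height,
    generator=generator,
).images[0]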
 