CSH-1220 committed on
Commit
aef267d
·
1 Parent(s): 2007ec0

Update filepath

Browse files
pipeline/morph_pipeline_successed_ver1.py CHANGED
@@ -1108,6 +1108,7 @@ class AudioLDM2MorphPipeline(DiffusionPipeline,TextualInversionLoaderMixin):
1108
  morphing_with_lora=False,
1109
  use_morph_prompt=False,
1110
  ):
 
1111
  # 0. Load the pre-trained AP-adapter model
1112
  layer_num = 0
1113
  cross = [None, None, 768, 768, 1024, 1024, None, None]
@@ -1140,7 +1141,7 @@ class AudioLDM2MorphPipeline(DiffusionPipeline,TextualInversionLoaderMixin):
1140
  else:
1141
  attn_procs[name] = AttnProcessor2_0()
1142
 
1143
- state_dict = torch.load(adapter_weight, map_location="cuda")
1144
  for name, processor in attn_procs.items():
1145
  if hasattr(processor, 'to_v_ip') or hasattr(processor, 'to_k_ip'):
1146
  weight_name_v = name + ".to_v_ip.weight"
@@ -1164,7 +1165,6 @@ class AudioLDM2MorphPipeline(DiffusionPipeline,TextualInversionLoaderMixin):
1164
  # print(f"height: {height}, original_waveform_length: {original_waveform_length}") # height: 1000, original_waveform_length: 160000
1165
 
1166
  # # 2. Define call parameters
1167
- device = "cuda" if torch.cuda.is_available() else "cpu"
1168
  do_classifier_free_guidance = guidance_scale > 1.0
1169
  self.use_lora = use_lora
1170
  self.use_adain = use_adain
 
1108
  morphing_with_lora=False,
1109
  use_morph_prompt=False,
1110
  ):
1111
+ device = "cuda" if torch.cuda.is_available() else "cpu"
1112
  # 0. Load the pre-trained AP-adapter model
1113
  layer_num = 0
1114
  cross = [None, None, 768, 768, 1024, 1024, None, None]
 
1141
  else:
1142
  attn_procs[name] = AttnProcessor2_0()
1143
 
1144
+ state_dict = torch.load(adapter_weight, map_location=device)
1145
  for name, processor in attn_procs.items():
1146
  if hasattr(processor, 'to_v_ip') or hasattr(processor, 'to_k_ip'):
1147
  weight_name_v = name + ".to_v_ip.weight"
 
1165
  # print(f"height: {height}, original_waveform_length: {original_waveform_length}") # height: 1000, original_waveform_length: 160000
1166
 
1167
  # # 2. Define call parameters
 
1168
  do_classifier_free_guidance = guidance_scale > 1.0
1169
  self.use_lora = use_lora
1170
  self.use_adain = use_adain