chong.zhang committed
Commit a3a0c9b
1 Parent(s): 2c50d95
inspiremusic/transformer/qwen_encoder.py CHANGED
@@ -39,7 +39,7 @@ class QwenEncoder(nn.Module):
         else:
             self.dtype = torch.float32
 
-        self.model = AutoModelForCausalLM.from_pretrained(pretrain_path, device_map="auto", attn_implementation="flash_attention_2", torch_dtype=self.dtype)
+        self.model = AutoModelForCausalLM.from_pretrained(pretrain_path, device_map="cpu")
         self._output_size = self.model.config.hidden_size
         self.do_fusion_emb = do_fusion_emb
         self.hidden_norm = torch.nn.LayerNorm(self._output_size)
@@ -109,7 +109,7 @@ class QwenEmbeddingEncoder(nn.Module):
         else:
             self.dtype = torch.float32
         from transformers import Qwen2ForCausalLM
-        self.model = Qwen2ForCausalLM.from_pretrained(pretrain_path, device_map="auto", attn_implementation="flash_attention_2", torch_dtype=self.dtype)
+        self.model = Qwen2ForCausalLM.from_pretrained(pretrain_path, device_map="cpu")
         self._output_size = self.model.config.hidden_size
 
     def output_size(self) -> int:
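
For context: in both classes the commit drops device_map="auto", the flash_attention_2 implementation, and the explicit torch_dtype in favor of a plain CPU load, which lets the checkpoint come up on machines without a CUDA GPU (FlashAttention-2 ships CUDA-only kernels). Below is a minimal sketch of how the two load paths could instead be selected at runtime; the load_qwen_backbone helper name and the bfloat16 dtype are illustrative assumptions, not part of this commit:

import torch
from transformers import AutoModelForCausalLM

def load_qwen_backbone(pretrain_path: str):
    # Hypothetical helper, not in the commit: keep the fast GPU path when
    # CUDA is available, otherwise fall back to the plain CPU load that
    # this commit switches to.
    if torch.cuda.is_available():
        return AutoModelForCausalLM.from_pretrained(
            pretrain_path,
            device_map="auto",
            attn_implementation="flash_attention_2",  # requires the flash-attn package
            torch_dtype=torch.bfloat16,               # assumed dtype, for illustration only
        )
    # FlashAttention-2 kernels are CUDA-only, so the CPU path must omit them.
    return AutoModelForCausalLM.from_pretrained(pretrain_path, device_map="cpu")

Either branch still exposes self.model.config.hidden_size, so the downstream _output_size and LayerNorm setup in both classes is unaffected by which path loads the model.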