Hajime Yagihara committed
Commit 51e04fc · 1 Parent(s): 577efb5

Change input_ids in MPTForCausalLM to Optional

Files changed (2)
  1. README.md +1 -0
  2. modeling_mpt.py +1 -1
README.md CHANGED
@@ -6,3 +6,4 @@ inference: false
 
 This model is a version of [MPT-7B-Instruct](https://huggingface.co/mosaicml/mpt-7b-instruct) with part of the code modified for PEFT.
 It is experimental, so please use it at your own discretion.
+We accept no responsibility for any damages arising from its use.
modeling_mpt.py CHANGED
@@ -281,7 +281,7 @@ class MPTForCausalLM(MPTPreTrainedModel):
         return self.transformer
 
     # def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None):
-    def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.FloatTensor] = None):
+    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.FloatTensor] = None):
         return_dict = return_dict if return_dict is not None else self.config.return_dict
         use_cache = use_cache if use_cache is not None else self.config.use_cache
         # outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache)
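
For context, a minimal sketch (not part of the commit) of the call pattern this change enables: PEFT soft-prompt methods call `forward()` with `inputs_embeds` and no `input_ids`, which only works if `input_ids` defaults to `None`. The checkpoint id below and the manual embedding lookup are illustrative assumptions, not taken from this repository, and the sketch assumes the patched `modeling_mpt.py` is loaded via `trust_remote_code=True` and that it exposes `get_input_embeddings()` as the upstream MosaicML code does.

```python
# Hedged sketch: calling the patched forward() without input_ids.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "mosaicml/mpt-7b-instruct"  # placeholder; point at the patched repo instead
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

batch = tokenizer("Hello", return_tensors="pt")
# Embed the tokens manually, the way PEFT does before prepending soft prompts.
embeds = model.get_input_embeddings()(batch["input_ids"])

# With input_ids: Optional[torch.LongTensor] = None this call is accepted;
# with the old required positional input_ids it would raise a TypeError
# before the forward body even runs.
with torch.no_grad():
    out = model(inputs_embeds=embeds, attention_mask=batch["attention_mask"])
print(out.logits.shape)
```

Making `input_ids` keyword-optional also mirrors other `transformers` causal-LM signatures, where either `input_ids` or `inputs_embeds` may be supplied.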