cekal committed
Commit 515c609
Parent: a5eab52

Update modeling_mpt.py

Files changed (1):
modeling_mpt.py +6 -1
modeling_mpt.py CHANGED
@@ -291,7 +291,12 @@ class MPTForCausalLM(MPTPreTrainedModel):
         return_dict = return_dict if return_dict is not None else self.config.return_dict
         use_cache = use_cache if use_cache is not None else self.config.use_cache
         outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, inputs_embeds=inputs_embeds)
-        logits = F.linear(outputs.last_hidden_state, self.transformer.wte.weight)
+
+        last_hidden_state = outputs.last_hidden_state
+        if self.model_parallel:
+            last_hidden_state = last_hidden_state.to(self.transformer.wte.weight.device)
+        logits = F.linear(last_hidden_state, self.transformer.wte.weight)
+
         if self.logit_scale is not None:
             if self.logit_scale == 0:
                 warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
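
For context on what the added model_parallel branch fixes: when the model is sharded across GPUs, outputs.last_hidden_state comes off whichever device holds the final transformer block, while the tied output embedding self.transformer.wte.weight may sit on another device, and F.linear raises a device-mismatch error if its operands are not co-located. A minimal standalone sketch of the pattern (the tensor names and toy sizes below are illustrative, not taken from MPT):

import torch
import torch.nn.functional as F

vocab_size, hidden_dim = 16, 8

# Tied output embedding; under model parallelism this weight may live on a
# different device than the transformer's final block.
wte_weight = torch.randn(vocab_size, hidden_dim)
if torch.cuda.is_available():
    wte_weight = wte_weight.to('cuda:0')

# Hidden states produced wherever the last transformer layer ran.
last_hidden_state = torch.randn(2, 4, hidden_dim)  # (batch, seq, hidden)

# Without this move, F.linear would fail whenever the two tensors sit on
# different devices; with it, the matmul runs on the weight's device.
last_hidden_state = last_hidden_state.to(wte_weight.device)
logits = F.linear(last_hidden_state, wte_weight)  # (batch, seq, vocab)
print(logits.shape)  # torch.Size([2, 4, 16])

Moving the activations to the weight's device (rather than the reverse) copies only the (batch, seq, hidden) tensor and leaves the much larger vocab-sized embedding matrix in place.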