lbwavebo committed
Commit 33cb8e3
1 Parent(s): 72e5f59

test commit using https://huggingface.co/cekal/mpt-7b-peft-compatible/blob/main/modeling_mpt.py
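
The referenced modeling_mpt.py swaps in a PEFT-compatible MPT implementation: the diff below adds inputs_embeds support, gradient checkpointing (supports_gradient_checkpointing), and an explicit F.linear LM head. As a rough sketch of the intended usage (not part of this commit; the LoRA hyperparameters and the 'Wqkv' target-module name are assumptions):

from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model

# Load a checkpoint that ships this modeling file (MPT requires trust_remote_code).
model = AutoModelForCausalLM.from_pretrained('cekal/mpt-7b-peft-compatible', trust_remote_code=True)

# Attach LoRA adapters; 'Wqkv' is assumed to be the fused attention projection in MPT blocks.
lora_cfg = LoraConfig(task_type=TaskType.CAUSAL_LM, r=8, lora_alpha=16, lora_dropout=0.05,
                      target_modules=['Wqkv'])
model = get_peft_model(model, lora_cfg)
model.print_trainable_parameters()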

Files changed (1)
  modeling_mpt.py  +127  -54
modeling_mpt.py CHANGED
@@ -1,5 +1,4 @@
 """A simple, flexible implementation of a GPT model.
-
 Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
 """
 import math
@@ -25,16 +24,24 @@ except:
     pass
 Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
 
+
 class MPTPreTrainedModel(PreTrainedModel):
     config_class = MPTConfig
     base_model_prefix = 'model'
-    _no_split_modules = ['MPTBlock']
+    _no_split_modules = ["MPTBlock"]
+    supports_gradient_checkpointing = True
+
+    def _set_gradient_checkpointing(self, module, value=False):
+        if isinstance(module, MPTModel):
+            module.gradient_checkpointing = value
+
 
 class MPTModel(MPTPreTrainedModel):
 
     def __init__(self, config: MPTConfig):
         config._validate_config()
         super().__init__(config)
+        self.gradient_checkpointing = False
         self.attn_impl = config.attn_config['attn_impl']
         self.prefix_lm = config.attn_config['prefix_lm']
         self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
@@ -84,11 +91,14 @@ class MPTModel(MPTPreTrainedModel):
         self.wte = value
 
     @torch.no_grad()
-    def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
+    def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor] = None,
+                   prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None):
         if not self._attn_bias_initialized:
             if self.attn_bias_shape:
                 self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
-                self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
+                self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads,
+                                                 self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi,
+                                                 alibi_bias_max=self.alibi_bias_max)
             self._attn_bias_initialized = True
         if self.attn_impl == 'flash':
             return (self.attn_bias, attention_mask)
@@ -108,7 +118,7 @@ class MPTModel(MPTPreTrainedModel):
                 attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
             else:
                 _s_k = max(0, attn_bias.size(-1) - s_k)
-                attn_bias = attn_bias[:, :, :, _s_k:]
+                attn_bias = attn_bias[:, :, :, -s_k:]
             if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
                 raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
             min_val = torch.finfo(attn_bias.dtype).min
@@ -118,12 +128,15 @@ class MPTModel(MPTPreTrainedModel):
     def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
         (s_k, s_q) = attn_bias.shape[-2:]
         if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
-            raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.')
+            raise ValueError(
+                'attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.')
         seq_len = prefix_mask.shape[-1]
         if seq_len > self.config.max_seq_len:
             raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
         attn_bias = attn_bias[..., :seq_len, :seq_len]
-        causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
+        causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1,
+                                                                                                              seq_len,
+                                                                                                              seq_len)
         prefix = prefix_mask.view(-1, 1, 1, seq_len)
         cannot_attend = ~torch.logical_or(causal, prefix.bool())
         min_val = torch.finfo(attn_bias.dtype).min
@@ -135,52 +148,85 @@ class MPTModel(MPTPreTrainedModel):
         if seq_len > self.config.max_seq_len:
             raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
         attn_bias = attn_bias[..., :seq_len, :seq_len]
-        cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
+        cannot_attend = torch.logical_not(
+            torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
         min_val = torch.finfo(attn_bias.dtype).min
         attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
         return attn_bias
 
-    def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
+    def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
+                attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None,
+                sequence_id: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None,
+                output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None,
+                use_cache: Optional[bool] = None, inputs_embeds: Optional[torch.FloatTensor] = None):
         return_dict = return_dict if return_dict is not None else self.config.return_dict
         use_cache = use_cache if use_cache is not None else self.config.use_cache
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                use_cache = False
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+        elif input_ids is not None:
+            batch_size, seq_length = input_ids.shape
+        elif inputs_embeds is not None:
+            batch_size, seq_length, _ = inputs_embeds.shape
+        else:
+            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+        seq_length_with_past = seq_length
+        past_key_values_length = 0
+
+        if past_key_values is not None:
+            past_key_values_length = past_key_values[0][0].shape[2]
+            seq_length_with_past = seq_length_with_past + past_key_values_length
+
         if attention_mask is not None:
             attention_mask = attention_mask.bool()
+        else:
+            attention_mask = torch.ones(
+                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
+            )
+
+        if inputs_embeds is None:
+            tok_emb = self.wte(input_ids)
+        else:
+            tok_emb = inputs_embeds
+
         if prefix_mask is not None:
             prefix_mask = prefix_mask.bool()
         if not return_dict:
             raise NotImplementedError('return_dict False is not implemented yet for MPT')
         if output_attentions:
-            if self.attn_impl != 'torch':
-                raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
-        if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
-            raise NotImplementedError('MPT does not support training with left padding.')
+            raise NotImplementedError('output_attentions is not implemented yet for MPT')
+        # if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
+        #     raise NotImplementedError('MPT does not support training with left padding.')
         if self.prefix_lm and prefix_mask is None:
             raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
-        if inputs_embeds is not None:
-            raise NotImplementedError('inputs_embeds is not implemented for MPT.')
         if self.training:
             if self.attn_uses_sequence_id and sequence_id is None:
-                raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
+                raise ValueError(
+                    'sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
             elif self.attn_uses_sequence_id is False and sequence_id is not None:
-                warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
-        S = input_ids.size(1)
+                warnings.warn(
+                    'MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
+        S = seq_length
         assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
-        tok_emb = self.wte(input_ids)
         if self.alibi:
             x = tok_emb
         else:
             past_position = 0
             if past_key_values is not None:
                 if len(past_key_values) != self.config.n_layers:
-                    raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
+                    raise ValueError(
+                        f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
                 past_position = past_key_values[0][0].size(1)
-                if self.attn_impl == 'torch':
-                    past_position = past_key_values[0][0].size(3)
             if S + past_position > self.config.max_seq_len:
-                raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
+                raise ValueError(
+                    f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
             pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
-            if attention_mask is not None:
-                pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
+            if attention_mask is not None and not self.training:
+                pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:],
+                                  min=0)
             pos_emb = self.wpe(pos)
             x = tok_emb + pos_emb
         if self.embedding_fraction == 1:
@@ -189,31 +235,49 @@ class MPTModel(MPTPreTrainedModel):
             x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
             assert isinstance(self.emb_drop, nn.Module)
             x = self.emb_drop(x_shrunk)
-        (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
+        (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=x.dtype, attention_mask=attention_mask,
+                                                      prefix_mask=prefix_mask, sequence_id=sequence_id)
         if use_cache and past_key_values is None:
             past_key_values = [() for _ in range(self.config.n_layers)]
+
         all_hidden_states = () if output_hidden_states else None
-        all_self_attns = () if output_attentions else None
         for (b_idx, block) in enumerate(self.blocks):
             if output_hidden_states:
                 assert all_hidden_states is not None
                 all_hidden_states = all_hidden_states + (x,)
             past_key_value = past_key_values[b_idx] if past_key_values is not None else None
-            (x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
+
+            if self.gradient_checkpointing and self.training:
+
+                def create_custom_forward(module):
+                    def custom_forward(*inputs):
+                        # None for past_key_value
+                        return module(*inputs)
+
+                    return custom_forward
+
+                (x, past_key_value) = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(block),
+                    x,
+                    past_key_value,
+                    attn_bias,
+                    attention_mask,
+                    self.is_causal,
+                )
+            else:
+                (x, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias,
+                                            attention_mask=attention_mask, is_causal=self.is_causal)
+
             if past_key_values is not None:
                 past_key_values[b_idx] = past_key_value
-            if output_attentions:
-                assert all_self_attns is not None
-                all_self_attns = all_self_attns + (attn_weights,)
         x = self.norm_f(x)
-        if output_hidden_states:
-            assert all_hidden_states is not None
-            all_hidden_states = all_hidden_states + (x,)
-        return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
+        return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values,
+                                       hidden_states=all_hidden_states)
 
     def param_init_fn(self, module):
         init_fn_name = self.config.init_config['name']
-        MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)
+        MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model,
+                                          **self.config.init_config)
 
     def fsdp_wrap_fn(self, module):
         return isinstance(module, MPTBlock)
@@ -221,19 +285,14 @@ class MPTModel(MPTPreTrainedModel):
     def activation_checkpointing_fn(self, module):
         return isinstance(module, MPTBlock)
 
+
 class MPTForCausalLM(MPTPreTrainedModel):
 
     def __init__(self, config: MPTConfig):
         super().__init__(config)
         if not config.tie_word_embeddings:
             raise ValueError('MPTForCausalLM only supports tied word embeddings')
-        print(f'Instantiating an MPTForCausalLM model from {__file__}')
         self.transformer = MPTModel(config)
-        for child in self.transformer.children():
-            if isinstance(child, torch.nn.ModuleList):
-                continue
-            if isinstance(child, torch.nn.Module):
-                child._fsdp_wrap = True
         self.logit_scale = None
         if config.logit_scale is not None:
             logit_scale = config.logit_scale
@@ -241,7 +300,8 @@ class MPTForCausalLM(MPTPreTrainedModel):
             if logit_scale == 'inv_sqrt_d_model':
                 logit_scale = 1 / math.sqrt(config.d_model)
             else:
-                raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
+                raise ValueError(
+                    f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
         self.logit_scale = logit_scale
 
     def get_input_embeddings(self):
@@ -262,27 +322,39 @@ class MPTForCausalLM(MPTPreTrainedModel):
     def get_decoder(self):
         return self.transformer
 
-    def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.FloatTensor]=None):
+    def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
+                attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None,
+                sequence_id: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None,
+                return_dict: Optional[bool] = None, output_attentions: Optional[bool] = None,
+                output_hidden_states: Optional[bool] = None, use_cache: Optional[bool] = None,
+                inputs_embeds: Optional[torch.FloatTensor] = None):
         return_dict = return_dict if return_dict is not None else self.config.return_dict
         use_cache = use_cache if use_cache is not None else self.config.use_cache
-        if inputs_embeds is not None:
-            raise NotImplementedError('inputs_embeds has to be None (for hf/peft support).')
-        outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache)
-        logits = self.transformer.wte(outputs.last_hidden_state.to(self.transformer.wte.weight.device), True)
+        outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask,
+                                   prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict,
+                                   output_attentions=output_attentions, output_hidden_states=output_hidden_states,
+                                   use_cache=use_cache, inputs_embeds=inputs_embeds)
+
+        last_hidden_state = outputs.last_hidden_state
+        logits = F.linear(last_hidden_state, self.transformer.wte.weight)
+
         if self.logit_scale is not None:
             if self.logit_scale == 0:
-                warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
+                warnings.warn(
+                    f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
            logits *= self.logit_scale
         loss = None
         if labels is not None:
             labels = torch.roll(labels, shifts=-1)
             labels[:, -1] = -100
             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1))
-        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
+        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values,
+                                      hidden_states=outputs.hidden_states)
 
     def param_init_fn(self, module):
         init_fn_name = self.config.init_config['name']
-        MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)
+        MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model,
+                                          **self.config.init_config)
 
     def fsdp_wrap_fn(self, module):
         return isinstance(module, MPTBlock)
@@ -308,16 +380,17 @@ class MPTForCausalLM(MPTPreTrainedModel):
                 raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.')
         else:
             prefix_mask = None
-        return {'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True)}
+        return {'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask,
+                'sequence_id': sequence_id, 'past_key_values': past_key_values,
+                'use_cache': kwargs.get('use_cache', True)}
 
     @staticmethod
     def _reorder_cache(past_key_values, beam_idx):
         """Used by HuggingFace generate when using beam search with kv-caching.
-
         See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133
         for an example in transformers.
         """
         reordered_past = []
         for layer_past in past_key_values:
             reordered_past += [tuple((past_state.index_select(0, beam_idx) for past_state in layer_past))]
-        return reordered_past
+        return reordered_past
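
For reference, a minimal, unverified sketch of exercising the two behaviors this diff adds (gradient checkpointing and inputs_embeds). The checkpoint path is a placeholder; the inputs_embeds path assumes an ALiBi config (as in MPT-7B), since the learned-position branch still reads input_ids.device, and it assumes the rest of the repo matches the referenced cekal implementation (e.g., MPTBlock returning a 2-tuple):

import torch
from transformers import AutoModelForCausalLM

# Placeholder path: any checkpoint that uses this modeling_mpt.py.
model = AutoModelForCausalLM.from_pretrained('path/to/mpt-checkpoint', trust_remote_code=True)

# New in this diff: supports_gradient_checkpointing / _set_gradient_checkpointing on MPTPreTrainedModel.
model.gradient_checkpointing_enable()
model.train()

# New in this diff: forward() accepts inputs_embeds instead of input_ids.
input_ids = torch.randint(0, model.config.vocab_size, (1, 8))
embeds = model.get_input_embeddings()(input_ids)
out = model(input_ids=None, inputs_embeds=embeds, labels=input_ids)
out.loss.backward()  # with checkpointing active, block activations are recomputed in backward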