Lolalb committed
Commit 2e41a1b · verified · Parent: 898ed4e

Minor fix for correct input forwarding

Files changed (1): model.py (+14 −14)

model.py CHANGED
@@ -323,13 +323,13 @@ class NeoBERTLMHead(NeoBERTPreTrainedModel):
     ):
 
         output = self.model.forward(
-            input_ids,
-            position_ids,
-            max_seqlen,
-            cu_seqlens,
-            attention_mask,
-            output_hidden_states,
-            output_attentions,
+            input_ids=input_ids,
+            position_ids=position_ids,
+            max_seqlen=max_seqlen,
+            cu_seqlens=cu_seqlens,
+            attention_mask=attention_mask,
+            output_hidden_states=output_hidden_states,
+            output_attentions=output_attentions,
         )
         logits = self.decoder(output.last_hidden_state)
 
@@ -380,13 +380,13 @@ class NeoBERTForSequenceClassification(NeoBERTPreTrainedModel):
     ):
 
         output = self.model.forward(
-            input_ids,
-            position_ids,
-            max_seqlen,
-            cu_seqlens,
-            attention_mask,
-            output_hidden_states,
-            output_attentions,
+            input_ids=input_ids,
+            position_ids=position_ids,
+            max_seqlen=max_seqlen,
+            cu_seqlens=cu_seqlens,
+            attention_mask=attention_mask,
+            output_hidden_states=output_hidden_states,
+            output_attentions=output_attentions,
         )
         hidden_states = output.last_hidden_state
 
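For context beyond the diff itself: the change switches both head classes from positional to keyword arguments in their call to the base model's forward. A minimal sketch of why binding by name matters, using a hypothetical forward signature whose parameter order differs from the order the heads were passing (the real NeoBERT signature is not reproduced here):

# Hypothetical signature for illustration only; the real NeoBERT forward
# may declare its parameters in a different order.
def forward_base(input_ids, attention_mask=None, position_ids=None,
                 max_seqlen=None, cu_seqlens=None,
                 output_hidden_states=False, output_attentions=False):
    # Positional arguments are bound purely by their position in this list.
    return {"attention_mask": attention_mask, "position_ids": position_ids,
            "max_seqlen": max_seqlen}

# Positional call in the heads' original order: position_ids silently lands in
# the attention_mask slot, and max_seqlen in the position_ids slot.
print(forward_base("ids", "pos", 512))
# {'attention_mask': 'pos', 'position_ids': 512, 'max_seqlen': None}

# Keyword call binds by name, so the caller's argument order no longer matters.
print(forward_base(input_ids="ids", position_ids="pos", max_seqlen=512))
# {'attention_mask': None, 'position_ids': 'pos', 'max_seqlen': 512}

Passing by keyword keeps each tensor bound to the intended parameter even if the base model's signature is reordered or gains new optional parameters.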