d-Matrix committed on
Commit d16c53a · verified · 1 Parent(s): 4bfa7b2

Update modeling_opt.py

Files changed (1):
  modeling_opt.py +0 -1
modeling_opt.py CHANGED
@@ -227,7 +227,6 @@ class OPTAttention(nn.Module):
         attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
 
         # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
-        breakpoint()
         if attn_weights.dtype == torch.float16:
             attn_weights = self.softmax(attn_weights.float()).to(torch.float16)
         else:
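
The removed line was a stray breakpoint() debugger call left in the forward pass of OPTAttention, sitting directly before the fp16-to-fp32 softmax upcast. A minimal standalone sketch of the retained code path follows; the upcast-then-downcast logic and the PR reference come from the diff above, while the tensor shapes and the module-level softmax variable are hypothetical stand-ins for illustration:

import torch
import torch.nn as nn

softmax = nn.Softmax(dim=-1)  # stands in for self.softmax in OPTAttention

# Hypothetical shapes: bsz * num_heads = 24, tgt_len = src_len = 8.
attn_weights = torch.randn(24, 8, 8, dtype=torch.float16)

if attn_weights.dtype == torch.float16:
    # Softmax directly in fp16 can lose precision or overflow, so the
    # weights are upcast to fp32, normalized, and cast back to fp16
    # (see https://github.com/huggingface/transformers/pull/17437).
    attn_probs = softmax(attn_weights.float()).to(torch.float16)
else:
    attn_probs = softmax(attn_weights)

assert attn_probs.dtype == attn_weights.dtype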