Spaces:
Sleeping
Sleeping
Fix removal of special tokens for T5
Browse files
There is only 1 extra token added now.
- hexviz/attention.py +2 -2
hexviz/attention.py
CHANGED
@@ -162,8 +162,8 @@ def get_attention(
|
|
162 |
] # Do you need an attention mask?
|
163 |
|
164 |
if remove_special_tokens:
|
165 |
-
# Remove attention to
|
166 |
-
attentions = [attention[:, :,
|
167 |
attentions = torch.stack([attention.squeeze(0) for attention in attentions])
|
168 |
|
169 |
else:
|
|
|
162 |
] # Do you need an attention mask?
|
163 |
|
164 |
if remove_special_tokens:
|
165 |
+
# Remove attention to </s> (last) token
|
166 |
+
attentions = [attention[:, :, :-1, :-1] for attention in attentions]
|
167 |
attentions = torch.stack([attention.squeeze(0) for attention in attentions])
|
168 |
|
169 |
else:
|