Avoid crash on empty translation
app.py CHANGED
@@ -92,11 +92,14 @@ with tokenizer.as_target_tokenizer():
     'cumulative probability': probs_for_likely_tokens.cumsum(0)
 })
 
+st.subheader("Most likely next tokens")
 st.write(probs_table)
 
-
-
-
-
-
-
+if len(decoder_input_ids) > 1:
+    st.subheader("Loss by token")
+    loss_table = pd.DataFrame({
+        'token': [tokenizer.decode(token_id) for token_id in decoder_input_ids[1:]],
+        'loss': F.cross_entropy(model_output.logits[0, :-1], torch.tensor(decoder_input_ids[1:]).to(device), reduction='none').cpu()
+    })
+    st.write(loss_table)
+    st.write("Total loss so far:", loss_table.loss.sum())
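For readers who want to see why the guard is needed, the added block boils down to the following self-contained sketch. The synthetic logits tensor stands in for model_output.logits, and the toy decoder_input_ids and vocab_size values are invented for illustration; only the tensor shapes and the shift-by-one alignment matter.

# Minimal sketch of the guarded per-token loss computation with synthetic data.
import pandas as pd
import torch
import torch.nn.functional as F

vocab_size = 8                    # invented toy value
decoder_input_ids = [0, 5, 2, 7]  # start token followed by three generated tokens
# Fake logits, one row per decoder position: shape [1, seq_len, vocab_size].
logits = torch.randn(1, len(decoder_input_ids), vocab_size)

if len(decoder_input_ids) > 1:
    # Position i of the logits predicts token i + 1, so drop the last logit
    # row and the first target id to align predictions with their targets.
    per_token_loss = F.cross_entropy(
        logits[0, :-1],
        torch.tensor(decoder_input_ids[1:]),
        reduction='none',  # one loss value per token rather than the mean
    )
    loss_table = pd.DataFrame({
        'token': decoder_input_ids[1:],
        'loss': per_token_loss.numpy(),
    })
    print(loss_table)
    print('Total loss so far:', loss_table.loss.sum())
# For an empty translation, decoder_input_ids holds only the start token, so
# decoder_input_ids[1:] is empty and the guard skips the table instead of
# feeding cross_entropy an empty target batch.

Checking the sequence length once up front keeps the fix in one place, rather than special-casing an empty DataFrame in each of the display calls that follow.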