b1swas committed on
Commit
f1ef060
·
verified ·
1 Parent(s): b86f487

went back a few commits

Browse files
Files changed (1) hide show
  1. app.py +2 -4
app.py CHANGED
@@ -1,7 +1,5 @@
1
  import gradio as gr
2
 
3
- import torch
4
-
5
  from transformers import AutoTokenizer, AutoModelForCausalLM
6
  import numpy as np
7
 
@@ -57,7 +55,7 @@ if __name__ == "__main__":
57
  )
58
  # Important: don't forget to set `normalize_logits=True` to obtain normalized probabilities (i.e. sum(p) = 1)
59
  transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, normalize_logits=True)
60
- transition_proba = np.exp(transition_scores[0])
61
  # We only have scores for the generated tokens, so pop out the prompt tokens
62
  input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
63
  generated_ids = outputs.sequences[:, input_length:]
@@ -80,7 +78,7 @@ if __name__ == "__main__":
80
  break
81
  highlighted_out.append((token.replace("▁", " "), this_label))
82
 
83
- return highlighted_out, transition_proba
84
 
85
  demo = gr.Blocks()
86
  with demo:
 
1
  import gradio as gr
2
 
 
 
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import numpy as np
5
 
 
55
  )
56
  # Important: don't forget to set `normalize_logits=True` to obtain normalized probabilities (i.e. sum(p) = 1)
57
  transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, normalize_logits=True)
58
+ transition_proba = np.exp(transition_scores)
59
  # We only have scores for the generated tokens, so pop out the prompt tokens
60
  input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
61
  generated_ids = outputs.sequences[:, input_length:]
 
78
  break
79
  highlighted_out.append((token.replace("▁", " "), this_label))
80
 
81
+ return highlighted_out
82
 
83
  demo = gr.Blocks()
84
  with demo: