s-a-malik committed on
Commit
32936b7
·
1 Parent(s): f89d8b2
Files changed (1) hide show
  1. app.py +0 -7
app.py CHANGED
@@ -78,17 +78,10 @@ def generate(
78
  # Generate without threading
79
  with torch.no_grad():
80
  outputs = model.generate(**generation_kwargs)
81
- print(outputs.sequences.shape, input_ids.shape)
82
  generated_tokens = outputs.sequences[0, input_ids.shape[1]:]
83
- print("Generated tokens:", generated_tokens, generated_tokens.shape)
84
  generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)
85
- print("Generated text:", generated_text)
86
  # hidden states
87
  hidden = outputs.hidden_states # list of tensors, one for each token, then (batch size, sequence length, hidden size)
88
- print(len(hidden))
89
- print(len(hidden[1])) # layers
90
- print(hidden[1][0].shape) # (sequence length, hidden size)
91
- # stack token embeddings
92
 
93
  # TODO do this loop on the fly instead of waiting for the whole generation
94
  highlighted_text = ""
 
78
  # Generate without threading
79
  with torch.no_grad():
80
  outputs = model.generate(**generation_kwargs)
 
81
  generated_tokens = outputs.sequences[0, input_ids.shape[1]:]
 
82
  generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)
 
83
  # hidden states
84
  hidden = outputs.hidden_states # list of tensors, one for each token, then (batch size, sequence length, hidden size)
 
 
 
 
85
 
86
  # TODO do this loop on the fly instead of waiting for the whole generation
87
  highlighted_text = ""