import torch
from transformers.cache_utils import DynamicCache


def get_tokenized_chat(tokenizer, prompt, doc):
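    """Tokenize a single user turn containing the prompt and the document."""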
    messages = [
        {
            "role": "user",
            "content": f"{prompt}\n\n{doc}",
        },
    ]
    tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")[0]
    return tokenized_chat


def tokenize_doc_in_progress(tokenizer, doc_in_progress):
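    """Tokenize the in-progress document without any special tokens."""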
    if len(doc_in_progress) == 0:
        # Some tokenizers give tensors of the wrong dtype if the input is empty
        return torch.empty(0, dtype=torch.int64)

    # Tokenize without special tokens so that no "beginning of document"
    # token is prepended; this is robust across models, unlike stripping
    # the first token, which assumes every tokenizer adds one.
    doc_in_progress_ids = tokenizer(
        doc_in_progress, add_special_tokens=False,
        return_tensors='pt')['input_ids'][0]
    return doc_in_progress_ids


def get_highlights_inner(model, tokenizer, doc, prompt, updated_doc, k):
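    """
    Score each token of updated_doc under the model, conditioned on the chat
    prompt. Returns a list of dicts giving each token's character span, its
    negative log-probability, and the top-k most likely tokens at that position.
    """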
    tokenized_chat = get_tokenized_chat(tokenizer, prompt, doc)
    assert len(tokenized_chat.shape) == 1

    if updated_doc is None or len(updated_doc.strip()) == 0:
        updated_doc = doc
    updated_doc_ids = tokenize_doc_in_progress(tokenizer, updated_doc)

    joined_ids = torch.cat([tokenized_chat, updated_doc_ids])

    # Compute the next-token logits for the entire document
    with torch.no_grad():
        logits = model(joined_ids[None].to(model.device)).logits[0].cpu()
    
    highlights = []
    length_so_far = 0
    for idx in range(len(tokenized_chat), len(joined_ids)):
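        # the logits at position idx - 1 give the distribution over the token at idx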
        probs = logits[idx - 1].softmax(dim=-1)
        token_id = joined_ids[idx]
        token = tokenizer.decode(token_id)
        token_loss = -probs[token_id].log().item()
        topk_tokens = probs.topk(k).indices.cpu().numpy().tolist()
        topk_tokens_decoded = tokenizer.batch_decode(topk_tokens, skip_special_tokens=True)
        highlights.append(dict(
            start=length_so_far,
            end=length_so_far + len(token),
            token=token,
            token_loss=token_loss,
            most_likely_token=topk_tokens_decoded[0],
            topk_tokens=topk_tokens_decoded,
        ))
        length_so_far += len(token)
    return highlights


def get_lookahead_sequences(model, tokenizer, hypotheses, n_branch_tokens, device):
    """
    For each of the n_branch_tokens next tokens, generate most-likely next tokens and append back on.
    """
    assert len(hypotheses.shape) == 2
    assert hypotheses.shape[0] == 1
    n_tokens_so_far = hypotheses.shape[1]
    past_key_values = DynamicCache()

    with torch.no_grad():
        model_outs_onestep = model(hypotheses, output_hidden_states=True, past_key_values=past_key_values)

    branch_tokens = model_outs_onestep.logits[0, -1].topk(n_branch_tokens).indices

    # Replicate the KV cache n_branch_tokens times by "reordering" it with an
    # all-zeros beam index, as beam search would, so that each branch token
    # can continue as its own entry in a batch.
    past_key_values.reorder_cache(torch.zeros((n_branch_tokens,), dtype=torch.long, device=device))

    # Now call the model again, passing the kv cache, so we can continue generating.
    # Each of the n_branch_tokens next tokens will be considered as one sequence in a "batch".
    next_tokens_as_batch = branch_tokens.unsqueeze(1)
    assert next_tokens_as_batch.shape == (n_branch_tokens, 1)

    position_id_for_final_token = n_tokens_so_far
    cache_position = torch.full((1,), position_id_for_final_token, dtype=torch.long, device=device)
    with torch.no_grad():
        model_outs = model(
            next_tokens_as_batch,
            past_key_values=past_key_values,
            output_hidden_states=True,
            use_cache=True,
            # the cache surprisingly doesn't know the position of the last token
            cache_position=cache_position
        )
    
    # Grab the single most likely token from each of the n_branch_tokens sequences
    next_token_logits = model_outs.logits[:, -1]
    vocab_size = model.config.vocab_size
    assert next_token_logits.shape == (n_branch_tokens, vocab_size), f"{next_token_logits.shape=}, {n_branch_tokens=}, {vocab_size=}"
    most_likely_token_ids = next_token_logits.argmax(dim=-1)

    # Stick them at the end of the branch tokens.
    assert most_likely_token_ids.shape == (n_branch_tokens,)
    lookahead_sequences = torch.cat([
        branch_tokens.unsqueeze(1),
        most_likely_token_ids.unsqueeze(1)
    ], dim=1)
    assert lookahead_sequences.shape == (n_branch_tokens, 2)
    return lookahead_sequences, next_token_logits


def get_next_token_predictions_inner(
        model, tokenizer, original_doc, prompt, doc_in_progress, k):
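    """
    Predict k two-token continuations of doc_in_progress using the KV-cache
    lookahead. Returns the decoded continuations and the per-branch logits.
    """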

    tokenized_chat = get_tokenized_chat(tokenizer, prompt, original_doc)
    doc_in_progress_ids = tokenize_doc_in_progress(tokenizer, doc_in_progress)

    device = model.device

    joined_ids = torch.cat([tokenized_chat, doc_in_progress_ids])
    hypotheses = joined_ids[None].to(model.device)

    # Sanity check: build the same token sequence directly via the chat
    # template's continue_final_message option and compare it against the
    # manually joined ids above.
    tokenized_chat = tokenizer.apply_chat_template([
        {"role": "user", "content": f"{prompt}\n\n{original_doc}"},
        {"role": "assistant", "content": doc_in_progress}
    ], tokenize=True, return_tensors="pt", continue_final_message=True).to(model.device)

    # Compare them
    if tokenized_chat.shape == hypotheses.shape and torch.all(tokenized_chat == hypotheses):
        print("Tokenized chat and hypotheses match")
    else:
        print("FAIL: Tokenized chat and hypotheses do not match!")
        print(f"{tokenized_chat=}")
        print(f"{hypotheses=}")

    lookahead_sequences, next_token_logits = get_lookahead_sequences(
        model, tokenizer, hypotheses, k, device)

    decoded_next_tokens = tokenizer.batch_decode(lookahead_sequences, skip_special_tokens=True)
    return decoded_next_tokens, next_token_logits


def get_next_token_predictions_slow(
        model, tokenizer, original_doc, prompt, doc_in_progress, k):
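    """
    Slower reference version of get_next_token_predictions_inner: instead of
    reusing the KV cache, it re-runs the model over the full sequence for
    every branch.
    """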

    tokenized_chat = get_tokenized_chat(tokenizer, prompt, original_doc)
    doc_in_progress_ids = tokenize_doc_in_progress(tokenizer, doc_in_progress)

    joined_ids = torch.cat([tokenized_chat, doc_in_progress_ids])
    hypotheses = joined_ids[None].to(model.device)

    # For each of the k most likely next tokens, compute the single most
    # likely following token by re-scoring each extended sequence.

    with torch.no_grad():
        model_outs = model(hypotheses, output_hidden_states=True)

    next_token_logits = model_outs.logits[0, -1]
    branch_tokens = next_token_logits.topk(k).indices

    # Slow mode: concat the branch tokens to the hypotheses.
    # Then call the model on the full sequence.
    # This is slow because the beginning of the sequence is re-processed each time.

    hypotheses_with_next_tokens = torch.cat([
        torch.repeat_interleave(hypotheses, k, dim=0),
        branch_tokens.unsqueeze(1)
    ], dim=1)
    assert hypotheses_with_next_tokens.shape == (k, len(joined_ids) + 1)

    with torch.no_grad():
        model_outs = model(hypotheses_with_next_tokens)
    
    # Grab the single most likely token from each of the k sequences
    next_token_logits = model_outs.logits[:, -1]
    vocab_size = model.config.vocab_size
    assert next_token_logits.shape == (k, vocab_size), f"{next_token_logits.shape=}, {k=}, {vocab_size=}"
    most_likely_token_ids = next_token_logits.argmax(dim=-1)

    # Stick them at the end of the branch tokens.
    assert most_likely_token_ids.shape == (k,)
    lookahead_sequences = torch.cat([
        branch_tokens.unsqueeze(1),
        most_likely_token_ids.unsqueeze(1)
    ], dim=1)
    assert lookahead_sequences.shape == (k, 2)

    decoded_next_tokens = tokenizer.batch_decode(lookahead_sequences, skip_special_tokens=True)
    return decoded_next_tokens, next_token_logits


def continue_messages_inner(model, tokenizer, messages, n_branch_tokens, n_future_tokens):
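    """Continue the final message of a chat, returning n_branch_tokens short continuations."""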
    # Note: we're ignoring n_future_tokens right now since the old implementation was buggy.
    device = model.device

    tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, return_tensors="pt", continue_final_message=True).to(model.device)
    print(tokenizer.batch_decode(tokenized_chat, skip_special_tokens=False))

    lookahead_sequences, next_token_logits = get_lookahead_sequences(
        model, tokenizer, tokenized_chat, n_branch_tokens, device)

    generated_docs = tokenizer.batch_decode(lookahead_sequences, skip_special_tokens=True)
    return generated_docs
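

if __name__ == "__main__":
    # Minimal usage sketch. The model id below is an assumption for
    # illustration; any causal LM with a chat template should work.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # assumed; substitute your own model
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    doc = "The quick brown fox jumps over the lazy dog."
    prompt = "Rewrite this sentence to be more vivid."

    highlights = get_highlights_inner(model, tokenizer, doc, prompt, updated_doc=None, k=5)
    print(highlights[:3])

    next_tokens, _ = get_next_token_predictions_inner(
        model, tokenizer, doc, prompt, doc_in_progress="The swift", k=5)
    print(next_tokens)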