import torch
import pandas as pd
import transformers
import gradio as gr

tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')
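# Precomputed Backpack artifacts: all_vecs_mtx.pt holds every token's sense
# vectors (vocab_size x 16 senses x 768 dims); lm_head.pt is the output
# embedding matrix (vocab_size x 768) that projects senses onto the vocabulary.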
vecs = torch.load("senses/all_vecs_mtx.pt", map_location="cpu")
lm_head = torch.load("senses/lm_head.pt", map_location="cpu")  # CPU is enough for lookups


def visualize_word(word, count=10, remove_space=False):
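    """Look up the sense vectors for the first token of `word`.

    Returns a DataFrame of the `count` highest-scoring vocabulary words per
    sense, a DataFrame of the `count` lowest-scoring words per sense, and the
    comma-separated token breakdown of `word`.
    """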

    if not remove_space:
        word = ' ' + word
    print(f"Looking up word '{word}'...")

    token_ids = tokenizer(word)['input_ids']
    tokens = [tokenizer.decode(token_id) for token_id in token_ids]
    tokens = ", ".join(tokens) # display tokenization for user
    print(f"Tokenized as: {tokens}")
    # look up sense vectors only for the first token
    contents = vecs[token_ids[0]] # torch.Size([16, 768])

    # for pos and neg respectively, build a list (one per sense) of the top-k
    # (word, logit) tuples
    count = int(count)  # slider values can arrive as floats
    pos_word_lists = []
    neg_word_lists = []
    sense_names = []  # column headers
    for i in range(contents.shape[0]):
        logits = contents[i, :] @ lm_head.t()  # [768] @ [768, 50257] -> [50257] logits over the vocab
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        sense_names.append('sense {}'.format(i))

        pos_sorted_words = [tokenizer.decode(sorted_indices[j]) for j in range(count)]
        pos_sorted_logits = [sorted_logits[j].item() for j in range(count)]
        pos_word_lists.append(list(zip(pos_sorted_words, pos_sorted_logits)))

        neg_sorted_words = [tokenizer.decode(sorted_indices[-j-1]) for j in range(count)]
        neg_sorted_logits = [sorted_logits[-j-1].item() for j in range(count)]
        neg_word_lists.append(list(zip(neg_sorted_words, neg_sorted_logits)))

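    # Render each sense's (word, logit) pairs as "word (logit)" strings in a
    # rank-by-sense table for display.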
    def create_dataframe(word_lists, sense_names, count):
        data = dict(zip(sense_names, word_lists))
        df = pd.DataFrame(index=range(count), columns=list(data.keys()))
        for prop, word_list in data.items():
            for i, (token, logit) in enumerate(word_list):
                # show whitespace-only tokens as "space" so the cell is not blank
                token = token if token.strip() else "space"
                df.at[i, prop] = "{} ({:.2f})".format(token, logit)
        return df
    
    pos_df = create_dataframe(pos_word_lists, sense_names, count)
    neg_df = create_dataframe(neg_word_lists, sense_names, count)

    return pos_df, neg_df, tokens

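# Build the Gradio UI: a lookup box, the token breakdown, display options, and
# two tables showing the highest- and lowest-scoring words per sense.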
with gr.Blocks() as demo:
    gr.Markdown("""
    ## Backpack visualization: senses lookup
    > Note: Backpack uses the GPT-2 tokenizer, which treats the space before a word as part of the token, so by default a space character `' '` is prepended to the word you look up. You can disable this by checking `Remove space before word`, but note that doing so may produce odd tokenizations, such as breaking `afraid` into `af` and `raid`, or `slight` into `s` and `light`.
    """)
    with gr.Row():
        word = gr.Textbox(label="Word")
        token_breakdown = gr.Textbox(label="Token Breakdown (senses are for the first token only)")
        remove_space = gr.Checkbox(label="Remove space before word", value=False)
        count = gr.Slider(minimum=1, maximum=20, value=10, label="Top K", step=1)
    pos_outputs = gr.Dataframe(label="Highest Scoring Senses")
    neg_outputs = gr.Dataframe(label="Lowest Scoring Senses")
    gr.Examples(
        examples=["science", "afraid", "book", "slight"],
        inputs=[word],
        outputs=[pos_outputs, neg_outputs, token_breakdown],
        fn=visualize_word,
        cache_examples=True,
    )

    gr.Button("Look up").click(
        fn=visualize_word,
        inputs=[word, count, remove_space],
        outputs=[pos_outputs, neg_outputs, token_breakdown],
    )

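# Launch behind HTTP basic auth (username "caesar", password "wins").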
demo.launch(auth=("caesar", "wins"))