norabelrose committed
Commit 95f31c6 · Parent(s): b7ab225

Add llama 13b lens

Browse files
- .gitattributes +5 -0
- README.md +3 -12
- __pycache__/app.cpython-310.pyc +0 -0
- app.py +3 -112
- lens/EleutherAI/gpt-neox-20b/config.json +3 -1
- lens/EleutherAI/pythia-1.4b-deduped-v0/config.json +3 -1
- lens/EleutherAI/pythia-1.4b-deduped/config.json +3 -1
- lens/EleutherAI/pythia-12b-deduped-v0/config.json +3 -1
- lens/EleutherAI/pythia-12b-deduped/config.json +3 -1
- lens/EleutherAI/pythia-160m-deduped-v0/config.json +3 -1
- lens/EleutherAI/pythia-160m-deduped/config.json +3 -1
- lens/EleutherAI/pythia-1b-deduped-v0/config.json +3 -1
- lens/EleutherAI/pythia-2.8b-deduped-v0/config.json +3 -1
- lens/EleutherAI/pythia-2.8b-deduped/config.json +3 -1
- lens/EleutherAI/pythia-410m-deduped-v0/config.json +3 -1
- lens/EleutherAI/pythia-410m-deduped/config.json +3 -1
- lens/EleutherAI/pythia-6.9b-deduped-v0/config.json +3 -1
- lens/EleutherAI/pythia-6.9b-deduped/config.json +3 -1
- lens/EleutherAI/pythia-70m-deduped-v0/config.json +3 -1
- lens/EleutherAI/pythia-70m-deduped/config.json +3 -1
- lens/facebook/llama-13b/config.json +3 -0
- lens/facebook/llama-13b/params.pt +3 -0
- lens/facebook/llama-7b/config.json +3 -1
- lens/facebook/opt-1.3b/config.json +3 -1
- lens/facebook/opt-125m/config.json +3 -1
- lens/facebook/opt-6.7b/config.json +3 -1
- lens/gpt2-large/config.json +3 -1
- lens/gpt2-xl/config.json +3 -1
- lens/gpt2/config.json +3 -1
- requirements.txt +3 -1
.gitattributes
CHANGED
@@ -32,3 +32,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.md filter=lfs diff=lfs merge=lfs -text
+*.pyc filter=lfs diff=lfs merge=lfs -text
+*.py filter=lfs diff=lfs merge=lfs -text
+*.json filter=lfs diff=lfs merge=lfs -text
+*.txt filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,12 +1,3 @@
-
-
-
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-python_version: 3.10.2
-sdk_version: 3.20.0
-app_file: app.py
-pinned: false
-license: mit
----
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd20cba45293ebf4604bfc781e4ebd35a46ce45ff0c9c161492c6fb9f4912d5b
+size 167
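The `version` / `oid` / `size` triples that now replace these files are Git LFS pointer stubs: the repository keeps only this small text file, while the real content is stored in LFS and addressed by its SHA-256. As an illustration only (the helper below is hypothetical and not part of this commit), such a pointer can be parsed like this:

```
# Illustrative sketch: parse a Git LFS pointer file into its fields.
# Not part of this repo; shown only to clarify the +version/+oid/+size lines above.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "oid": fields["oid"].split(":", 1)[-1],  # strip the "sha256:" prefix
        "size_bytes": int(fields["size"]),
    }

# The README.md pointer from this commit.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:bd20cba45293ebf4604bfc781e4ebd35a46ce45ff0c9c161492c6fb9f4912d5b
size 167
"""
print(parse_lfs_pointer(pointer))
```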
__pycache__/app.cpython-310.pyc
CHANGED
Binary files a/__pycache__/app.cpython-310.pyc and b/__pycache__/app.cpython-310.pyc differ
app.py
CHANGED
@@ -1,112 +1,3 @@
-
-
-
-from tuned_lens.plotting import plot_lens
-import gradio as gr
-from plotly import graph_objects as go
-
-device = torch.device("cpu")
-print(f"Using device {device} for inference")
-model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped-v0")
-model = model.to(device)
-tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped-v0")
-tuned_lens = TunedLens.load("pythia-410m-deduped-v0", map_location=device)
-logit_lens = LogitLens(model)
-
-lens_options_dict = {
-    "Tuned Lens": tuned_lens,
-    "Logit Lens": logit_lens,
-}
-
-statistic_options_dict = {
-    "Entropy": "entropy",
-    "Cross Entropy": "ce",
-    "Forward KL": "forward_kl",
-}
-
-
-def make_plot(lens, text, statistic, token_cutoff):
-    input_ids = tokenizer.encode(text, return_tensors="pt")
-
-    if len(input_ids[0]) == 0:
-        return go.Figure(layout=dict(title="Please enter some text."))
-
-    if token_cutoff < 1:
-        return go.Figure(layout=dict(title="Please provide valid token cut off."))
-
-    fig = plot_lens(
-        model,
-        tokenizer,
-        lens_options_dict[lens],
-        layer_stride=2,
-        input_ids=input_ids,
-        start_pos=max(len(input_ids[0]) - token_cutoff, 0),
-        statistic=statistic_options_dict[statistic],
-    )
-
-    return fig
-
-
-preamble = """
-# The Tuned Lens 🔎
-
-A tuned lens allows us to peek at the iterative computations a transformer uses to compute the next token.
-
-A lens into a transformer with $n$ layers allows you to replace the last $m$ layers of the model with an [affine transformation](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html) (we call these affine translators).
-
-This essentially skips over these last few layers and lets you see the best prediction that can be made from the model's representations, i.e. the residual stream, at layer $n - m$. Since the representations may be rotated, shifted, or stretched from layer to layer, it's useful to train the lens's affine adapters specifically on each layer. This training is what differentiates this method from simpler approaches that decode the residual stream of the network directly using the unembedding layer, i.e. the logit lens. We explain this process in [the paper](https://arxiv.org/abs/2303.08112).
-
-## Usage
-Since the tuned lens produces a distribution of predictions, to visualize its output we need to provide a summary statistic to plot. The default is simply [entropy](https://en.wikipedia.org/wiki/Entropy_(information_theory)), but you can also choose the [cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) with the target token, or the [KL divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) between the model's predictions and the tuned lens' predictions. You can also hover over a token to see more of the distribution, i.e. the top 10 most probable tokens and their probabilities.
-
-## Examples
-Here are some interesting examples you can try.
-
-### Copy paste:
-```
-Copy: A!2j!#u&NGApS&MkkHe8Gm!#
-Paste: A!2j!#u&NGApS&MkkHe8Gm!#
-```
-
-### Trivial in-context learning
-```
-inc 1 2
-inc 4 5
-inc 13
-```
-
-#### Addition
-```
-add 1 1 2
-add 3 4 7
-add 13 2
-```
-"""
-
-with gr.Blocks() as demo:
-    gr.Markdown(preamble)
-    with gr.Column():
-        text = gr.Textbox(
-            value="it was the best of times, it was the worst of times",
-            label="Input Text",
-        )
-        with gr.Row():
-            lens_options = gr.Dropdown(
-                list(lens_options_dict.keys()), value="Tuned Lens", label="Select Lens"
-            )
-            statistic = gr.Dropdown(
-                list(statistic_options_dict.keys()),
-                value="Entropy",
-                label="Select Statistic",
-            )
-            token_cutoff = gr.Slider(
-                maximum=20, minimum=2, value=10, step=1, label="Plot Last N Tokens"
-            )
-        examine_btn = gr.Button(value="Submit")
-        plot = gr.Plot()
-        examine_btn.click(make_plot, [lens_options, text, statistic, token_cutoff], plot)
-        demo.load(make_plot, [lens_options, text, statistic, token_cutoff], plot)
-
-if __name__ == "__main__":
-    demo.launch()
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc89bb5f80d8a86f0d65fba048f8b4d35e099f03ad268a08e2ccbf6eac8cffbc
+size 4260
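The preamble in the removed app.py above explains how a tuned lens decodes the residual stream at layer $n - m$ through per-layer affine translators, and the code shows the `TunedLens.load` / `plot_lens` calls it used. The sketch below adapts that same workflow to the llama-13b lens added by this commit. It is a hypothetical usage example, not part of the commit: the import paths, the `facebook/llama-13b` checkpoint id, and the lens name passed to `TunedLens.load` are assumptions, since the original import lines are not captured in this diff and the base model weights are not distributed here.

```
# Hypothetical usage sketch for the newly added llama-13b lens.
# Assumes the tuned_lens package exposes TunedLens and plot_lens as used in the
# removed app.py, and that a "facebook/llama-13b" causal LM is available locally.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer  # assumed imports
from tuned_lens import TunedLens                               # import path assumed
from tuned_lens.plotting import plot_lens

device = torch.device("cpu")
model = AutoModelForCausalLM.from_pretrained("facebook/llama-13b").to(device)
tokenizer = AutoTokenizer.from_pretrained("facebook/llama-13b")

# Mirrors TunedLens.load("pythia-410m-deduped-v0", map_location=device) above;
# the lens weights correspond to lens/facebook/llama-13b/{config.json, params.pt}.
tuned_lens = TunedLens.load("facebook/llama-13b", map_location=device)

input_ids = tokenizer.encode("it was the best of times", return_tensors="pt")
fig = plot_lens(
    model,
    tokenizer,
    tuned_lens,
    layer_stride=2,
    input_ids=input_ids,
    statistic="entropy",
)
fig.show()
```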
lens/EleutherAI/gpt-neox-20b/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:f42ec7fc2a0d16d2fdb6ac2f1361d9e775179d41168a7bb12026433ea1ceb168
+size 295
lens/EleutherAI/pythia-1.4b-deduped-v0/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:e697309953fc2c15a215c381ea22d91d90cddf739df6ddd54fa8f01c45248344
+size 305
lens/EleutherAI/pythia-1.4b-deduped/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:c496b7bc74699436b65791838cb1fdf48d27744cb243874af9da25b9be84b465
+size 264
lens/EleutherAI/pythia-12b-deduped-v0/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f5c244a5d4c5ac6c5bb22d42c922eac03680625605400db384d3a3da1875206
+size 304
lens/EleutherAI/pythia-12b-deduped/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:227869e1813ba53c131cbfbe0cb82d3f556e07cfb704606bb94e846fe6b5f8c5
+size 263
lens/EleutherAI/pythia-160m-deduped-v0/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2cfc2289ff688df53f599cda06dcac9f72087efe42da8f20a3fda203341413d
+size 304
lens/EleutherAI/pythia-160m-deduped/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:0393bfdd60f616c9606b1db801332c5e1cc1abbf7a52f0b8fc362547d613691c
+size 263
lens/EleutherAI/pythia-1b-deduped-v0/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc83a156780cb5d49778dfbae742c5c2e1af36123f7cf60764f1adae80ede4bb
+size 303
lens/EleutherAI/pythia-2.8b-deduped-v0/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:06238f3af11e14f5663b498d82e6da7dca87c9e90877817f474fee5fda1f1c67
+size 267
lens/EleutherAI/pythia-2.8b-deduped/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab43342448b067918933af8e082c6b6a2b4111a80e955bb75577a81f9bec8d4c
+size 264
lens/EleutherAI/pythia-410m-deduped-v0/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:33a2e63741f52966e5ff78d81a91e3d103dbe979d3a03165bedcf071e7e10358
+size 305
lens/EleutherAI/pythia-410m-deduped/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:9785f07bfea0affe284f39780e87347135725391f910a6846d45a7a18d982006
+size 264
lens/EleutherAI/pythia-6.9b-deduped-v0/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:b58ab18fa92a144b18fcdb0cb6d24f1d9fe2c2a90bbd69cfab9688e96bcde5a0
+size 305
lens/EleutherAI/pythia-6.9b-deduped/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe20b7d1dfaa0182930130e349204d9144af9cc1ca0b58d8102d6b81f63cd5f9
+size 264
lens/EleutherAI/pythia-70m-deduped-v0/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:468abebefbad4f03c38bebd45317218fa0ee87ac81edcf963ee5c1af60ff218f
+size 302
lens/EleutherAI/pythia-70m-deduped/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:3925c5ff7bde19de9a09bf86ee303757cdc3dc70058f0a9b9528ea357ad6afc7
+size 261
lens/facebook/llama-13b/config.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6777d20cd9652dc252752c5c776c07e7c8d920afea9b962c841a16056d3a109
+size 254
lens/facebook/llama-13b/params.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2700859b4335f4e4bdd4040232f594170bb717f8af04f65e5560c49cfb6da122
+size 2097581027
lens/facebook/llama-7b/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:6644680efdcc19840bf75c96e3ab97fb1b16a9e22f2b0456e9b05ff9e7c0d60d
+size 253
lens/facebook/opt-1.3b/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f6cee11c783ee10cf15b10a4a059b838643a414384584a9c53adeaee265de53
+size 289
lens/facebook/opt-125m/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:e653efc95c2587a4086fef70dd33af22fa4f2e49414668559bb68b8e14fb6b94
+size 288
lens/facebook/opt-6.7b/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:37a2047cc1ea31a9bb0e97c6c84cc9726399981ac76ef16e45a7a0d3499ff5a7
+size 289
lens/gpt2-large/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0650af69d4c7d4ac6ad6e14c604b3fe2e296f967d21afe43017cd20160c5abd
+size 282
lens/gpt2-xl/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:d291da4602b3f32e85563e53ae68dc4c378a00f81f3ee52805f38c03636386fa
+size 279
lens/gpt2/config.json
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:7269c86486bd48001867e8d040f5e8a41f6a889648bd09548e27f4e59dabc35a
+size 275
requirements.txt
CHANGED
@@ -1 +1,3 @@
-
+version https://git-lfs.github.com/spec/v1
+oid sha256:0de533a67fd8d52c57fce9de893dd7c8c53dfb750bc910848950c18f2ebc80ea
+size 18