Commit d114a5e
Parent(s): 86f3198

pico-decoder-tiny-1 trained to 50k steps
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- README.md +2 -0
- config.json +22 -0
- eval_results/step_0.json +1 -0
- eval_results/step_1000.json +1 -0
- eval_results/step_10000.json +1 -0
- eval_results/step_11000.json +1 -0
- eval_results/step_12000.json +1 -0
- eval_results/step_13000.json +1 -0
- eval_results/step_14000.json +1 -0
- eval_results/step_15000.json +1 -0
- eval_results/step_16000.json +1 -0
- eval_results/step_17000.json +1 -0
- eval_results/step_18000.json +1 -0
- eval_results/step_19000.json +1 -0
- eval_results/step_2000.json +1 -0
- eval_results/step_20000.json +1 -0
- eval_results/step_21000.json +1 -0
- eval_results/step_22000.json +1 -0
- eval_results/step_23000.json +1 -0
- eval_results/step_24000.json +1 -0
- eval_results/step_25000.json +1 -0
- eval_results/step_26000.json +1 -0
- eval_results/step_27000.json +1 -0
- eval_results/step_28000.json +1 -0
- eval_results/step_29000.json +1 -0
- eval_results/step_3000.json +1 -0
- eval_results/step_30000.json +1 -0
- eval_results/step_31000.json +1 -0
- eval_results/step_32000.json +1 -0
- eval_results/step_33000.json +1 -0
- eval_results/step_34000.json +1 -0
- eval_results/step_35000.json +1 -0
- eval_results/step_36000.json +1 -0
- eval_results/step_37000.json +1 -0
- eval_results/step_38000.json +1 -0
- eval_results/step_39000.json +1 -0
- eval_results/step_4000.json +1 -0
- eval_results/step_40000.json +1 -0
- eval_results/step_41000.json +1 -0
- eval_results/step_42000.json +1 -0
- eval_results/step_43000.json +1 -0
- eval_results/step_44000.json +1 -0
- eval_results/step_45000.json +1 -0
- eval_results/step_46000.json +1 -0
- eval_results/step_47000.json +1 -0
- eval_results/step_48000.json +1 -0
- eval_results/step_49000.json +1 -0
- eval_results/step_5000.json +1 -0
- eval_results/step_50000.json +1 -0
- eval_results/step_6000.json +1 -0
README.md
CHANGED
@@ -13,6 +13,8 @@ pipeline_tag: text-generation
 
 **pico-decoder-tiny** is the smallest (11M) model in the `pico-decoder` suite — a lightweight, LLaMA-style decoder-only transformer trained from scratch using [`pico-train`](https://github.com/pico-lm/pico-train). It is designed for transparent and reproducible research into the learning dynamics of language models, and is fully compatible with the `pico-analyze` toolkit for detailed interpretability analysis.
 
+> NOTE: The `pico-decoder-tiny-1` branch contains the full commit history for the training run.
+
 ## 🔧 Model Details
 
 | Field | Value |
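Given the `auto_map` entries added in `config.json` below (`pico.PicoHFConfig` / `pico.PicoHF`), the checkpoint should load through the standard `transformers` auto classes with remote code enabled. A minimal loading sketch, assuming a hypothetical repository ID of `pico-lm/pico-decoder-tiny` and that this training run lives on the `pico-decoder-tiny-1` branch noted in the README:

```python
# Minimal loading sketch -- the repo ID and revision are assumptions,
# not confirmed by this diff.
from transformers import AutoConfig, AutoModelForCausalLM

REPO_ID = "pico-lm/pico-decoder-tiny"  # hypothetical repository ID

# trust_remote_code=True is needed because config.json maps the model to the
# custom classes pico.PicoHFConfig / pico.PicoHF.
config = AutoConfig.from_pretrained(REPO_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    REPO_ID,
    revision="pico-decoder-tiny-1",  # branch with the full training history
    trust_remote_code=True,
)
print(config.d_model, config.n_layers)  # 96, 12 per config.json
```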
config.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "activation_hidden_dim": 384,
+  "architectures": [
+    "PicoHF"
+  ],
+  "attention_n_heads": 12,
+  "attention_n_kv_heads": 4,
+  "auto_map": {
+    "AutoConfig": "pico.PicoHFConfig",
+    "AutoModelForCausalLM": "pico.PicoHF"
+  },
+  "batch_size": 1024,
+  "d_model": 96,
+  "max_seq_len": 2048,
+  "model_type": "pico",
+  "n_layers": 12,
+  "norm_eps": 1e-06,
+  "position_emb_theta": 10000.0,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.1",
+  "vocab_size": 50304
+}
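As a sanity check on the README's 11M figure, the values in `config.json` roughly reproduce the parameter count, under two assumptions the diff does not confirm: a LLaMA-style SwiGLU MLP (gate/up/down projections) and untied input/output embeddings. A back-of-the-envelope sketch:

```python
# Rough parameter count from config.json -- assumes a LLaMA-style SwiGLU MLP
# and untied embedding / LM-head matrices (assumptions, not confirmed here).
d_model, n_layers, vocab = 96, 12, 50304
mlp_hidden = 384                 # activation_hidden_dim
n_heads, n_kv_heads = 12, 4
head_dim = d_model // n_heads    # 8

attn = d_model * (n_heads * head_dim)           # Q projection
attn += 2 * d_model * (n_kv_heads * head_dim)   # K and V (grouped-query attention)
attn += (n_heads * head_dim) * d_model          # output projection

mlp = 3 * d_model * mlp_hidden   # gate, up, down projections
norms = 2 * d_model              # two norm weight vectors per layer

embeddings = 2 * vocab * d_model  # input embedding + untied LM head
total = n_layers * (attn + mlp + norms) + embeddings
print(f"{total / 1e6:.1f}M parameters")  # ~11.3M, consistent with the README
```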
eval_results/step_0.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 59435.05139917247}

eval_results/step_1000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 2176.422658291594}

eval_results/step_10000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 313.8343901737226}

eval_results/step_11000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 268.332413412885}

eval_results/step_12000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 254.46162965488767}

eval_results/step_13000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 249.49274518747362}

eval_results/step_14000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 260.20426084006704}

eval_results/step_15000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 239.41960436525244}

eval_results/step_16000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 229.52608692687562}

eval_results/step_17000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 225.62338353731906}

eval_results/step_18000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 212.63083450470236}

eval_results/step_19000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 208.58393890899234}

eval_results/step_2000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 884.8345587062504}

eval_results/step_20000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 205.5417249480191}

eval_results/step_21000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 201.91932611332538}

eval_results/step_22000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 199.08593051392026}

eval_results/step_23000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 198.37421456945066}

eval_results/step_24000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 193.6218826051373}

eval_results/step_25000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 192.3463352126942}

eval_results/step_26000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 190.8057739201323}

eval_results/step_27000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 192.40458190831572}

eval_results/step_28000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 188.59788890599373}

eval_results/step_29000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 187.05302696460632}

eval_results/step_3000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 623.0875143150835}

eval_results/step_30000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 186.20094004940074}

eval_results/step_31000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 183.60527752766626}

eval_results/step_32000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 182.63232411407841}

eval_results/step_33000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 182.5158897050997}

eval_results/step_34000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 562.858497242479}

eval_results/step_35000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 184.30060736659513}

eval_results/step_36000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 177.34308501486163}

eval_results/step_37000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 177.5313984960629}

eval_results/step_38000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 178.25564483549536}

eval_results/step_39000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 174.9592615609385}

eval_results/step_4000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 606.9779211090832}

eval_results/step_40000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 174.86358099016994}

eval_results/step_41000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 173.4716007269218}

eval_results/step_42000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 174.3865502098296}

eval_results/step_43000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 171.96283629081805}

eval_results/step_44000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 172.43479460373987}

eval_results/step_45000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 170.67068021986958}

eval_results/step_46000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 170.5537706348539}

eval_results/step_47000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 168.9578624722019}

eval_results/step_48000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 169.10025308456156}

eval_results/step_49000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 168.10817976263758}

eval_results/step_5000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 1044.7105118223183}

eval_results/step_50000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 167.72806381398377}

eval_results/step_6000.json
ADDED
@@ -0,0 +1 @@
+{"paloma": 786.850875407229}
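Each `eval_results/step_<N>.json` file holds a single Paloma perplexity reading, so the learning curve can be recovered by globbing the step files and sorting numerically. A minimal plotting sketch, assuming the `eval_results/` directory has been downloaded locally (for example with `huggingface_hub.snapshot_download`):

```python
# Plot Paloma perplexity vs. training step from the eval_results/*.json files.
# Assumes the directory is available locally; file names follow step_<N>.json.
import json
import re
from pathlib import Path

import matplotlib.pyplot as plt

steps, ppl = [], []
for path in Path("eval_results").glob("step_*.json"):
    step = int(re.search(r"step_(\d+)", path.name).group(1))
    with path.open() as f:
        steps.append(step)
        ppl.append(json.load(f)["paloma"])

# Sort numerically: glob order is lexicographic (step_10000 before step_2000),
# just like the file list in this commit view.
steps, ppl = zip(*sorted(zip(steps, ppl)))

plt.plot(steps, ppl)
plt.yscale("log")  # the step-0 value (~59k) dwarfs the later readings
plt.xlabel("training step")
plt.ylabel("Paloma perplexity")
plt.title("pico-decoder-tiny-1: Paloma perplexity over 50k steps")
plt.show()
```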