rdiehlmartinez committed
Commit 6bfde12 · Parent: 81ed588

pico-decoder-large-1 trained to 50k steps

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. README.md +2 -0
  2. config.json +22 -0
  3. eval_results/step_0.json +1 -0
  4. eval_results/step_1000.json +1 -0
  5. eval_results/step_10000.json +1 -0
  6. eval_results/step_11000.json +1 -0
  7. eval_results/step_12000.json +1 -0
  8. eval_results/step_13000.json +1 -0
  9. eval_results/step_14000.json +1 -0
  10. eval_results/step_15000.json +1 -0
  11. eval_results/step_16000.json +1 -0
  12. eval_results/step_17000.json +1 -0
  13. eval_results/step_18000.json +1 -0
  14. eval_results/step_19000.json +1 -0
  15. eval_results/step_2000.json +1 -0
  16. eval_results/step_20000.json +1 -0
  17. eval_results/step_21000.json +1 -0
  18. eval_results/step_22000.json +1 -0
  19. eval_results/step_23000.json +1 -0
  20. eval_results/step_24000.json +1 -0
  21. eval_results/step_25000.json +1 -0
  22. eval_results/step_26000.json +1 -0
  23. eval_results/step_27000.json +1 -0
  24. eval_results/step_28000.json +1 -0
  25. eval_results/step_29000.json +1 -0
  26. eval_results/step_3000.json +1 -0
  27. eval_results/step_30000.json +1 -0
  28. eval_results/step_31000.json +1 -0
  29. eval_results/step_32000.json +1 -0
  30. eval_results/step_33000.json +1 -0
  31. eval_results/step_34000.json +1 -0
  32. eval_results/step_35000.json +1 -0
  33. eval_results/step_36000.json +1 -0
  34. eval_results/step_37000.json +1 -0
  35. eval_results/step_38000.json +1 -0
  36. eval_results/step_39000.json +1 -0
  37. eval_results/step_4000.json +1 -0
  38. eval_results/step_40000.json +1 -0
  39. eval_results/step_41000.json +1 -0
  40. eval_results/step_42000.json +1 -0
  41. eval_results/step_43000.json +1 -0
  42. eval_results/step_44000.json +1 -0
  43. eval_results/step_45000.json +1 -0
  44. eval_results/step_46000.json +1 -0
  45. eval_results/step_47000.json +1 -0
  46. eval_results/step_48000.json +1 -0
  47. eval_results/step_49000.json +1 -0
  48. eval_results/step_5000.json +1 -0
  49. eval_results/step_50000.json +1 -0
  50. eval_results/step_6000.json +1 -0
README.md CHANGED
@@ -13,6 +13,8 @@ pipeline_tag: text-generation
 
 **pico-decoder-large** is the largest model (570M) in the current `pico-decoder` suite. It is a full-scale research model designed for in-depth interpretability studies of transformer learning. Trained with [`pico-train`](https://github.com/pico-lm) and fully compatible with [`pico-analyze`](https://github.com/pico-lm), it offers rich checkpointing and analytical insight into large-scale LM behavior.
 
+> NOTE: The `pico-decoder-large-1` branch contains the full commit history for the training run.
+
 ## 🔧 Model Details
 
 | Field | Value |
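Since the note above points at the `pico-decoder-large-1` branch for the full checkpoint history, a minimal sketch of fetching it with `huggingface_hub` follows. The repository id `pico-lm/pico-decoder-large` is an assumption (it is not shown on this page); substitute the actual repo id.

```python
# Sketch: download files from the branch mentioned in the README note.
# "pico-lm/pico-decoder-large" is an assumed repo id, not confirmed by this page.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="pico-lm/pico-decoder-large",    # assumed repo id
    revision="pico-decoder-large-1",         # branch with the full commit history
    allow_patterns=["eval_results/*.json"],  # e.g. fetch only the eval curves
)
print(local_dir)
```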
config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "activation_hidden_dim": 6144,
+  "architectures": [
+    "PicoHF"
+  ],
+  "attention_n_heads": 12,
+  "attention_n_kv_heads": 4,
+  "auto_map": {
+    "AutoConfig": "pico.PicoHFConfig",
+    "AutoModelForCausalLM": "pico.PicoHF"
+  },
+  "batch_size": 1024,
+  "d_model": 1536,
+  "max_seq_len": 2048,
+  "model_type": "pico",
+  "n_layers": 12,
+  "norm_eps": 1e-06,
+  "position_emb_theta": 10000.0,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.1",
+  "vocab_size": 50304
+}
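The `auto_map` entries above route `AutoModelForCausalLM` to the custom `pico.PicoHF` class, so loading requires `trust_remote_code=True`. A minimal sketch, again assuming the repo id `pico-lm/pico-decoder-large`:

```python
# Sketch: load the model described by this config via its auto_map.
# The repo id is an assumption; the branch name comes from the README note.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "pico-lm/pico-decoder-large",     # assumed repo id
    revision="pico-decoder-large-1",  # branch with the full commit history
    trust_remote_code=True,           # needed for the custom pico.PicoHF class
)
print(sum(p.numel() for p in model.parameters()))  # roughly 570M per the README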
eval_results/step_0.json ADDED
@@ -0,0 +1 @@
+{"paloma": 60407.55679170296}
eval_results/step_1000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 325.7754169842923}
eval_results/step_10000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 39.63131210396929}
eval_results/step_11000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 38.16542526977818}
eval_results/step_12000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 37.00609017996838}
eval_results/step_13000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 36.12258010915763}
eval_results/step_14000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 35.30092575084873}
eval_results/step_15000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 34.9831400311367}
eval_results/step_16000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 33.944966741887534}
eval_results/step_17000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 33.525249324692254}
eval_results/step_18000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 33.06738855614479}
eval_results/step_19000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 32.68154775822204}
eval_results/step_2000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 893.744832137902}
eval_results/step_20000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 32.365150609082875}
eval_results/step_21000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 31.893736724238778}
eval_results/step_22000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 31.542816529955182}
eval_results/step_23000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 31.126356617498896}
eval_results/step_24000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 30.837729862010438}
eval_results/step_25000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 30.55359834328761}
eval_results/step_26000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 30.223841104607132}
eval_results/step_27000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 30.00440268965133}
eval_results/step_28000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 29.728886223836227}
eval_results/step_29000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 29.550568088920276}
eval_results/step_3000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 86.66753888420942}
eval_results/step_30000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 29.28618069741784}
eval_results/step_31000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 29.13321706517648}
eval_results/step_32000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 28.90883405690409}
eval_results/step_33000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 28.689674922979666}
eval_results/step_34000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 28.554471499828512}
eval_results/step_35000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 28.37717256438026}
eval_results/step_36000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 28.19421185649646}
eval_results/step_37000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 28.05642763455155}
eval_results/step_38000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 27.91100605555943}
eval_results/step_39000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 27.77313469752202}
eval_results/step_4000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 61.31575394249959}
eval_results/step_40000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 27.60404353183321}
eval_results/step_41000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 27.54711822325344}
eval_results/step_42000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 27.389416195789696}
eval_results/step_43000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 27.207264288792626}
eval_results/step_44000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 27.154732140514493}
eval_results/step_45000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 26.99035127520146}
eval_results/step_46000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 26.9613054703337}
eval_results/step_47000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 26.828856883730207}
eval_results/step_48000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 26.728875775287374}
eval_results/step_49000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 26.63958778913013}
eval_results/step_5000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 56.45454279595551}
eval_results/step_50000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 26.524442167398405}
eval_results/step_6000.json ADDED
@@ -0,0 +1 @@
+{"paloma": 49.864106077267316}