Spaces:
Running
Running
update
Browse files- .gitattributes +2 -0
- .gitignore +5 -0
- README.md +15 -0
- assets/data/clustering/data.csv +0 -0
- assets/data/clustering/info.csv +106 -0
- assets/data/plots/all_dumps_bad/agg_score.json +1 -0
- assets/images/IMG_7537D08D7F41-1.jpeg +3 -0
- assets/images/IMG_A95961668B3F-1.jpeg +3 -0
- assets/images/IMG_C4260C5C58DC-1.jpeg +3 -0
- assets/images/IMG_DA188FF29F45-1.jpeg +3 -0
- assets/images/image 1.png +3 -0
- assets/images/image 10.png +3 -0
- assets/images/image 11.png +3 -0
- assets/images/image 12.png +3 -0
- assets/images/image 13.png +3 -0
- assets/images/image 14.png +3 -0
- assets/images/image 15.png +3 -0
- assets/images/image 16.png +3 -0
- assets/images/image 17.png +3 -0
- assets/images/image 2.png +3 -0
- assets/images/image 3.png +3 -0
- assets/images/image 4.png +3 -0
- assets/images/image 5.png +3 -0
- assets/images/image 6.png +3 -0
- assets/images/image 7.png +3 -0
- assets/images/image 8.png +3 -0
- assets/images/image 9.png +3 -0
- assets/images/image.png +3 -0
- assets/images/rotation.jpeg +3 -0
- assets/images/rotation_speed.jpeg +3 -0
- dist/bibliography.bib +0 -5
- dist/distill.bundle.js +0 -0
- dist/distill.bundle.js.map +0 -0
- dist/index.html +0 -121
- dist/main.bundle.js +0 -0
- dist/main.bundle.js.LICENSE.txt +0 -19
- dist/main.bundle.js.map +0 -0
- package-lock.json +24 -0
- package.json +1 -0
- python/memory/__init__.py +0 -0
- python/memory/explorations.ipynb +51 -0
- python/memory/utils.py +57 -0
- src/bibliography.bib +334 -0
- src/colors.mjs +77 -0
- src/distill.js +0 -0
- src/index.html +917 -0
- src/index.js +10 -0
- src/plotting.js +383 -0
- {dist → src}/style.css +0 -0
- webpack.config.js +96 -0
.gitattributes
CHANGED
@@ -7,6 +7,7 @@
|
|
7 |
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
*.joblib filter=lfs diff=lfs merge=lfs -text
|
|
|
10 |
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
*.model filter=lfs diff=lfs merge=lfs -text
|
@@ -19,6 +20,7 @@
|
|
19 |
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
*.pkl filter=lfs diff=lfs merge=lfs -text
|
|
|
22 |
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
*.rar filter=lfs diff=lfs merge=lfs -text
|
|
|
7 |
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
11 |
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
12 |
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
13 |
*.model filter=lfs diff=lfs merge=lfs -text
|
|
|
20 |
*.pb filter=lfs diff=lfs merge=lfs -text
|
21 |
*.pickle filter=lfs diff=lfs merge=lfs -text
|
22 |
*.pkl filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
24 |
*.pt filter=lfs diff=lfs merge=lfs -text
|
25 |
*.pth filter=lfs diff=lfs merge=lfs -text
|
26 |
*.rar filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
node_modules/
|
2 |
+
*.log
|
3 |
+
*.env
|
4 |
+
*.cache
|
5 |
+
python/**/__pycache__
|
README.md
CHANGED
@@ -12,3 +12,18 @@ thumbnail: https://huggingface.co/spaces/HuggingFaceFW/blogpost-fineweb-v1/resol
|
|
12 |
---
|
13 |
|
14 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
---
|
13 |
|
14 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
15 |
+
|
16 |
+
|
17 |
+
Instruction to install and run locally
|
18 |
+
|
19 |
+
```bash
|
20 |
+
npm install
|
21 |
+
npm run build
|
22 |
+
npm run dev
|
23 |
+
|
24 |
+
// If you want to change something change it in src/....
|
25 |
+
|
26 |
+
// Once you are finished
|
27 |
+
npm run build
|
28 |
+
// And commit the dist folder
|
29 |
+
```
|
assets/data/clustering/data.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
assets/data/clustering/info.csv
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
,cluster_id,cluster_summaries,cluster_position_x,cluster_position_y
|
2 |
+
0,-1,None,9.926462,4.7121987
|
3 |
+
1,0,Philosophical/Spiritual Introspection,10.312462,1.2666532
|
4 |
+
2,1,"Scholarships,",8.167274,4.8995786
|
5 |
+
3,2,Politics,8.81142,2.4859838
|
6 |
+
4,3,Theology,9.615214,0.3783942
|
7 |
+
5,4,Dating,4.985182,1.8439052
|
8 |
+
6,5,Accommodation,11.457769,5.080919
|
9 |
+
7,6,Football,6.6154537,-1.6859366
|
10 |
+
8,7,Film Festival,6.9734483,1.4548192
|
11 |
+
9,8,Culinary,13.426296,4.5412893
|
12 |
+
10,9,Music,6.0653744,0.7536916
|
13 |
+
11,10,Gambling,3.124241,3.2533677
|
14 |
+
12,11,Baseball,7.133596,-2.4256644
|
15 |
+
13,12,Technology,6.4929094,6.768577
|
16 |
+
14,13,Website Policies,4.873843,5.771508
|
17 |
+
15,14,Weddings,11.815845,3.7894728
|
18 |
+
16,15,Gaming,5.529167,2.9530518
|
19 |
+
17,16,Commodities/Services Provision,10.453564,5.8489122
|
20 |
+
18,17,Crafts,13.287651,6.4237967
|
21 |
+
19,18,Automobiles,9.9531145,8.840178
|
22 |
+
20,19,Watches,13.893139,9.859185
|
23 |
+
21,20,Dogs,12.595798,3.5351615
|
24 |
+
22,21,Photography,10.7942295,3.5504062
|
25 |
+
23,22,Legalities,8.942016,4.72733
|
26 |
+
24,23,Consumer Electronics,7.078649,8.338984
|
27 |
+
25,24,Insulation,10.520957,7.914946
|
28 |
+
26,25,Cannabis,14.317424,3.2114828
|
29 |
+
27,26,Footwear,15.052116,7.6956415
|
30 |
+
28,27,Real Estate,9.536316,6.103533
|
31 |
+
29,28,Relocation,10.205071,7.1883316
|
32 |
+
30,29,Sports betting,3.2779586,2.443366
|
33 |
+
31,30,Narratives,7.613535,1.8300554
|
34 |
+
32,31,Dating,4.788838,2.1900373
|
35 |
+
33,32,Apparel/Clothing,14.394226,7.3073387
|
36 |
+
34,33,User Authentication,5.265638,6.4014487
|
37 |
+
35,34,Academicwriting,6.9187264,3.4357684
|
38 |
+
36,35,Sports,7.4969172,-2.086585
|
39 |
+
37,36,Fashion/Lifestyle Products,13.821669,7.7150764
|
40 |
+
38,37,Diverse events,9.437052,2.2438836
|
41 |
+
39,38,Blockchain/Cryptocurrency,7.7586045,6.9439344
|
42 |
+
40,39,Online Businesses/Marketing,6.522259,5.219268
|
43 |
+
41,40,Healthcare,11.425277,2.3801014
|
44 |
+
42,41,Home Decor,12.878046,7.2632184
|
45 |
+
43,42,Biomedicine,12.789575,2.3376262
|
46 |
+
44,43,Jewelry,14.259997,8.653363
|
47 |
+
45,44,Addiction,11.561383,1.3774762
|
48 |
+
46,45,Products,11.711758,8.423251
|
49 |
+
47,46,Multi-purposefulness,11.080702,7.4574013
|
50 |
+
48,47,"Mass transit,",9.910158,5.4402313
|
51 |
+
49,48,Ethernet,6.9763823,7.7909245
|
52 |
+
50,49,Legal,9.516912,4.636553
|
53 |
+
51,50,E-commerce,13.263438,8.6548195
|
54 |
+
52,51,Audio,7.717162,8.903019
|
55 |
+
53,52,Infrastructure,10.52904,5.369669
|
56 |
+
54,53,Firearms,11.062812,9.268473
|
57 |
+
55,54,Freight/Logistics,9.551044,7.0336204
|
58 |
+
56,55,Products,12.073747,7.645973
|
59 |
+
57,56,Vaccinations,11.9387045,2.7824683
|
60 |
+
58,57,Artwork,11.019163,4.1677165
|
61 |
+
59,58,Viticulture,14.223523,5.0761614
|
62 |
+
60,59,WordPress,5.9597983,5.824579
|
63 |
+
61,60,Cosmetics/Dermatology,15.093273,3.4669027
|
64 |
+
62,61,Software,6.375921,6.4298844
|
65 |
+
63,62,Dentistry,14.76626,1.1620314
|
66 |
+
64,63,Pest Control,13.201735,3.6806118
|
67 |
+
65,64,SEO,5.720493,5.238112
|
68 |
+
66,65,Lottery,1.7142816,2.9782674
|
69 |
+
67,66,Narratives,8.460977,1.0804662
|
70 |
+
68,67,Waste Reduction & Recycling,10.634534,6.959523
|
71 |
+
69,68,Communication,6.438943,5.9467845
|
72 |
+
70,69,Orthopedics,13.005415,1.1908791
|
73 |
+
71,70,Home Decor & Furniture,12.732457,7.876862
|
74 |
+
72,71,Education,7.6568975,3.4944353
|
75 |
+
73,72,Sports,7.295141,-0.7343214
|
76 |
+
74,73,Social Media Advertising,6.133886,4.8547883
|
77 |
+
75,74,Privacy,4.756733,6.3598356
|
78 |
+
76,75,Website design,6.1168823,5.465095
|
79 |
+
77,76,Roofing,11.389448,8.080609
|
80 |
+
78,77,Nutrition/Supplements,13.631578,2.5334294
|
81 |
+
79,78,Haircare/Hairstyling,15.544645,4.54254
|
82 |
+
80,79,Cookies,4.341592,6.819268
|
83 |
+
81,80,International Trade,8.993828,6.4757586
|
84 |
+
82,81,Entrepreneurial Resources,9.435777,5.3340797
|
85 |
+
83,82,Cricket,6.5171986,-1.245905
|
86 |
+
84,83,Crafts,13.852216,7.049825
|
87 |
+
85,84,Floristry,13.407425,5.8741536
|
88 |
+
86,85,Genealogy,9.530803,1.6548243
|
89 |
+
87,86,Mental Health,11.074349,1.6069281
|
90 |
+
88,87,Volunteerism,10.145443,3.6734574
|
91 |
+
89,88,Lighting,11.385381,8.93693
|
92 |
+
90,89,Artificial Intelligence,6.5306387,6.2178063
|
93 |
+
91,90,Business,7.471462,6.4142885
|
94 |
+
92,91,E-commerce,13.638669,6.5098934
|
95 |
+
93,92,Urbanization/Over-tourism,10.221115,6.100654
|
96 |
+
94,93,Events,10.8449,3.9822264
|
97 |
+
95,94,Pharmaceuticals/Biotechnology,12.318266,2.4331784
|
98 |
+
96,95,Professional Wrestling,6.856304,-0.65598303
|
99 |
+
97,96,Various,9.3211975,3.4894605
|
100 |
+
98,97,Medicine,13.17882,2.1281319
|
101 |
+
99,98,Community Engagement,9.848856,3.5187004
|
102 |
+
100,99,Fitness,12.504849,0.9134393
|
103 |
+
101,100,Bathroom Design & Toilet Engineering,11.779076,7.2920136
|
104 |
+
102,101,Business Development,7.328447,5.659843
|
105 |
+
103,102,Sports,7.6370654,-1.0701839
|
106 |
+
104,103,Sexuality,13.817207,1.6510898
|
assets/data/plots/all_dumps_bad/agg_score.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"data": {"big-run-refinedweb": {"x": [0.0, 2.0971520000000003, 4.194304000000001, 6.291456, 8.388608000000001, 10.48576, 12.582912, 14.680064000000002, 16.777216000000003, 18.874368, 20.97152, 23.068672000000003, 25.165824, 27.262976000000002, 28.311552000000002, 29.360128000000003, 31.45728, 33.554432000000006, 35.651584, 37.748736, 39.845888, 41.94304, 44.040192000000005, 46.137344000000006, 48.234496, 50.331648, 52.4288, 54.525952000000004, 56.623104000000005, 58.720256000000006, 60.817408, 62.91456, 65.011712, 67.10886400000001, 69.206016, 71.303168, 73.40032000000001, 75.497472, 77.59462400000001, 79.691776, 81.788928, 83.88608, 85.983232, 88.08038400000001, 90.177536, 92.27468800000001, 94.37184, 96.468992, 98.56614400000001, 100.663296, 102.76044800000001, 104.8576, 106.95475200000001, 109.05190400000001, 111.149056, 113.24620800000001, 115.34336, 117.44051200000001, 119.537664, 121.634816, 123.73196800000001, 125.82912, 127.92627200000001, 130.023424, 132.120576, 134.21772800000002, 136.31488000000002, 138.412032, 140.509184, 142.606336, 144.70348800000002, 146.80064000000002, 148.897792, 150.994944, 153.092096, 155.18924800000002, 157.28640000000001, 159.383552, 161.480704, 163.577856, 165.67500800000002, 167.77216, 169.869312, 171.966464, 174.06361600000002, 176.16076800000002, 178.25792, 180.355072, 182.452224, 184.54937600000002, 186.64652800000002, 188.74368, 190.840832, 192.937984, 195.03513600000002, 197.13228800000002, 199.22944, 201.326592, 203.423744, 205.52089600000002, 207.61804800000002, 209.7152, 211.812352, 213.90950400000003, 216.00665600000002, 218.10380800000001, 220.20096, 222.298112, 224.39526400000003, 226.49241600000002, 228.589568, 230.68672, 232.783872, 234.88102400000002, 236.97817600000002, 239.075328, 241.17248, 243.269632, 245.36678400000002, 247.46393600000002, 249.561088, 251.65824, 253.75539200000003, 255.85254400000002, 257.949696, 260.046848, 262.144, 264.241152, 266.338304, 268.43545600000004, 270.53260800000004, 
272.62976000000003, 274.726912, 276.824064, 278.921216, 281.018368, 283.11552, 285.212672, 287.309824, 289.40697600000004, 291.50412800000004, 293.60128000000003, 295.698432, 297.795584, 299.892736, 301.989888, 304.08704, 306.184192, 308.28134400000005, 310.37849600000004, 312.47564800000004, 314.57280000000003, 316.669952, 318.767104, 320.864256, 322.961408, 325.05856, 327.155712, 329.25286400000005, 331.35001600000004, 333.44716800000003, 335.54432, 337.641472, 339.738624, 341.835776, 343.932928, 346.03008, 348.12723200000005, 350.22438400000004], "y": [0.3308933284133672, 0.3534814938902855, 0.3764607086777687, 0.38782499730587, 0.3981050960719585, 0.4028486795723438, 0.4125883243978023, 0.4117814563214779, 0.414029736071825, 0.4197172522544861, 0.4211113378405571, 0.4279881417751312, 0.4280137903988361, 0.4280424378812313, 0.4291964024305343, 0.4326301179826259, 0.4371833503246307, 0.4346669465303421, 0.4336562640964985, 0.4432648755609989, 0.4401291646063328, 0.4394684173166752, 0.4476612061262131, 0.4465444348752498, 0.4472153298556804, 0.4433343075215816, 0.4510187618434429, 0.4459567815065384, 0.4460812956094742, 0.4498684890568256, 0.4529943652451038, 0.4528274349868297, 0.4551213420927524, 0.4549156539142132, 0.4564928151667118, 0.4576693661510944, 0.4557182416319847, 0.4536240361630916, 0.457439012825489, 0.4570476822555065, 0.4589823484420776, 0.462024375796318, 0.4540738053619861, 0.4550252184271812, 0.4576593860983848, 0.4573238864541054, 0.4575810581445694, 0.4622134491801262, 0.4592566937208175, 0.4614734016358852, 0.4637473002076149, 0.4625372551381588, 0.4613912180066108, 0.4597448222339153, 0.4594792164862156, 0.4662549719214439, 0.4634026065468788, 0.4633508697152138, 0.4635734222829342, 0.4628961533308029, 0.4670135043561458, 0.4639505892992019, 0.4631133340299129, 0.4665167145431041, 0.4672448337078094, 0.4693268723785877, 0.4630668573081493, 0.4676454700529575, 0.4646359197795391, 0.4621579721570015, 0.4692446552217006, 0.4704835228621959, 
0.4663223996758461, 0.4680556617677212, 0.466339822858572, 0.4682099223136902, 0.4711195565760135, 0.4722655527293682, 0.4727961830794811, 0.4676857478916645, 0.4719390422105789, 0.4713102728128433, 0.4712141714990139, 0.4721613004803657, 0.4713456854224205, 0.4682970903813839, 0.4679934531450271, 0.4685162976384163, 0.4679946713149547, 0.4681242071092129, 0.4702276065945625, 0.472664151340723, 0.4730790853500366, 0.4731674715876579, 0.4718914777040481, 0.4719801284372806, 0.4761029370129108, 0.4735167175531387, 0.4730370938777923, 0.4730173237621784, 0.4735377207398414, 0.4777223989367485, 0.4796326830983162, 0.4734170883893966, 0.4739485755562782, 0.4748299159109592, 0.4765299335122108, 0.4745025858283043, 0.4754423759877682, 0.4784592799842357, 0.4761341325938701, 0.4760282784700393, 0.4769757278263569, 0.47154351323843, 0.4786738082766533, 0.4804279990494251, 0.4777076803147793, 0.4798569902777672, 0.4759011939167976, 0.4784621745347976, 0.479673832654953, 0.4780617095530033, 0.48076206818223, 0.47995800152421, 0.4790860973298549, 0.4817167408764362, 0.4811586998403072, 0.482547752559185, 0.4816697351634502, 0.4809327870607376, 0.4816545359790325, 0.4804601892828941, 0.4776877984404564, 0.4813711903989315, 0.4844604581594467, 0.4819537848234176, 0.4820829331874847, 0.4778126627206802, 0.482935007661581, 0.48230691999197, 0.4826001971960068, 0.4823969900608063, 0.4811219945549965, 0.4789146520197391, 0.484035175293684, 0.4848698377609253, 0.4855728335678577, 0.4825376532971859, 0.485215101391077, 0.4824351668357849, 0.4835342466831207, 0.4822137206792831, 0.4838785007596016, 0.4837255179882049, 0.4853012599050998, 0.4857851006090641, 0.4863366298377514, 0.4856646582484245, 0.4842503517866134, 0.4838776960968971, 0.4846346862614155, 0.4837041422724724, 0.4813097268342972, 0.4873070046305656, 0.4841253720223903, 0.4837464913725853, 0.483069509267807, 0.4851242564618587, 0.4861010462045669], "label": "RefinedWeb"}, "big-run-sampled_full_filtered_no_dedup": {"x": 
[0.0, 2.0971520000000003, 4.194304000000001, 6.291456, 8.388608000000001, 10.48576, 12.582912, 14.680064000000002, 16.777216000000003, 18.874368, 20.97152, 23.068672000000003, 25.165824, 27.262976000000002, 28.311552000000002, 29.360128000000003, 31.45728, 33.554432000000006, 35.651584, 37.748736, 39.845888, 41.94304, 44.040192000000005, 46.137344000000006, 48.234496, 50.331648, 52.4288, 54.525952000000004, 56.623104000000005, 58.720256000000006, 60.817408, 62.91456, 65.011712, 67.10886400000001, 69.206016, 71.303168, 73.40032000000001, 75.497472, 77.59462400000001, 79.691776, 81.788928, 83.88608, 85.983232, 88.08038400000001, 90.177536, 92.27468800000001, 94.37184, 96.468992, 98.56614400000001, 100.663296, 102.76044800000001, 104.8576, 106.95475200000001, 109.05190400000001, 111.149056, 113.24620800000001, 115.34336, 117.44051200000001, 119.537664, 121.634816, 123.73196800000001, 125.82912, 127.92627200000001, 130.023424, 132.120576, 134.21772800000002, 136.31488000000002, 138.412032, 140.509184, 142.606336, 144.70348800000002, 146.80064000000002, 148.897792, 150.994944, 153.092096, 155.18924800000002, 157.28640000000001, 159.383552, 161.480704, 163.577856, 165.67500800000002, 167.77216, 169.869312, 171.966464, 174.06361600000002, 176.16076800000002, 178.25792, 180.355072, 182.452224, 184.54937600000002, 186.64652800000002, 188.74368, 190.840832, 192.937984, 195.03513600000002, 197.13228800000002, 199.22944, 201.326592, 203.423744, 205.52089600000002, 207.61804800000002, 209.7152, 211.812352, 213.90950400000003, 216.00665600000002, 218.10380800000001, 220.20096, 222.298112, 224.39526400000003, 226.49241600000002, 228.589568, 230.68672, 232.783872, 234.88102400000002, 236.97817600000002, 239.075328, 241.17248, 243.269632, 245.36678400000002, 247.46393600000002, 249.561088, 251.65824, 253.75539200000003, 255.85254400000002, 257.949696, 260.046848, 262.144, 264.241152, 266.338304, 268.43545600000004, 270.53260800000004, 272.62976000000003, 274.726912, 276.824064, 
278.921216, 281.018368, 283.11552, 285.212672, 287.309824, 289.40697600000004, 291.50412800000004, 293.60128000000003, 295.698432, 297.795584, 299.892736, 301.989888, 304.08704, 306.184192, 308.28134400000005, 310.37849600000004, 312.47564800000004, 314.57280000000003, 316.669952, 318.767104, 320.864256, 322.961408, 325.05856, 327.155712, 329.25286400000005, 331.35001600000004, 333.44716800000003, 335.54432, 337.641472, 339.738624, 341.835776, 343.932928, 346.03008, 348.12723200000005, 350.22438400000004], "y": [0.3308933284133672, 0.3605199865996837, 0.3733148723840713, 0.3882005847990513, 0.3934122696518898, 0.3947227671742439, 0.4042885974049568, 0.3974800482392311, 0.4055779427289963, 0.4133470430970192, 0.4117913842201233, 0.4113653488457203, 0.4149517640471458, 0.4187851920723915, 0.4252083078026771, 0.4206527359783649, 0.4240428246557712, 0.422003373503685, 0.4280910938978195, 0.4244147576391697, 0.4316282644867897, 0.4295645765960216, 0.4310102686285972, 0.4360743537545204, 0.4313482865691185, 0.4350991360843181, 0.4378576353192329, 0.4335876516997814, 0.4347924515604973, 0.4348904751241207, 0.436600212007761, 0.430036511272192, 0.4350974671542644, 0.4399556629359722, 0.4371416717767715, 0.4363861419260502, 0.4376698136329651, 0.4405004419386387, 0.4373639523983001, 0.4379038028419018, 0.4371281825006008, 0.4393439553678036, 0.440426729619503, 0.4401675276458263, 0.4429537951946258, 0.4449137263000011, 0.4434786736965179, 0.4450470842421055, 0.4454202279448509, 0.4394537284970283, 0.442185215651989, 0.4461225643754005, 0.4427758157253265, 0.4430646039545536, 0.4476901069283485, 0.4478763341903686, 0.4493869319558143, 0.4448477327823639, 0.450044184923172, 0.4498609118163585, 0.4457665979862213, 0.4506924152374267, 0.449855338782072, 0.448790930211544, 0.4474099352955818, 0.4546772800385952, 0.4529431238770485, 0.452015146613121, 0.4502020999789238, 0.4493804536759853, 0.4523266032338142, 0.4551868587732315, 0.4501944817602634, 0.4493303671479225, 
0.4526805207133293, 0.4533850513398647, 0.4518048763275146, 0.4518973492085933, 0.4531301632523536, 0.4518006071448326, 0.4553494565188885, 0.4528752230107784, 0.4536322727799415, 0.4561733976006508, 0.4549491256475448, 0.4574789106845855, 0.4577847123146057, 0.4563642293214798, 0.4578686729073524, 0.4561499990522861, 0.4537816494703293, 0.4542164430022239, 0.4559455662965774, 0.4554723873734474, 0.4575514122843742, 0.4575202167034149, 0.4592722058296203, 0.4585275091230869, 0.4580587856471538, 0.456934317946434, 0.4577495418488979, 0.4540119916200638, 0.4570806957781315, 0.4608120545744896, 0.4588425755500793, 0.4578334167599678, 0.4610816091299057, 0.4598177038133144, 0.461849745362997, 0.4631866924464702, 0.4601576402783394, 0.4646804705262184, 0.4632389545440674, 0.4604574106633663, 0.4602976888418197, 0.4581312239170074, 0.4654182009398937, 0.4655338563024997, 0.4616620391607284, 0.461054053157568, 0.4613021649420261, 0.4658613465726375, 0.4633531905710697, 0.4613638147711754, 0.4643996246159076, 0.462500050663948, 0.4650798961520195, 0.4648764543235302, 0.4639869071543216, 0.4634246975183487, 0.46585888043046, 0.4639799632132053, 0.4630857892334461, 0.4644265696406364, 0.4642998576164245, 0.4686848931014538, 0.4687492996454239, 0.4650243632495403, 0.4627032242715359, 0.4665953740477562, 0.4660026729106903, 0.4664581045508384, 0.4676475040614605, 0.4657339677214622, 0.4664678275585174, 0.4673498086631298, 0.4676674827933311, 0.4680955372750759, 0.4681585058569908, 0.4659864418208599, 0.4686457589268684, 0.4661462865769863, 0.4658931568264961, 0.4674226939678192, 0.46805215254426, 0.4682257212698459, 0.4689070098102093, 0.4699570722877979, 0.4655096270143986, 0.4688013233244419, 0.4707522802054882, 0.4661469310522079, 0.4688841328024864, 0.4671329781413078, 0.4662554152309894, 0.4697433896362781, 0.4698473587632179, 0.4676505327224731, 0.4696521013975143], "label": "FineWeb filtered only"}, "big-run-fineweb-cross-dedup-fixed": {"x": [0.0, 2.0971520000000003, 
4.194304000000001, 6.291456, 8.388608000000001, 10.48576, 12.582912, 14.680064000000002, 16.777216000000003, 18.874368, 20.97152, 23.068672000000003, 25.165824, 27.262976000000002, 29.360128000000003, 31.45728, 33.554432000000006, 35.651584, 37.748736, 39.845888, 41.94304, 44.040192000000005, 46.137344000000006, 48.234496, 50.331648, 52.4288, 54.525952000000004, 56.623104000000005, 58.720256000000006, 60.817408, 62.91456, 65.011712, 67.10886400000001, 69.206016, 71.303168, 73.40032000000001, 75.497472, 77.59462400000001, 79.691776, 81.788928, 83.88608, 85.983232, 88.08038400000001, 90.177536, 92.27468800000001, 94.37184, 96.468992, 98.56614400000001, 100.663296, 102.76044800000001, 104.8576, 106.95475200000001, 109.05190400000001, 111.149056, 113.24620800000001, 115.34336, 117.44051200000001, 119.537664, 121.634816, 123.73196800000001, 125.82912, 127.92627200000001, 130.023424, 132.120576, 134.21772800000002, 136.31488000000002, 138.412032, 140.509184, 142.606336, 144.70348800000002, 146.80064000000002, 148.897792, 150.994944, 153.092096, 155.18924800000002, 157.28640000000001, 159.383552, 161.480704, 163.577856, 165.67500800000002, 167.77216, 169.869312, 171.966464, 174.06361600000002, 176.16076800000002, 178.25792, 180.355072, 182.452224, 184.54937600000002, 186.64652800000002, 188.74368, 190.840832, 192.937984, 195.03513600000002, 197.13228800000002, 199.22944, 201.326592, 203.423744, 205.52089600000002, 207.61804800000002, 209.7152, 211.812352, 213.90950400000003, 216.00665600000002, 218.10380800000001, 220.20096, 222.298112, 224.39526400000003, 226.49241600000002, 228.589568, 230.68672, 232.783872, 234.88102400000002, 236.97817600000002, 239.075328, 241.17248, 243.269632, 245.36678400000002, 247.46393600000002, 249.561088, 251.65824, 253.75539200000003, 255.85254400000002, 257.949696, 260.046848, 262.144, 264.241152, 266.338304, 268.43545600000004, 270.53260800000004, 272.62976000000003, 274.726912, 276.824064, 278.921216, 281.018368, 283.11552, 285.212672, 
287.309824, 289.40697600000004, 291.50412800000004, 293.60128000000003, 295.698432, 297.795584, 299.892736, 301.989888, 304.08704, 306.184192, 308.28134400000005, 310.37849600000004, 312.47564800000004, 314.57280000000003, 316.669952, 318.767104, 320.864256, 322.961408, 325.05856, 327.155712, 329.25286400000005, 331.35001600000004, 333.44716800000003, 335.54432, 337.641472, 339.738624, 341.835776, 343.932928, 346.03008, 348.12723200000005, 350.22438400000004], "y": [0.3308933284133672, 0.3551952373236418, 0.3736435137689113, 0.3814037963747978, 0.3948809280991554, 0.3996850810945034, 0.4089604057371616, 0.4100853353738785, 0.4119834117591381, 0.4168377220630646, 0.4186493046581745, 0.4169826358556747, 0.4234288297593593, 0.4229162000119686, 0.4273439794778824, 0.4290364980697632, 0.4291782416403293, 0.4296907968819141, 0.4311576783657074, 0.4326641112565994, 0.430318683385849, 0.430436260998249, 0.4339037239551544, 0.4363459683954716, 0.4357402548193931, 0.4342963136732578, 0.4366712383925915, 0.4363959729671478, 0.436981026083231, 0.4447868093848228, 0.4411709941923618, 0.4406092017889023, 0.4424176625907421, 0.4423875361680984, 0.4422253370285034, 0.4410557933151722, 0.4447037056088447, 0.4454837813973427, 0.4435960277915001, 0.4468514993786812, 0.4479999616742134, 0.4428562931716442, 0.445764634758234, 0.4456562362611294, 0.4488007053732872, 0.4475954286754131, 0.4468922987580299, 0.4548408314585686, 0.4511027485132217, 0.4530330970883369, 0.4483681954443455, 0.4531726539134979, 0.45334542542696, 0.4544384703040123, 0.4530758671462536, 0.4540613554418087, 0.4510113634169101, 0.4538320265710354, 0.4518541917204857, 0.4536847211420536, 0.4532708041369915, 0.4552236869931221, 0.455034039914608, 0.4562875479459762, 0.4532428197562694, 0.4574853852391243, 0.4517738744616508, 0.4579889141023159, 0.4538268558681011, 0.456730306148529, 0.4526018649339676, 0.4562746733427048, 0.4560015797615051, 0.4555426277220249, 0.4561501257121563, 0.4524396173655987, 
0.4557023830711841, 0.4589769169688225, 0.4581078588962555, 0.4620813727378845, 0.4586601965129375, 0.4568093195557594, 0.4569808952510357, 0.4567535072565079, 0.4575250148773193, 0.4606908001005649, 0.4603964723646641, 0.4622848592698574, 0.4594669193029403, 0.4640629850327968, 0.4604269936680794, 0.4634841009974479, 0.4644578285515308, 0.4642514958977699, 0.4666304066777229, 0.4616626128554344, 0.4588956907391548, 0.4620226770639419, 0.4628621749579906, 0.4595407098531723, 0.4635516740381717, 0.46005355194211, 0.4601523540914058, 0.4644204638898372, 0.4620639197528362, 0.46614545956254, 0.4636696502566337, 0.4610077403485775, 0.4640897810459137, 0.4636163525283336, 0.4630545899271965, 0.466012816876173, 0.4650349207222461, 0.4613720141351223, 0.4644323363900184, 0.4647249802947044, 0.4656480401754379, 0.4651664271950722, 0.4622530452907085, 0.4655019529163837, 0.4650313258171081, 0.466718140989542, 0.4661559611558914, 0.4661237150430679, 0.4664223715662956, 0.4640601389110088, 0.4642657749354839, 0.4633881188929081, 0.4629989042878151, 0.4685831367969513, 0.4675870984792709, 0.467183344066143, 0.4678030684590339, 0.4660939238965511, 0.4691914953291416, 0.4670972637832165, 0.468262892216444, 0.4672016054391861, 0.4676182121038437, 0.4698677137494087, 0.4658828042447567, 0.4701816700398922, 0.4684622809290886, 0.466015312820673, 0.4675401039421558, 0.4693200923502445, 0.4702670983970165, 0.4679145030677318, 0.4676233418285846, 0.4674933589994907, 0.4678357951343059, 0.4669915996491909, 0.4657857678830623, 0.4666901864111423, 0.4669371582567692, 0.4672787226736545, 0.4684535376727581, 0.4685697965323925, 0.4694835692644119, 0.4683254994451999, 0.4712230190634727, 0.4683987610042095, 0.4707653746008873, 0.4663059376180172, 0.4683133698999882, 0.4686385430395603, 0.4657671600580215, 0.4692615270614624], "label": "FineWeb full MinHash"}}, "layout": {"title": {"text": "Dedup across all dumps does not improve performance"}}}
|
assets/images/IMG_7537D08D7F41-1.jpeg
ADDED
![]() |
Git LFS Details
|
assets/images/IMG_A95961668B3F-1.jpeg
ADDED
![]() |
Git LFS Details
|
assets/images/IMG_C4260C5C58DC-1.jpeg
ADDED
![]() |
Git LFS Details
|
assets/images/IMG_DA188FF29F45-1.jpeg
ADDED
![]() |
Git LFS Details
|
assets/images/image 1.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 10.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 11.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 12.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 13.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 14.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 15.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 16.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 17.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 2.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 3.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 4.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 5.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 6.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 7.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 8.png
ADDED
![]() |
Git LFS Details
|
assets/images/image 9.png
ADDED
![]() |
Git LFS Details
|
assets/images/image.png
ADDED
![]() |
Git LFS Details
|
assets/images/rotation.jpeg
ADDED
![]() |
Git LFS Details
|
assets/images/rotation_speed.jpeg
ADDED
![]() |
Git LFS Details
|
dist/bibliography.bib
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
@article{radford2019language,
|
2 |
-
title={Language Models are Unsupervised Multitask Learners},
|
3 |
-
author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya},
|
4 |
-
year={2019}
|
5 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
dist/distill.bundle.js
DELETED
The diff for this file is too large to render.
See raw diff
|
|
dist/distill.bundle.js.map
DELETED
The diff for this file is too large to render.
See raw diff
|
|
dist/index.html
DELETED
@@ -1,121 +0,0 @@
|
|
1 |
-
<!DOCTYPE html>
|
2 |
-
<html>
|
3 |
-
<head>
|
4 |
-
<script src="distill.bundle.js" type="module" fetchpriority="high" blocking></script>
|
5 |
-
<script src="main.bundle.js" type="module" fetchpriority="low" defer></script>
|
6 |
-
<meta name="viewport" content="width=device-width, initial-scale=1">
|
7 |
-
<meta charset="utf8">
|
8 |
-
<base target="_blank">
|
9 |
-
<title>FineWeb: decanting the web for the finest text data at scale</title>
|
10 |
-
<link rel="stylesheet" href="style.css">
|
11 |
-
</head>
|
12 |
-
|
13 |
-
<body>
|
14 |
-
<d-front-matter>
|
15 |
-
<script id='distill-front-matter' type="text/json">{
|
16 |
-
"title": "Nanotron Gigablogpost",
|
17 |
-
"description": "This blog covers everything.",
|
18 |
-
"published": "May 28, 2024",
|
19 |
-
"affiliation": {"name": "HuggingFace"},
|
20 |
-
"authors": [
|
21 |
-
{
|
22 |
-
"author":"John Doe",
|
23 |
-
"authorURL":"https://huggingface.co/"
|
24 |
-
},
|
25 |
-
],
|
26 |
-
"katex": {
|
27 |
-
"delimiters": [
|
28 |
-
{"left": "$$", "right": "$$", "display": false}
|
29 |
-
]
|
30 |
-
}
|
31 |
-
}
|
32 |
-
</script>
|
33 |
-
</d-front-matter>
|
34 |
-
<d-title>
|
35 |
-
<h1 class="l-page" style="text-align: center;">Nanotron Gigablogpost</h1>
|
36 |
-
<div id="title-plot" class="main-plot-container l-screen">
|
37 |
-
<figure>
|
38 |
-
<img src="assets/images/banner.png" alt="FineWeb">
|
39 |
-
</figure>
|
40 |
-
<div id="clusters-plot">
|
41 |
-
<img src="assets/images/clusters.png" alt="Clusters">
|
42 |
-
</div>
|
43 |
-
</div>
|
44 |
-
</d-title>
|
45 |
-
<d-byline></d-byline>
|
46 |
-
<d-article>
|
47 |
-
<d-contents>
|
48 |
-
</d-contents>
|
49 |
-
|
50 |
-
<p>The performance of a large language model (LLM) depends heavily on the quality and size of its pretraining framework.</p>
|
51 |
-
</d-article>
|
52 |
-
|
53 |
-
<d-appendix>
|
54 |
-
<d-bibliography src="bibliography.bib"></d-bibliography>
|
55 |
-
</d-appendix>
|
56 |
-
|
57 |
-
<script>
|
58 |
-
const article = document.querySelector('d-article');
const toc = document.querySelector('d-contents');
if (toc) {
    const headings = article.querySelectorAll('h2, h3, h4');
    // Headings that actually receive a ToC entry. The scroll handler must
    // index THIS list, not `headings`: headings skipped below (page title,
    // `no-toc`) have no corresponding <a> in the nav, so indexing
    // `toc_links` by the raw heading index would highlight the wrong link.
    const tocHeadings = [];
    let ToC = `<nav role="navigation" class="l-text figcaption"><h3>Table of contents</h3>`;
    let prevLevel = 0;

    for (const el of headings) {
        // should element be included in TOC?
        const isInTitle = el.parentElement.tagName == 'D-TITLE';
        const isException = el.getAttribute('no-toc');
        if (isInTitle || isException) continue;
        // Derive an anchor id from the heading text (spaces -> underscores).
        el.setAttribute('id', el.textContent.toLowerCase().replaceAll(" ", "_"))
        const link = '<a target="_self" href="' + '#' + el.getAttribute('id') + '">' + el.textContent + '</a>';
        tocHeadings.push(el);

        // h2 -> level 0 (plain div), h3 -> 1, h4 -> 2 (nested <ul>s).
        const level = el.tagName === 'H2' ? 0 : (el.tagName === 'H3' ? 1 : 2);
        while (prevLevel < level) {
            ToC += '<ul>'
            prevLevel++;
        }
        while (prevLevel > level) {
            ToC += '</ul>'
            prevLevel--;
        }
        if (level === 0)
            ToC += '<div>' + link + '</div>';
        else
            ToC += '<li>' + link + '</li>';
    }

    // Close any <ul>s still open after the last heading.
    while (prevLevel > 0) {
        ToC += '</ul>'
        prevLevel--;
    }
    ToC += '</nav>';
    toc.innerHTML = ToC;
    toc.setAttribute('prerendered', 'true');
    const toc_links = document.querySelectorAll('d-contents > nav a');

    // Highlight the ToC link for the deepest heading scrolled past the
    // top of the viewport (50px tolerance); clear all if none apply.
    window.addEventListener('scroll', (_event) => {
        if (typeof (tocHeadings) != 'undefined' && tocHeadings != null && typeof (toc_links) != 'undefined' && toc_links != null) {
            // Iterate backwards; the first heading above the fold wins.
            find_active: {
                for (let i = tocHeadings.length - 1; i >= 0; i--) {
                    if (tocHeadings[i].getBoundingClientRect().top - 50 <= 0) {
                        if (!toc_links[i].classList.contains("active")) {
                            toc_links.forEach((link, _index) => {
                                link.classList.remove("active");
                            });
                            toc_links[i].classList.add('active');
                        }
                        break find_active;
                    }
                }
                toc_links.forEach((link, _index) => {
                    link.classList.remove("active");
                });
            }
        }
    });
}
|
119 |
-
</script>
|
120 |
-
</body>
|
121 |
-
</html>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
dist/main.bundle.js
DELETED
The diff for this file is too large to render.
See raw diff
|
|
dist/main.bundle.js.LICENSE.txt
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
/* @license
|
2 |
-
Papa Parse
|
3 |
-
v5.4.1
|
4 |
-
https://github.com/mholt/PapaParse
|
5 |
-
License: MIT
|
6 |
-
*/
|
7 |
-
|
8 |
-
/*! For license information please see plotly-basic.min.js.LICENSE.txt */
|
9 |
-
|
10 |
-
/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */
|
11 |
-
|
12 |
-
/**
|
13 |
-
* @license
|
14 |
-
* Lodash <https://lodash.com/>
|
15 |
-
* Copyright OpenJS Foundation and other contributors <https://openjsf.org/>
|
16 |
-
* Released under MIT license <https://lodash.com/license>
|
17 |
-
* Based on Underscore.js 1.8.3 <http://underscorejs.org/LICENSE>
|
18 |
-
* Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
|
19 |
-
*/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
dist/main.bundle.js.map
DELETED
The diff for this file is too large to render.
See raw diff
|
|
package-lock.json
CHANGED
@@ -9,6 +9,7 @@
|
|
9 |
"version": "1.0.0",
|
10 |
"license": "ISC",
|
11 |
"dependencies": {
|
|
|
12 |
"lodash": "^4.17.21",
|
13 |
"papaparse": "^5.4.1",
|
14 |
"plotly.js-basic-dist-min": "^2.33.0"
|
@@ -5294,6 +5295,29 @@
|
|
5294 |
"node": ">=6"
|
5295 |
}
|
5296 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5297 |
"node_modules/kind-of": {
|
5298 |
"version": "6.0.3",
|
5299 |
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
|
|
|
9 |
"version": "1.0.0",
|
10 |
"license": "ISC",
|
11 |
"dependencies": {
|
12 |
+
"katex": "^0.16.11",
|
13 |
"lodash": "^4.17.21",
|
14 |
"papaparse": "^5.4.1",
|
15 |
"plotly.js-basic-dist-min": "^2.33.0"
|
|
|
5295 |
"node": ">=6"
|
5296 |
}
|
5297 |
},
|
5298 |
+
"node_modules/katex": {
|
5299 |
+
"version": "0.16.11",
|
5300 |
+
"resolved": "https://registry.npmjs.org/katex/-/katex-0.16.11.tgz",
|
5301 |
+
"integrity": "sha512-RQrI8rlHY92OLf3rho/Ts8i/XvjgguEjOkO1BEXcU3N8BqPpSzBNwV/G0Ukr+P/l3ivvJUE/Fa/CwbS6HesGNQ==",
|
5302 |
+
"funding": [
|
5303 |
+
"https://opencollective.com/katex",
|
5304 |
+
"https://github.com/sponsors/katex"
|
5305 |
+
],
|
5306 |
+
"dependencies": {
|
5307 |
+
"commander": "^8.3.0"
|
5308 |
+
},
|
5309 |
+
"bin": {
|
5310 |
+
"katex": "cli.js"
|
5311 |
+
}
|
5312 |
+
},
|
5313 |
+
"node_modules/katex/node_modules/commander": {
|
5314 |
+
"version": "8.3.0",
|
5315 |
+
"resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz",
|
5316 |
+
"integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==",
|
5317 |
+
"engines": {
|
5318 |
+
"node": ">= 12"
|
5319 |
+
}
|
5320 |
+
},
|
5321 |
"node_modules/kind-of": {
|
5322 |
"version": "6.0.3",
|
5323 |
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
|
package.json
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
{
|
2 |
"dependencies": {
|
|
|
3 |
"lodash": "^4.17.21",
|
4 |
"papaparse": "^5.4.1",
|
5 |
"plotly.js-basic-dist-min": "^2.33.0"
|
|
|
1 |
{
|
2 |
"dependencies": {
|
3 |
+
"katex": "^0.16.11",
|
4 |
"lodash": "^4.17.21",
|
5 |
"papaparse": "^5.4.1",
|
6 |
"plotly.js-basic-dist-min": "^2.33.0"
|
python/memory/__init__.py
ADDED
File without changes
|
python/memory/explorations.ipynb
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 3,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"from utils import activation_memory, param_grads_opt"
|
10 |
+
]
|
11 |
+
},
|
12 |
+
{
|
13 |
+
"cell_type": "code",
|
14 |
+
"execution_count": null,
|
15 |
+
"metadata": {},
|
16 |
+
"outputs": [],
|
17 |
+
"source": [
|
18 |
+
"tpl = (\n",
|
19 |
+
" a, # attention heads\n",
|
20 |
+
" b, # micro batch size\n",
|
21 |
+
" h, # hidden dimension size\n",
|
22 |
+
" L, # number of layers\n",
|
23 |
+
" s, # sequence length\n",
|
24 |
+
" mixed=True,\n",
|
25 |
+
" recomputation=None\n",
|
26 |
+
")"
|
27 |
+
]
|
28 |
+
}
|
29 |
+
],
|
30 |
+
"metadata": {
|
31 |
+
"kernelspec": {
|
32 |
+
"display_name": "jupyter",
|
33 |
+
"language": "python",
|
34 |
+
"name": "python3"
|
35 |
+
},
|
36 |
+
"language_info": {
|
37 |
+
"codemirror_mode": {
|
38 |
+
"name": "ipython",
|
39 |
+
"version": 3
|
40 |
+
},
|
41 |
+
"file_extension": ".py",
|
42 |
+
"mimetype": "text/x-python",
|
43 |
+
"name": "python",
|
44 |
+
"nbconvert_exporter": "python",
|
45 |
+
"pygments_lexer": "ipython3",
|
46 |
+
"version": "3.10.14"
|
47 |
+
}
|
48 |
+
},
|
49 |
+
"nbformat": 4,
|
50 |
+
"nbformat_minor": 2
|
51 |
+
}
|
python/memory/utils.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def activation_memory(
    a,  # attention heads
    b,  # micro batch size
    h,  # hidden dimension size
    L,  # number of layers
    s,  # sequence length
    mixed=True,
    recomputation=None
):
    """Estimate transformer activation memory in bytes.

    Formulas follow "Reducing Activation Recomputation in Large
    Transformer Models" (https://arxiv.org/pdf/2205.05198).

    Args:
        a: number of attention heads.
        b: micro batch size.
        h: hidden dimension size.
        L: number of layers.
        s: sequence length.
        mixed: if True, assume mixed-precision training (2 bytes per
            activation value), otherwise full precision (4 bytes).
        recomputation: None (no recomputation, eq. (2)), "selective"
            (eq. (6)), or "full" (only layer inputs are kept).

    Returns:
        Estimated activation memory in bytes.

    Raises:
        ValueError: if ``recomputation`` is not one of the supported values.
    """
    if recomputation is None:
        one_layer = s * b * h * (34 + (5 * a * s / h))  # eq (2)
    elif recomputation == "selective":
        one_layer = s * b * h * 34  # eq (6)
    elif recomputation == "full":
        one_layer = s * b * h * 2
    else:
        # Name the offending value instead of raising a bare ValueError().
        raise ValueError(
            f"recomputation must be None, 'selective' or 'full', got {recomputation!r}"
        )

    input_dropout = s * b * h  # section 4.3

    if mixed:
        bytes_per_value = 2
    else:
        bytes_per_value = 4

    return bytes_per_value * L * one_layer + input_dropout
|
29 |
+
|
30 |
+
|
31 |
+
def param_grads_opt(
    h,  # hidden dimension size
    L,  # number of layers
    s,  # sequence length
    v,  # vocab size
    k=8,  # parameters for optimizer (Adam: 8 = 4 bytes moments + 4 bytes variance)
    mixed=True  # mixed precision training
):
    """Estimate memory (bytes) for parameters, gradients and optimizer state.

    Parameter count follows
    https://michaelwornow.net/2024/01/18/counting-params-in-transformer
    (no GQA/MQA); optimizer-state accounting follows section 3.1 of
    https://arxiv.org/pdf/1910.02054.

    Returns a tuple ``(param_bytes, grad_bytes, optimizer_bytes)``.
    """
    embedding_params = h * (v + s)
    per_layer_params = 12 * h ** 2 + 13 * h
    final_norm_params = 2 * h

    total_params = embedding_params + L * per_layer_params + final_norm_params

    if mixed:
        # Mixed precision keeps an extra fp32 master copy of the weights
        # in the optimizer state, and stores params/grads in 2 bytes.
        k += 4
        bytes_per_param = 2
    else:
        bytes_per_param = 4

    param_bytes = bytes_per_param * total_params
    return param_bytes, param_bytes, k * total_params
|
src/bibliography.bib
ADDED
@@ -0,0 +1,334 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
@article{radford2019language,
|
2 |
+
title={Language Models are Unsupervised Multitask Learners},
|
3 |
+
author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya},
|
4 |
+
year={2019}
|
5 |
+
}
|
6 |
+
@inproceedings{barbaresi-2021-trafilatura,
|
7 |
+
title = {Trafilatura: A Web Scraping Library and Command-Line Tool for Text Discovery and Extraction},
|
8 |
+
author = "Barbaresi, Adrien",
|
9 |
+
booktitle = "Proceedings of the Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: System Demonstrations",
|
10 |
+
pages = "122--131",
|
11 |
+
publisher = "Association for Computational Linguistics",
|
12 |
+
url = "https://aclanthology.org/2021.acl-demo.15",
|
13 |
+
year = 2021,
|
14 |
+
}
|
15 |
+
@misc{penedo2023refinedweb,
|
16 |
+
title={The RefinedWeb Dataset for Falcon LLM: Outperforming Curated Corpora with Web Data, and Web Data Only},
|
17 |
+
author={Guilherme Penedo and Quentin Malartic and Daniel Hesslow and Ruxandra Cojocaru and Alessandro Cappelli and Hamza Alobeidli and Baptiste Pannier and Ebtesam Almazrouei and Julien Launay},
|
18 |
+
year={2023},
|
19 |
+
eprint={2306.01116},
|
20 |
+
archivePrefix={arXiv},
|
21 |
+
primaryClass={cs.CL}
|
22 |
+
}
|
23 |
+
@article{joulin2016fasttext,
|
24 |
+
title={FastText.zip: Compressing text classification models},
|
25 |
+
author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Douze, Matthijs and J{\'e}gou, H{\'e}rve and Mikolov, Tomas},
|
26 |
+
journal={arXiv preprint arXiv:1612.03651},
|
27 |
+
year={2016}
|
28 |
+
}
|
29 |
+
@article{joulin2016bag,
|
30 |
+
title={Bag of Tricks for Efficient Text Classification},
|
31 |
+
author={Joulin, Armand and Grave, Edouard and Bojanowski, Piotr and Mikolov, Tomas},
|
32 |
+
journal={arXiv preprint arXiv:1607.01759},
|
33 |
+
year={2016}
|
34 |
+
}
|
35 |
+
@misc{penedo2024datatrove,
|
36 |
+
author = {Penedo, Guilherme and Kydlíček, Hynek and Cappelli, Alessandro and Sasko, Mario and Wolf, Thomas},
|
37 |
+
title = {DataTrove: large scale data processing},
|
38 |
+
year = {2024},
|
39 |
+
publisher = {GitHub},
|
40 |
+
journal = {GitHub repository},
|
41 |
+
url = {https://github.com/huggingface/datatrove}
|
42 |
+
}
|
43 |
+
@misc{chiang2024chatbot,
|
44 |
+
title={Chatbot Arena: An Open Platform for Evaluating LLMs by Human Preference},
|
45 |
+
author={Wei-Lin Chiang and Lianmin Zheng and Ying Sheng and Anastasios Nikolas Angelopoulos and Tianle Li and Dacheng Li and Hao Zhang and Banghua Zhu and Michael Jordan and Joseph E. Gonzalez and Ion Stoica},
|
46 |
+
year={2024},
|
47 |
+
eprint={2403.04132},
|
48 |
+
archivePrefix={arXiv},
|
49 |
+
primaryClass={cs.AI}
|
50 |
+
}
|
51 |
+
@misc{rae2022scaling,
|
52 |
+
title={Scaling Language Models: Methods, Analysis & Insights from Training Gopher},
|
53 |
+
author={Jack W. Rae and Sebastian Borgeaud and Trevor Cai and Katie Millican and Jordan Hoffmann and Francis Song and John Aslanides and Sarah Henderson and Roman Ring and Susannah Young and Eliza Rutherford and Tom Hennigan and Jacob Menick and Albin Cassirer and Richard Powell and George van den Driessche and Lisa Anne Hendricks and Maribeth Rauh and Po-Sen Huang and Amelia Glaese and Johannes Welbl and Sumanth Dathathri and Saffron Huang and Jonathan Uesato and John Mellor and Irina Higgins and Antonia Creswell and Nat McAleese and Amy Wu and Erich Elsen and Siddhant Jayakumar and Elena Buchatskaya and David Budden and Esme Sutherland and Karen Simonyan and Michela Paganini and Laurent Sifre and Lena Martens and Xiang Lorraine Li and Adhiguna Kuncoro and Aida Nematzadeh and Elena Gribovskaya and Domenic Donato and Angeliki Lazaridou and Arthur Mensch and Jean-Baptiste Lespiau and Maria Tsimpoukelli and Nikolai Grigorev and Doug Fritz and Thibault Sottiaux and Mantas Pajarskas and Toby Pohlen and Zhitao Gong and Daniel Toyama and Cyprien de Masson d'Autume and Yujia Li and Tayfun Terzi and Vladimir Mikulik and Igor Babuschkin and Aidan Clark and Diego de Las Casas and Aurelia Guy and Chris Jones and James Bradbury and Matthew Johnson and Blake Hechtman and Laura Weidinger and Iason Gabriel and William Isaac and Ed Lockhart and Simon Osindero and Laura Rimell and Chris Dyer and Oriol Vinyals and Kareem Ayoub and Jeff Stanway and Lorrayne Bennett and Demis Hassabis and Koray Kavukcuoglu and Geoffrey Irving},
|
54 |
+
year={2022},
|
55 |
+
eprint={2112.11446},
|
56 |
+
archivePrefix={arXiv},
|
57 |
+
primaryClass={cs.CL}
|
58 |
+
}
|
59 |
+
@misc{lee2022deduplicating,
|
60 |
+
title={Deduplicating Training Data Makes Language Models Better},
|
61 |
+
author={Katherine Lee and Daphne Ippolito and Andrew Nystrom and Chiyuan Zhang and Douglas Eck and Chris Callison-Burch and Nicholas Carlini},
|
62 |
+
year={2022},
|
63 |
+
eprint={2107.06499},
|
64 |
+
archivePrefix={arXiv},
|
65 |
+
primaryClass={cs.CL}
|
66 |
+
}
|
67 |
+
@misc{carlini2023quantifying,
|
68 |
+
title={Quantifying Memorization Across Neural Language Models},
|
69 |
+
author={Nicholas Carlini and Daphne Ippolito and Matthew Jagielski and Katherine Lee and Florian Tramer and Chiyuan Zhang},
|
70 |
+
year={2023},
|
71 |
+
eprint={2202.07646},
|
72 |
+
archivePrefix={arXiv},
|
73 |
+
primaryClass={cs.LG}
|
74 |
+
}
|
75 |
+
@misc{raffel2023exploring,
|
76 |
+
title={Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
|
77 |
+
author={Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
|
78 |
+
year={2023},
|
79 |
+
eprint={1910.10683},
|
80 |
+
archivePrefix={arXiv},
|
81 |
+
primaryClass={cs.LG}
|
82 |
+
}
|
83 |
+
@misc{touvron2023llama,
|
84 |
+
title={LLaMA: Open and Efficient Foundation Language Models},
|
85 |
+
author={Hugo Touvron and Thibaut Lavril and Gautier Izacard and Xavier Martinet and Marie-Anne Lachaux and Timothée Lacroix and Baptiste Rozière and Naman Goyal and Eric Hambro and Faisal Azhar and Aurelien Rodriguez and Armand Joulin and Edouard Grave and Guillaume Lample},
|
86 |
+
year={2023},
|
87 |
+
eprint={2302.13971},
|
88 |
+
archivePrefix={arXiv},
|
89 |
+
primaryClass={cs.CL}
|
90 |
+
}
|
91 |
+
@article{dolma,
|
92 |
+
title = {Dolma: an Open Corpus of Three Trillion Tokens for Language Model Pretraining Research},
|
93 |
+
author={
|
94 |
+
Luca Soldaini and Rodney Kinney and Akshita Bhagia and Dustin Schwenk and David Atkinson and
|
95 |
+
Russell Authur and Ben Bogin and Khyathi Chandu and Jennifer Dumas and Yanai Elazar and
|
96 |
+
Valentin Hofmann and Ananya Harsh Jha and Sachin Kumar and Li Lucy and Xinxi Lyu and
|
97 |
+
Nathan Lambert and Ian Magnusson and Jacob Morrison and Niklas Muennighoff and Aakanksha Naik and
|
98 |
+
Crystal Nam and Matthew E. Peters and Abhilasha Ravichander and Kyle Richardson and Zejiang Shen and
|
99 |
+
Emma Strubell and Nishant Subramani and Oyvind Tafjord and Pete Walsh and Luke Zettlemoyer and
|
100 |
+
Noah A. Smith and Hannaneh Hajishirzi and Iz Beltagy and Dirk Groeneveld and Jesse Dodge and Kyle Lo
|
101 |
+
},
|
102 |
+
year = {2024},
|
103 |
+
journal={arXiv preprint},
|
104 |
+
}
|
105 |
+
@article{gao2020pile,
|
106 |
+
title={The {P}ile: An 800{GB} dataset of diverse text for language modeling},
|
107 |
+
author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and others},
|
108 |
+
journal={arXiv preprint arXiv:2101.00027},
|
109 |
+
year={2020}
|
110 |
+
}
|
111 |
+
@misc{cerebras2023slimpajama,
|
112 |
+
author = {Soboleva, Daria and Al-Khateeb, Faisal and Myers, Robert and Steeves, Jacob R and Hestness, Joel and Dey, Nolan},
|
113 |
+
title = {SlimPajama: A 627B token cleaned and deduplicated version of RedPajama},
|
114 |
+
month = {June},
|
115 |
+
year = 2023,
|
116 |
+
url = {https://huggingface.co/datasets/cerebras/SlimPajama-627B},
|
117 |
+
}
|
118 |
+
@software{together2023redpajama,
|
119 |
+
author = {Together Computer},
|
120 |
+
title = {RedPajama: an Open Dataset for Training Large Language Models},
|
121 |
+
month = {October},
|
122 |
+
year = 2023,
|
123 |
+
url = {https://github.com/togethercomputer/RedPajama-Data}
|
124 |
+
}
|
125 |
+
@article{jaccard1912distribution,
|
126 |
+
title={The distribution of the flora in the alpine zone. 1},
|
127 |
+
author={Jaccard, Paul},
|
128 |
+
journal={New phytologist},
|
129 |
+
volume={11},
|
130 |
+
number={2},
|
131 |
+
pages={37--50},
|
132 |
+
year={1912},
|
133 |
+
publisher={Wiley Online Library}
|
134 |
+
}
|
135 |
+
@misc{albalak2024survey,
|
136 |
+
title={A Survey on Data Selection for Language Models},
|
137 |
+
author={Alon Albalak and Yanai Elazar and Sang Michael Xie and Shayne Longpre and Nathan Lambert and Xinyi Wang and Niklas Muennighoff and Bairu Hou and Liangming Pan and Haewon Jeong and Colin Raffel and Shiyu Chang and Tatsunori Hashimoto and William Yang Wang},
|
138 |
+
year={2024},
|
139 |
+
eprint={2402.16827},
|
140 |
+
archivePrefix={arXiv},
|
141 |
+
primaryClass={cs.CL}
|
142 |
+
}
|
143 |
+
@misc{longpre2023pretrainers,
|
144 |
+
title={A Pretrainer's Guide to Training Data: Measuring the Effects of Data Age, Domain Coverage, Quality, & Toxicity},
|
145 |
+
author={Shayne Longpre and Gregory Yauney and Emily Reif and Katherine Lee and Adam Roberts and Barret Zoph and Denny Zhou and Jason Wei and Kevin Robinson and David Mimno and Daphne Ippolito},
|
146 |
+
year={2023},
|
147 |
+
eprint={2305.13169},
|
148 |
+
archivePrefix={arXiv},
|
149 |
+
primaryClass={cs.CL}
|
150 |
+
}
|
151 |
+
@misc{wenzek2019ccnet,
|
152 |
+
title={CCNet: Extracting High Quality Monolingual Datasets from Web Crawl Data},
|
153 |
+
author={Guillaume Wenzek and Marie-Anne Lachaux and Alexis Conneau and Vishrav Chaudhary and Francisco Guzmán and Armand Joulin and Edouard Grave},
|
154 |
+
year={2019},
|
155 |
+
eprint={1911.00359},
|
156 |
+
archivePrefix={arXiv},
|
157 |
+
primaryClass={cs.CL}
|
158 |
+
}
|
159 |
+
@misc{soldaini2024dolma,
|
160 |
+
title={Dolma: an Open Corpus of Three Trillion Tokens for Language Model Pretraining Research},
|
161 |
+
author={Luca Soldaini and Rodney Kinney and Akshita Bhagia and Dustin Schwenk and David Atkinson and Russell Authur and Ben Bogin and Khyathi Chandu and Jennifer Dumas and Yanai Elazar and Valentin Hofmann and Ananya Harsh Jha and Sachin Kumar and Li Lucy and Xinxi Lyu and Nathan Lambert and Ian Magnusson and Jacob Morrison and Niklas Muennighoff and Aakanksha Naik and Crystal Nam and Matthew E. Peters and Abhilasha Ravichander and Kyle Richardson and Zejiang Shen and Emma Strubell and Nishant Subramani and Oyvind Tafjord and Pete Walsh and Luke Zettlemoyer and Noah A. Smith and Hannaneh Hajishirzi and Iz Beltagy and Dirk Groeneveld and Jesse Dodge and Kyle Lo},
|
162 |
+
year={2024},
|
163 |
+
eprint={2402.00159},
|
164 |
+
archivePrefix={arXiv},
|
165 |
+
primaryClass={cs.CL}
|
166 |
+
}
|
167 |
+
@misc{ouyang2022training,
|
168 |
+
title={Training language models to follow instructions with human feedback},
|
169 |
+
author={Long Ouyang and Jeff Wu and Xu Jiang and Diogo Almeida and Carroll L. Wainwright and Pamela Mishkin and Chong Zhang and Sandhini Agarwal and Katarina Slama and Alex Ray and John Schulman and Jacob Hilton and Fraser Kelton and Luke Miller and Maddie Simens and Amanda Askell and Peter Welinder and Paul Christiano and Jan Leike and Ryan Lowe},
|
170 |
+
year={2022},
|
171 |
+
eprint={2203.02155},
|
172 |
+
archivePrefix={arXiv},
|
173 |
+
primaryClass={cs.CL}
|
174 |
+
}
|
175 |
+
@misc{hoffmann2022training,
|
176 |
+
title={Training Compute-Optimal Large Language Models},
|
177 |
+
author={Jordan Hoffmann and Sebastian Borgeaud and Arthur Mensch and Elena Buchatskaya and Trevor Cai and Eliza Rutherford and Diego de Las Casas and Lisa Anne Hendricks and Johannes Welbl and Aidan Clark and Tom Hennigan and Eric Noland and Katie Millican and George van den Driessche and Bogdan Damoc and Aurelia Guy and Simon Osindero and Karen Simonyan and Erich Elsen and Jack W. Rae and Oriol Vinyals and Laurent Sifre},
|
178 |
+
year={2022},
|
179 |
+
eprint={2203.15556},
|
180 |
+
archivePrefix={arXiv},
|
181 |
+
primaryClass={cs.CL}
|
182 |
+
}
|
183 |
+
@misc{muennighoff2023scaling,
|
184 |
+
title={Scaling Data-Constrained Language Models},
|
185 |
+
author={Niklas Muennighoff and Alexander M. Rush and Boaz Barak and Teven Le Scao and Aleksandra Piktus and Nouamane Tazi and Sampo Pyysalo and Thomas Wolf and Colin Raffel},
|
186 |
+
year={2023},
|
187 |
+
eprint={2305.16264},
|
188 |
+
archivePrefix={arXiv},
|
189 |
+
primaryClass={cs.CL}
|
190 |
+
}
|
191 |
+
@misc{hernandez2022scaling,
|
192 |
+
title={Scaling Laws and Interpretability of Learning from Repeated Data},
|
193 |
+
author={Danny Hernandez and Tom Brown and Tom Conerly and Nova DasSarma and Dawn Drain and Sheer El-Showk and Nelson Elhage and Zac Hatfield-Dodds and Tom Henighan and Tristan Hume and Scott Johnston and Ben Mann and Chris Olah and Catherine Olsson and Dario Amodei and Nicholas Joseph and Jared Kaplan and Sam McCandlish},
|
194 |
+
year={2022},
|
195 |
+
eprint={2205.10487},
|
196 |
+
archivePrefix={arXiv},
|
197 |
+
primaryClass={cs.LG}
|
198 |
+
}
|
199 |
+
@article{llama3modelcard,
|
200 |
+
|
201 |
+
title={Llama 3 Model Card},
|
202 |
+
|
203 |
+
author={AI@Meta},
|
204 |
+
|
205 |
+
year={2024},
|
206 |
+
|
207 |
+
url = {https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md}
|
208 |
+
|
209 |
+
}
|
210 |
+
@misc{jiang2024mixtral,
|
211 |
+
title={Mixtral of Experts},
|
212 |
+
author={Albert Q. Jiang and Alexandre Sablayrolles and Antoine Roux and Arthur Mensch and Blanche Savary and Chris Bamford and Devendra Singh Chaplot and Diego de las Casas and Emma Bou Hanna and Florian Bressand and Gianna Lengyel and Guillaume Bour and Guillaume Lample and Lélio Renard Lavaud and Lucile Saulnier and Marie-Anne Lachaux and Pierre Stock and Sandeep Subramanian and Sophia Yang and Szymon Antoniak and Teven Le Scao and Théophile Gervet and Thibaut Lavril and Thomas Wang and Timothée Lacroix and William El Sayed},
|
213 |
+
year={2024},
|
214 |
+
eprint={2401.04088},
|
215 |
+
archivePrefix={arXiv},
|
216 |
+
primaryClass={cs.LG}
|
217 |
+
}
|
218 |
+
@article{yuan2024self,
|
219 |
+
title={Self-rewarding language models},
|
220 |
+
author={Yuan, Weizhe and Pang, Richard Yuanzhe and Cho, Kyunghyun and Sukhbaatar, Sainbayar and Xu, Jing and Weston, Jason},
|
221 |
+
journal={arXiv preprint arXiv:2401.10020},
|
222 |
+
year={2024}
|
223 |
+
}
|
224 |
+
@article{verga2024replacing,
|
225 |
+
title={Replacing Judges with Juries: Evaluating LLM Generations with a Panel of Diverse Models},
|
226 |
+
author={Verga, Pat and Hofstatter, Sebastian and Althammer, Sophia and Su, Yixuan and Piktus, Aleksandra and Arkhangorodsky, Arkady and Xu, Minjie and White, Naomi and Lewis, Patrick},
|
227 |
+
journal={arXiv preprint arXiv:2404.18796},
|
228 |
+
year={2024}
|
229 |
+
}
|
230 |
+
@article{abdin2024phi,
|
231 |
+
title={Phi-3 technical report: A highly capable language model locally on your phone},
|
232 |
+
author={Abdin, Marah and Jacobs, Sam Ade and Awan, Ammar Ahmad and Aneja, Jyoti and Awadallah, Ahmed and Awadalla, Hany and Bach, Nguyen and Bahree, Amit and Bakhtiari, Arash and Behl, Harkirat and others},
|
233 |
+
journal={arXiv preprint arXiv:2404.14219},
|
234 |
+
year={2024}
|
235 |
+
}
|
236 |
+
@misc{meta2024responsible,
|
237 |
+
title = {Our responsible approach to Meta AI and Meta Llama 3},
|
238 |
+
author = {Meta},
|
239 |
+
year = {2024},
|
240 |
+
url = {https://ai.meta.com/blog/meta-llama-3-meta-ai-responsibility/},
|
241 |
+
note = {Accessed: 2024-05-31}
|
242 |
+
}
|
243 |
+
@inproceedings{talmor-etal-2019-commonsenseqa,
|
244 |
+
title = "CommonsenseQA: A Question Answering Challenge Targeting Commonsense Knowledge",
|
245 |
+
author = "Talmor, Alon and
|
246 |
+
Herzig, Jonathan and
|
247 |
+
Lourie, Nicholas and
|
248 |
+
Berant, Jonathan",
|
249 |
+
booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
|
250 |
+
month = jun,
|
251 |
+
year = "2019",
|
252 |
+
address = "Minneapolis, Minnesota",
|
253 |
+
publisher = "Association for Computational Linguistics",
|
254 |
+
url = "https://aclanthology.org/N19-1421",
|
255 |
+
doi = "10.18653/v1/N19-1421",
|
256 |
+
pages = "4149--4158",
|
257 |
+
archivePrefix = "arXiv",
|
258 |
+
eprint = "1811.00937",
|
259 |
+
primaryClass = "cs",
|
260 |
+
}
|
261 |
+
@inproceedings{zellers-etal-2019-hellaswag,
|
262 |
+
title = "HellaSwag: Can a Machine Really Finish Your Sentence?",
|
263 |
+
author = "Zellers, Rowan and
|
264 |
+
Holtzman, Ari and
|
265 |
+
Bisk, Yonatan and
|
266 |
+
Farhadi, Ali and
|
267 |
+
Choi, Yejin",
|
268 |
+
editor = "Korhonen, Anna and
|
269 |
+
Traum, David and
|
270 |
+
M{\`a}rquez, Llu{\'\i}s",
|
271 |
+
booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
|
272 |
+
month = jul,
|
273 |
+
year = "2019",
|
274 |
+
address = "Florence, Italy",
|
275 |
+
publisher = "Association for Computational Linguistics",
|
276 |
+
url = "https://aclanthology.org/P19-1472",
|
277 |
+
doi = "10.18653/v1/P19-1472",
|
278 |
+
pages = "4791--4800",
|
279 |
+
abstract = "Recent work by Zellers et al. (2018) introduced a new task of commonsense natural language inference: given an event description such as {``}A woman sits at a piano,{''} a machine must select the most likely followup: {``}She sets her fingers on the keys.{''} With the introduction of BERT, near human-level performance was reached. Does this mean that machines can perform human level commonsense inference? In this paper, we show that commonsense inference still proves difficult for even state-of-the-art models, by presenting HellaSwag, a new challenge dataset. Though its questions are trivial for humans ({\textgreater}95{\%} accuracy), state-of-the-art models struggle ({\textless}48{\%}). We achieve this via Adversarial Filtering (AF), a data collection paradigm wherein a series of discriminators iteratively select an adversarial set of machine-generated wrong answers. AF proves to be surprisingly robust. The key insight is to scale up the length and complexity of the dataset examples towards a critical {`}Goldilocks{'} zone wherein generated text is ridiculous to humans, yet often misclassified by state-of-the-art models. Our construction of HellaSwag, and its resulting difficulty, sheds light on the inner workings of deep pretrained models. More broadly, it suggests a new path forward for NLP research, in which benchmarks co-evolve with the evolving state-of-the-art in an adversarial way, so as to present ever-harder challenges.",
|
280 |
+
}
|
281 |
+
@inproceedings{OpenBookQA2018,
|
282 |
+
title={Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering},
|
283 |
+
author={Todor Mihaylov and Peter Clark and Tushar Khot and Ashish Sabharwal},
|
284 |
+
booktitle={EMNLP},
|
285 |
+
year={2018}
|
286 |
+
}
|
287 |
+
@misc{bisk2019piqa,
|
288 |
+
title={PIQA: Reasoning about Physical Commonsense in Natural Language},
|
289 |
+
author={Yonatan Bisk and Rowan Zellers and Ronan Le Bras and Jianfeng Gao and Yejin Choi},
|
290 |
+
year={2019},
|
291 |
+
eprint={1911.11641},
|
292 |
+
archivePrefix={arXiv},
|
293 |
+
primaryClass={cs.CL}
|
294 |
+
}
|
295 |
+
@misc{sap2019socialiqa,
|
296 |
+
title={SocialIQA: Commonsense Reasoning about Social Interactions},
|
297 |
+
author={Maarten Sap and Hannah Rashkin and Derek Chen and Ronan LeBras and Yejin Choi},
|
298 |
+
year={2019},
|
299 |
+
eprint={1904.09728},
|
300 |
+
archivePrefix={arXiv},
|
301 |
+
primaryClass={cs.CL}
|
302 |
+
}
|
303 |
+
@misc{sakaguchi2019winogrande,
|
304 |
+
title={WinoGrande: An Adversarial Winograd Schema Challenge at Scale},
|
305 |
+
author={Keisuke Sakaguchi and Ronan Le Bras and Chandra Bhagavatula and Yejin Choi},
|
306 |
+
year={2019},
|
307 |
+
eprint={1907.10641},
|
308 |
+
archivePrefix={arXiv},
|
309 |
+
primaryClass={cs.CL}
|
310 |
+
}
|
311 |
+
@misc{clark2018think,
|
312 |
+
title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
|
313 |
+
author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
|
314 |
+
year={2018},
|
315 |
+
eprint={1803.05457},
|
316 |
+
archivePrefix={arXiv},
|
317 |
+
primaryClass={cs.AI}
|
318 |
+
}
|
319 |
+
@misc{hendrycks2021measuring,
|
320 |
+
title={Measuring Massive Multitask Language Understanding},
|
321 |
+
author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
|
322 |
+
year={2021},
|
323 |
+
eprint={2009.03300},
|
324 |
+
archivePrefix={arXiv},
|
325 |
+
primaryClass={cs.CY}
|
326 |
+
}
|
327 |
+
@misc{mitchell2023measuring,
|
328 |
+
title={Measuring Data},
|
329 |
+
author={Margaret Mitchell and Alexandra Sasha Luccioni and Nathan Lambert and Marissa Gerchick and Angelina McMillan-Major and Ezinwanne Ozoani and Nazneen Rajani and Tristan Thrush and Yacine Jernite and Douwe Kiela},
|
330 |
+
year={2023},
|
331 |
+
eprint={2212.05129},
|
332 |
+
archivePrefix={arXiv},
|
333 |
+
primaryClass={cs.AI}
|
334 |
+
}
|
src/colors.mjs
ADDED
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// export const COLORS = [
|
2 |
+
// [255, 135, 65], // Bright Orange
|
3 |
+
// [96, 168, 176], // Ocean Blue
|
4 |
+
// [23, 17, 26], // Charcoal Black
|
5 |
+
// [141, 193, 86], // Fresh Green
|
6 |
+
// [208, 65, 83], // Soft Red
|
7 |
+
// [73, 85, 136], // Deep Blue
|
8 |
+
// [45, 68, 67], // Dark Forest Green
|
9 |
+
// [255, 206, 149], // Peach
|
10 |
+
// [241, 139, 105], // Salmon Pink
|
11 |
+
// [102, 45, 57], // Burgundy
|
12 |
+
// [174, 91, 57], // Brown
|
13 |
+
// [232, 158, 86], // Sand
|
14 |
+
// [255, 220, 106], // Sun Yellow
|
15 |
+
// [213, 246, 110], // Light Lime
|
16 |
+
// [74, 122, 71], // Moss Green
|
17 |
+
// [110, 81, 106], // Muted Purple
|
18 |
+
// [170, 140, 148], // Dusty Rose
|
19 |
+
// [223, 203, 191], // Soft Beige
|
20 |
+
// [255, 255, 255], // Pure White
|
21 |
+
// [255, 153, 169], // Light Pink
|
22 |
+
// [199, 102, 178], // Vivid Purple
|
23 |
+
// [131, 58, 149], // Deep Purple
|
24 |
+
// [59, 44, 74], // Dark Slate
|
25 |
+
// [154, 231, 201], // Aqua Green
|
26 |
+
// ]
|
27 |
+
|
28 |
+
// Palette cycled through by getColor(); entries are RGB channel strings.
// Some candidate colors are kept commented out because they made overlaid
// text unreadable or clashed with the plots.
export const COLORS = [
  ["235", "102", "59"],  // Burnt Orange
  ["46", "145", "229"],  // Sky Blue
  ["225", "95", "153"],  // Soft Magenta
  ["28", "167", "28"],   // Bright Green
  // ["108", "69", "22"], // Dark Brown
  ["167", "119", "241"], // Lavender
  ["182", "129", "0"],   // Mustard Yellow
  ["134", "42", "22"],   // Brick Red
  ["0", "160", "139"],   // Teal
  ["175", "0", "56"],    // Crimson
  ["108", "124", "50"],  // Olive Green
  ["81", "28", "251"],   // Royal Blue
  ["218", "22", "255"],  // Electric Purple
  ["98", "0", "66"],     // Dark Magenta
  // ['34', '42', '42'], // Black, which makes the text unreadable
  // ["117", "13", "134"], // Deep Magenta
  ["251", "0", "209"],   // Hot Pink
  ["252", "0", "128"],   // Bright Pink
  // ["178", "130", "141"], // Dusty Pink
  ["119", "138", "174"], // Slate Blue
  ["22", "22", "167"],   // Deep Blue
  ["218", "96", "202"],  // Orchid
  // ['13', '42', '99'], // Black
];

// Fixed colors referenced by name (e.g. for highlighting specific series).
export const NAMED_COLORS = {
  "red": ["251", "13", "13"],   // Vivid Red
  "black": ['13', '42', '99'],  // Black
  "blue": ["46", "145", "229"], // Sky Blue
};

/**
 * Returns an `rgba(...)` CSS color string for index `i`, cycling through
 * COLORS. Negative indices map to their absolute value, so callers may
 * pass any integer.
 * @param {number} i - color index (any integer)
 * @param {number} [opacity=1] - alpha channel, 0..1
 * @returns {string} e.g. "rgba(235,102,59, 1)"
 */
export const getColor = (i, opacity = 1) => {
  // Use a local instead of mutating the parameter.
  const index = Math.abs(i) % COLORS.length;
  return `rgba(${COLORS[index].join(",")}, ${opacity})`;
};

/**
 * Returns an `rgba(...)` CSS color string for a named color, or
 * `undefined` when the name is not in NAMED_COLORS.
 * @param {string} name - key in NAMED_COLORS
 * @param {number} [opacity=1] - alpha channel, 0..1
 * @returns {string|undefined}
 */
export const getNamedColor = (name, opacity = 1) => {
  // Object.hasOwn avoids false positives from prototype-chain keys
  // (e.g. "constructor"), unlike calling hasOwnProperty on the object.
  if (!Object.hasOwn(NAMED_COLORS, name)) {
    return undefined;
  }
  return `rgba(${NAMED_COLORS[name].join(",")}, ${opacity})`;
};
|
77 |
+
|
src/distill.js
ADDED
The diff for this file is too large to render.
See raw diff
|
|
src/index.html
ADDED
@@ -0,0 +1,917 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!DOCTYPE html>
|
2 |
+
<html>
|
3 |
+
<head>
|
4 |
+
<script src="distill.bundle.js" type="module" fetchpriority="high" blocking></script>
|
5 |
+
<script src="main.bundle.js" type="module" fetchpriority="low" defer></script>
|
6 |
+
<meta name="viewport" content="width=device-width, initial-scale=1">
|
7 |
+
    <meta charset="utf-8">
|
8 |
+
<base target="_blank">
|
9 |
+
<title>FineWeb: decanting the web for the finest text data at scale</title>
|
10 |
+
<link rel="stylesheet" href="style.css">
|
11 |
+
</head>
|
12 |
+
|
13 |
+
<body>
|
14 |
+
<d-front-matter>
|
15 |
+
<script id='distill-front-matter' type="text/json">{
|
16 |
+
"title": "🔭 Ultra-Guide to Scaling LLM training",
|
17 |
+
"description": "This blog covers everything about scaling LLMs in 2024.",
|
18 |
+
"published": "Sept 28, 2024",
|
19 |
+
"affiliation": {"name": "HuggingFace"},
|
20 |
+
"authors": [
|
21 |
+
{
|
22 |
+
"author":"Leandro Werra",
|
23 |
+
"authorURL":"https://huggingface.co/lvwerra"
|
24 |
+
},
|
25 |
+
{
|
26 |
+
"author":"Thomas Wolf",
|
27 |
+
"authorURL":"https://huggingface.co/thomwolf"
|
28 |
+
}
|
29 |
+
],
|
30 |
+
"katex": {
|
31 |
+
"delimiters": [
|
32 |
+
{"left": "$$", "right": "$$", "display": false}
|
33 |
+
]
|
34 |
+
}
|
35 |
+
}
|
36 |
+
</script>
|
37 |
+
</d-front-matter>
|
38 |
+
<d-title>
|
39 |
+
<h1 class="l-page" style="text-align: center;">🔭 Ultra-Guide to Scaling LLM training</h1>
|
40 |
+
<div id="title-plot" class="main-plot-container l-screen">
|
41 |
+
<figure>
|
42 |
+
<img src="assets/images/banner.png" alt="FineWeb">
|
43 |
+
</figure>
|
44 |
+
<!-- <div id="clusters-plot">
|
45 |
+
<img src="assets/images/clusters.png" alt="Clusters">
|
46 |
+
</div> -->
|
47 |
+
</div>
|
48 |
+
</d-title>
|
49 |
+
<d-byline></d-byline>
|
50 |
+
<d-article>
|
51 |
+
<d-contents>
|
52 |
+
</d-contents>
|
53 |
+
|
54 |
+
    <p>The performance of a large language model (LLM) depends heavily on the quality and size of its pretraining dataset.
|
55 |
+
However, the pretraining datasets for state-of-the-art open LLMs like Llama 3<d-cite bibtex-key="llama3modelcard"></d-cite> and Mixtral<d-cite bibtex-key="jiang2024mixtral"></d-cite> are not publicly available and very little is known about how they were created.</p>
|
56 |
+
<aside>Reading time: 7 days. For the best reading experience, we recommend not using a mobile phone.</aside>
|
57 |
+
|
58 |
+
<p>Recently, we released <a href="https://huggingface.co/datasets/HuggingFaceFW/fineweb"><strong>🍷 FineWeb</strong></a>, a new, large-scale
|
59 |
+
(<strong>15-trillion tokens, 44TB disk space</strong>) dataset for LLM pretraining. FineWeb is derived from 96 <a href="https://commoncrawl.org/">CommonCrawl</a> snapshots and produces <strong>better-performing LLMs than other open pretraining datasets</strong>.
|
60 |
+
|
61 |
+
<aside>We are extremely thankful to the whole <a href="https://distill.pub/">distill.pub</a> team for creating the template on which we based this blog post.</aside>
|
62 |
+
|
63 |
+
<p><strong>TLDR:</strong> This blog covers a discussion on processing and evaluating data quality at scale, the 🍷 FineWeb
|
64 |
+
recipe (listing and explaining all of our design choices), and the process followed to create its 📚 FineWeb-Edu subset.</p>
|
65 |
+
|
66 |
+
<h2>Scaling Models and Hardware</h2>
|
67 |
+
|
68 |
+
<p>Now that we know the basics of distributed communication and computations it’s time to apply this to training LLMs at scale. Here’s the plan of action: we’ll go through increasingly complex distribution strategies, namely data, then tensor and finally pipeline parallelism, and show three things:</p>
|
69 |
+
|
70 |
+
<ol>
|
71 |
+
<li>conceptual explanations with diagrams</li>
|
72 |
+
<li>a minimal coding example illustrating how to implement said strategy</li>
|
73 |
+
        <li>scaling experiments showcasing strengths and limits of the method with real data</li>
|
74 |
+
</ol>
|
75 |
+
|
76 |
+
<p>For the experiments we scale across two dimensions: we make the models larger and larger and add more and more compute nodes and measure how throughput changes.</p>
|
77 |
+
|
78 |
+
<p>So this is a good point to get ☕ #2 and we’ll have a look at the setup for the practical experiments.</p>
|
79 |
+
|
80 |
+
<h2>Experiment setup</h2>
|
81 |
+
|
82 |
+
<table>
|
83 |
+
<thead>
|
84 |
+
<tr>
|
85 |
+
<th></th>
|
86 |
+
<th><strong>1B (1)</strong></th>
|
87 |
+
<th><strong>7B</strong></th>
|
88 |
+
<th><strong>70B</strong></th>
|
89 |
+
<th><strong>340B (2)</strong></th>
|
90 |
+
<th><strong>400B (3)</strong></th>
|
91 |
+
</tr>
|
92 |
+
</thead>
|
93 |
+
<tbody>
|
94 |
+
<tr>
|
95 |
+
<td><strong>N Layers</strong></td>
|
96 |
+
<td>24</td>
|
97 |
+
<td>32</td>
|
98 |
+
<td>80</td>
|
99 |
+
<td>96</td>
|
100 |
+
<td>126</td>
|
101 |
+
</tr>
|
102 |
+
<tr>
|
103 |
+
<td><strong>N Heads</strong></td>
|
104 |
+
<td>32</td>
|
105 |
+
<td>32</td>
|
106 |
+
<td>64</td>
|
107 |
+
<td>96</td>
|
108 |
+
<td>128</td>
|
109 |
+
</tr>
|
110 |
+
<tr>
|
111 |
+
<td><strong>Dimension</strong></td>
|
112 |
+
<td>2048</td>
|
113 |
+
<td>4096</td>
|
114 |
+
<td>8192</td>
|
115 |
+
<td>18432</td>
|
116 |
+
<td>16384</td>
|
117 |
+
</tr>
|
118 |
+
</tbody>
|
119 |
+
</table>
|
120 |
+
|
121 |
+
<p>(1) FineWeb ablation models</p>
|
122 |
+
<p>(2) Nemotron-340B architecture (without GQA)</p>
|
123 |
+
<p>(3) Llama-400B, ffn dim = 1.2 hidden dim (without GQA)</p>
|
124 |
+
|
125 |
+
|
126 |
+
<h2>Distribution Methods</h2>
|
127 |
+
|
128 |
+
    <p>Efficiently training LLMs now requires amounts of compute which in most cases exceed a single GPU or machine. Large distributed clusters are thus used to train these models and can range from hundreds to thousands of nodes each usually equipped with up to 8 GPUs. To make the best use of such expensive hardware, a range of distributed training methods have been developed with the goal of ensuring that GPUs are highly utilized at all times and not waiting for data/synchronization/etc.</p>
|
129 |
+
|
130 |
+
<p>Several methods can be used to distribute training and we’ll start with 4D parallelism followed-up by DeepSpeed stages. While we explain these strategies we’ll also run experiments to determine the trade-offs and understand the optimal settings.</p>
|
131 |
+
<p>The name “4D parallelism” originates from the fact that it involves combining up to 4 distribution methods: data, tensor, pipeline, and sequence parallelism (each of these techniques can be used independently of the other). You may thus ask “So which one should I use?”.</p>
|
132 |
+
|
133 |
+
<p>Unfortunately, there is no universal answer as the response will actually depend on the cluster setup as well as the model architecture. But do not despair for in this section we’ll develop strategies to figure out the best setting experimentally!</p>
|
134 |
+
|
135 |
+
    <p>In addition to 4D parallelism we’ll also take a look at “DeepSpeed”, a method developed by Microsoft which is generally complementary to 4D parallelism and can be leveraged on top of it.</p>
|
136 |
+
|
137 |
+
<p><strong>Idea: show two things in every section</strong></p>
|
138 |
+
<ol>
|
139 |
+
<li>a small toy model (e.g. 4 layer FFN) we can interactively show with every approach</li>
|
140 |
+
<li>a benchmark showing the improvement/limits of the approach (e.g. when you cross 1 node with TP)</li>
|
141 |
+
</ol>
|
142 |
+
|
143 |
+
<h3>No Parallelism</h3>
|
144 |
+
|
145 |
+
<p>Let’s quickly go over the basics before going into distributed training. When a model is trained on a single GPU, the training consists of 3 steps in the simplest case:</p>
|
146 |
+
<ol>
|
147 |
+
<li>one forward pass,</li>
|
148 |
+
<li>one backward pass to compute the gradients, and</li>
|
149 |
+
<li>an optimization step using the gradients to update the parameters</li>
|
150 |
+
</ol>
|
151 |
+
|
152 |
+
<p>As we’ll see in the future, these steps may be repeated or intertwined but for now we’ll start simple:</p>
|
153 |
+
|
154 |
+
<img src="assets/images/IMG_7537D08D7F41-1.jpeg" alt="Training Steps">
|
155 |
+
|
156 |
+
<p>In this figure the successive blue boxes on the top line can be seen as successive layers inside a model (same for the last line). The red boxes are the associated gradients for each of these layers.</p>
|
157 |
+
|
158 |
+
<p>The batch size (<em>bs</em>) is one of the most important hyper-parameters in machine learning, affecting both model convergence and throughput.</p>
|
159 |
+
|
160 |
+
<p>If the batch size is too small, gradients will tend to be noisy and the model may not be able to converge to optimal performances while a batch size too large can make the convergence of the model slower and waste compute. You can find a nice discussion of this topic in OpenAI’s paper on large batch training (<a href="https://arxiv.org/abs/1812.06162">https://arxiv.org/pdf/1812.06162</a>).</p>
|
161 |
+
|
162 |
+
<p>The batch size also affects the throughput: a small batch size will require more optimizer steps to train on a given amount of samples. Optimizer steps are costly (in compute time) and the throughput will thus be lower than when using a larger batch size. On the other hand, larger batches, while leading to higher throughput may suffer from slow convergence in the limits as we’ve just seen. There is generally an optimal batch size from a convergence/performance point of view (note that the batch size can usually still be changed around the optimal batch size without major impact to the performance of the model).</p>
|
163 |
+
|
164 |
+
<p>Note that in the LLM community, batch sizes are commonly reported in terms of tokens instead of number of samples (BST - Batch Size Tokens) as each token has a label and thus a loss term and can thus be considered individual (although highly correlated) samples.</p>
|
165 |
+
|
166 |
+
<p>A sweet spot for LLM training is usually on the order of 4-20 million tokens per batch (links GPT-3, DeepSeek, Llama). In the simplest case, training on a single machine, the <em>BS</em> and <em>BST</em> can be computed from the model input sequence length as follows:</p>
|
167 |
+
|
168 |
+
<d-math>
|
169 |
+
bst=bs *seq
|
170 |
+
</d-math>
|
171 |
+
|
172 |
+
<p>(note that from here on forward we’ll show the formulas for the batch size in number of samples but you can always get its token-unit counterpart by multiplying it with the sequence length)</p>
|
173 |
+
|
174 |
+
<p>And we’re now hitting our first scaling problem:</p>
|
175 |
+
|
176 |
+
<blockquote>
|
177 |
+
<p>what if we can’t fit the model into GPU memory even with <code>BS=1</code>?</p>
|
178 |
+
</blockquote>
|
179 |
+
|
180 |
+
<p>Good question, reader!</p>
|
181 |
+
|
182 |
+
<p>Let’s start by understanding what led to our out-of-memory issue in the first place.</p>
|
183 |
+
|
184 |
+
<h2>A brief overview of memory usage in Transformers</h2>
|
185 |
+
|
186 |
+
<p>To train a neural network model, one needs to store many elements in memory besides the weights themselves. Generally, the memory usage is made up from the following elements:</p>
|
187 |
+
<ul>
|
188 |
+
<li>model weights</li>
|
189 |
+
<li>model gradients</li>
|
190 |
+
<li>optimizer states</li>
|
191 |
+
<li>activations computed during the forward pass and which are needed to compute the backward pass</li>
|
192 |
+
<li>also CUDA Kernels require 1-2GB of GPU memory which you can quickly check yourself by running <code>import torch; torch.ones((1, 1)).to("cuda")</code> and then checking the GPU memory with <code>nvidia-smi</code></li>
|
193 |
+
<li>lower rest memory usage from buffers, intermediate results and some memory that can’t be used due to fragmentation</li>
|
194 |
+
</ul>
|
195 |
+
|
196 |
+
<p>Scaling up training is usually a question of playing with those constituents to keep memory low while not impacting performance too much. We’ll neglect the last two contributors as there’s usually not that much you can do about them unless you dive deep in the code.</p>
|
197 |
+
|
198 |
+
<p>For the rest, they are usually different types of tensors that can have various sizes (usually multiples of one or several of batch size, sequence length, model hidden dimension and some potential sharding) and various precisions (with optimizer states and weights copy being often kept in full FP32 precision while activations can be of lower precision like BF16 or FP8). Let’s try to get some intuition for the memory requirement of these various elements.</p>
|
199 |
+
|
200 |
+
<p>Let’s first look at the weights, gradients and optimizer states. They are all dependent on the number of parameters in a model. For a simple LLM the number of parameters is given by the following formula:</p>
|
201 |
+
|
202 |
+
<d-math>
|
203 |
+
N = h*v + L * (12 * h^2 + 13*h) + 2*h
|
204 |
+
</d-math>
|
205 |
+
|
206 |
+
<p>In that equation, <em>h</em> corresponds to the hidden dimension, <em>v</em> to the vocabulary size, and <em>L</em> the number of layers in the model. Note that looking at the equation we can see that the term that will dominate at large model scales is the one with <em>h^2</em> since it’s the only term growing quadratically as we scale the models.</p>
|
207 |
+
|
208 |
+
<p>Let’s see how the number of parameters translates to memory usage. The memory requirements for the parameters and gradients are the number of parameters multiplied by the number of bytes per parameter. Mixed precision training with BF16 is the default nowadays which requires 2 bytes per parameter. In addition, there are a number of values necessary for the optimizer states: for ADAM it requires the momentum and the variance in FP32, each using 4 bytes, and an additional copy of the model weights in FP32, thus 12 bytes per parameter (ref: <a href="https://arxiv.org/pdf/1910.02054">ZeRO</a>):</p>
|
209 |
+
|
210 |
+
<d-math>
|
211 |
+
m_{params} = 2 * N
|
212 |
+
m_{grad} = 2 * N
|
213 |
+
m_{opt} = (4+4+4) * N
|
214 |
+
</d-math>
|
215 |
+
|
216 |
+
<p>In old-fashioned full precision training both parameters and gradients would require 4 bytes each but the optimizer on the other hand wouldn’t need to store an extra full precision copy of the weights:</p>
|
217 |
+
|
218 |
+
<d-math>
|
219 |
+
m_{params} = 4 * N
|
220 |
+
m_{grad} = 4 * N
|
221 |
+
m_{opt} = (4+4) * N
|
222 |
+
</d-math>
|
223 |
+
|
224 |
+
<p>So we can easily see that mixed precision itself doesn’t save memory as it just distributes the memory differently across the three components. So by multiplying the number of parameters by 16 (=2+2+12) you can quickly get a sense of how much GPU memory we need for a model:</p>
|
225 |
+
|
226 |
+
<table>
|
227 |
+
<thead>
|
228 |
+
<tr>
|
229 |
+
<th>Model parameters</th>
|
230 |
+
<th>Memory requirements</th>
|
231 |
+
</tr>
|
232 |
+
</thead>
|
233 |
+
<tbody>
|
234 |
+
<tr>
|
235 |
+
<td>1B</td>
|
236 |
+
<td>16 GB</td>
|
237 |
+
</tr>
|
238 |
+
<tr>
|
239 |
+
<td>7B</td>
|
240 |
+
<td>112 GB</td>
|
241 |
+
</tr>
|
242 |
+
<tr>
|
243 |
+
<td>70B</td>
|
244 |
+
<td>1120 GB</td>
|
245 |
+
</tr>
|
246 |
+
<tr>
|
247 |
+
<td>405B</td>
|
248 |
+
<td>6480 GB</td>
|
249 |
+
</tr>
|
250 |
+
</tbody>
|
251 |
+
</table>
|
252 |
+
|
253 |
+
<p>We can further decrease the memory usage if we choose FP8 training instead of BF16 but it is much less stable and a very active research topic (see <a href="https://x.com/xariusrke/status/1826669126955278401">here</a>) thus we won’t go in details here.</p>
|
254 |
+
|
255 |
+
<p>But we are not done yet, we’ll also need to store the forward pass activations which are used during the backward pass to compute the gradients. The total memory required for the activations in mixed precision (which contributes the leading factor of 2 below) is given by the following equation:</p>
|
256 |
+
|
257 |
+
<d-math>
|
258 |
+
m_{act} = 2 * L* seq * bs * h * (34 + \frac{5*n_{heads}*seq}{h})
|
259 |
+
</d-math>
|
260 |
+
|
261 |
+
<p>You can follow <a href="https://arxiv.org/pdf/2205.05198">this NVIDIA paper</a> for a complete derivation, it essentially requires you to do some accounting of all the sizes of intermediate activations between each operation. What’s interesting here is that the memory is not static for a given model but depends critically on the sequence length. We can use the memory formulas and have a look how the memory usage changes for a model for various sequence lengths:</p>
|
262 |
+
|
263 |
+
<img src="assets/images/image%206.png" alt="Memory Usage Graph 1">
|
264 |
+
<img src="assets/images/image%207.png" alt="Memory Usage Graph 2">
|
265 |
+
|
266 |
+
<p>This graph tells a striking story: for short sequences, activations are almost negligible, but starting at around 2-4k tokens they start to take up a significant amount of memory while parameter, gradient and optimizer state are roughly independent of the sequence length and batch size. For large batch/sequence, activations however become by far the largest memory burden.</p>
|
267 |
+
|
268 |
+
<p>Is there a way to tame this “activation explosion”?</p>
|
269 |
+
|
270 |
+
<p>Good question, reader! I see you’re following well and you’re lucky as the answer is “Yes”! Let’s talk about a technique called <strong>gradient checkpointing</strong> or more frequently <strong>activation recomputation</strong> which can help us cap activation memory footprint and is an essential tool in today’s large model training toolbox.</p>
|
271 |
+
|
272 |
+
<h3>Activation recomputation</h3>
|
273 |
+
|
274 |
+
<p>The general idea behind gradient checkpointing is to discard some activations to save memory if we are willing to spend some extra compute to recompute them when needed. Typically we will save activations at some key points in memory and discard the rest and recompute them during the backward pass from the nearest activations:</p>
|
275 |
+
|
276 |
+
<img src="assets/images/IMG_C4260C5C58DC-1.jpeg" alt="Activation Recompute">
|
277 |
+
|
278 |
+
<p>We can select these key activations according to several strategies and modern frameworks usually choose among the following three strategies:</p>
|
279 |
+
<ul>
|
280 |
+
<li><strong>None</strong>: We don’t recompute activations during the backward pass and keep all activations in memory. While this is the fastest and thus computationally cheapest option, it also requires the most memory.</li>
|
281 |
+
<li><strong>Full</strong>: The simplest strategy from a conceptual point of view is to checkpoint activations between each Transformer layer. This is usually called the <code>full</code> strategy since it requires a forward pass through each layer essentially adding a full forward pass during the backward pass. This strategy saves the most memory but is the most expensive one in terms of compute. This increases the compute cost by up to 30-40% which is very noticeable.</li>
|
282 |
+
<li><strong>Selective</strong>: In general we can do better than full. The authors of <a href="https://arxiv.org/pdf/2205.05198">this paper</a> did a detailed analysis studying which activations grow the largest and have the cheapest recomputation cost in terms of FLOPs. Turns out that the attention computations fall in that category, and thus we can usually discard them and focus on checkpointing expensive feedforward computations. Note: for a GPT-3 (175B) model this means 70% activation memory reduction at a 2.7% compute cost.</li>
|
283 |
+
</ul>
|
284 |
+
|
285 |
+
<p>Let’s see how recomputation strategies can drastically reduce the memory footprint while selective recomputation strikes a nice balance between memory saving and recomputation cost:</p>
|
286 |
+
|
287 |
+
<img src="assets/images/image%208.png" alt="Recomputation Strategies">
|
288 |
+
|
289 |
+
<p>Note: Hardware vs Model flops.</p>
|
290 |
+
|
291 |
+
<p>Most frameworks these days use FlashAttention (TODO: see later) which makes the attention computation less memory intensive through kernel fusion, thus most trainings use the <code>full</code> settings.</p>
|
292 |
+
|
293 |
+
<p>We can save some GPU memory with activation recomputation but this only delays by a bit the next bottleneck: as hinted earlier for LLM training there is usually a sweet spot for the GBST and we need to work out the training configuration backward from there. However, you can’t choose MBS to be an arbitrary large number on your GPU; at some point you will run out of GPU memory again since you need to store at least some of the activations in memory.</p>
|
294 |
+
|
295 |
+
<p>There is a useful trick to compensate for that: <strong>gradient accumulation</strong> (<em>GradAcc</em>). With gradient accumulation we will split our batch in micro-batch, do forward and backward passes repeatedly on each micro-batch, compute the gradients, and, as the name suggests, sum the gradients step by step before doing a final optimizer step.</p>
|
296 |
+
|
297 |
+
<p>We call the <code>micro batch size</code> (MBS) the batch size for each forward pass on a single node (the number of samples flowing through the model in one forward pass). We’ll refer to the overall batch size between each optimizer step as the <code>global batch size</code> (GBS). If we do one optimizer step each 8 forward/backward pass, the <code>global batch size</code> will be 8 times the <code>micro batch size</code>.</p>
|
298 |
+
|
299 |
+
<p>What we now call <code>global batch size</code> thus corresponds to what we’ve called up to now just <code>batch size</code> for simplicity (we now make the terms more precise to avoid ambiguity).</p>
|
300 |
+
|
301 |
+
<p>With gradient accumulation the global batch size can be computed as follows:</p>
|
302 |
+
|
303 |
+
<d-math>
|
304 |
+
BS = GBS=MBS * GradAcc
|
305 |
+
</d-math>
|
306 |
+
|
307 |
+
<p>Gradient accumulation allows us to effectively increase our batch size up to infinity (!) while the memory footprint stays constant. Gradient accumulation is also compatible with activation recomputation for further memory reduction. One drawback however, is that gradient accumulation requires multiple consecutive forward/backward passes per optimization step thereby increasing the compute overhead and slowing down training. No free lunch!</p>
|
308 |
+
|
309 |
+
<img src="assets/images/IMG_DA188FF29F45-1.jpeg" alt="Gradient Accumulation">
|
310 |
+
|
311 |
+
<p>This is actually a bummer since the forward/backward passes for each micro-batch could actually totally be run in parallel. They are independent from each other and the only changing parameter are the input samples.</p>
|
312 |
+
|
313 |
+
<p>Here comes data parallelism to solve exactly this problem! Let’s take a look, you say? Okay sure!</p>
|
314 |
+
|
315 |
+
<h3>Data Parallelism</h3>
|
316 |
+
|
317 |
+
<p>The idea behind data parallelism (DP) is to parallelize forward and backward passes across GPUs, passing different batches of data per GPU (or groups of GPUs) to the same model instance. Just like for gradient accumulation, we need to average gradients across instances before we do the optimization step. The GBS equation can then be extended to:</p>
|
318 |
+
|
319 |
+
<d-math>
|
320 |
+
GBS=MBS * GradAcc * DP
|
321 |
+
</d-math>
|
322 |
+
|
323 |
+
<p>This means that we can reduce the number of gradient accumulation steps in favor of data parallel processes which speeds up training. In practice, people will tend to max out the number of data parallel nodes (the DP above) as much as possible as it’s inherently parallel versus the sequential Gradient Accumulation. Gradient accumulation is then added only to achieve a target batch size if DP alone is not sufficient. One exception to that is pipeline parallelism which we’ll discuss later.</p>
|
324 |
+
|
325 |
+
<img src="assets/images/IMG_A95961668B3F-1.jpeg" alt="Data Parallelism">
|
326 |
+
|
327 |
+
<p>As you can see on the figure above, some gradients can already be gathered and summed (red boxes) even before gradients down the line (red boxes on the left of the current gradient) are still being computed. This significantly speeds up data parallelism. For instance, as soon as the backward pass of the last layer is done (last boxes on the right) those gradients can already be gathered/summed while the backward pass computations move to earlier layers, aka to the left. This lowers the communication/bandwidth pressure to sync gradients of the full model as it can be performed in part in parallel to the computation of said gradients. See <a href="https://siboehm.com/articles/22/data-parallel-training">this article</a> for more information.</p>
|
328 |
+
|
329 |
+
<p>A general recipe to determine an optimal data-parallel setup can be as follows:</p>
|
330 |
+
<ol>
|
331 |
+
<li>Determine the best (global) batch size in tokens to use, either by consulting the literature or by running experiments. This determines the GBST.</li>
|
332 |
+
<li>Select a sequence length for training, again by either consulting literature or running experiments. Generally 2-8k tokens works reliably well.</li>
|
333 |
+
<li>You now know the batch size (GBS=GBST/SeqLen). Find the maximum MBS on a single GPU by increasing the local batch size until you run out of memory. This determines the MBS.</li>
|
334 |
+
<li>Finally, the number of available GPUs corresponds to the potential DP. The ratio of GBS to DP determines the remaining number of gradient accumulation steps needed for the desired GBS.</li>
|
335 |
+
</ol>
|
336 |
+
|
337 |
+
<p>If the gradient accumulation ratio is lower than one, i.e. you have too many GPUs (!), you can either choose to not use all your GPUs or test if a lower MBS will speed up training. In these cases, you may want to prioritize throughput over the individual GPU utilization, you can then choose DP first and use a smaller MBS than possible in order to speed up training.</p>
|
338 |
+
|
339 |
+
<p>Time to take a concrete example: We want to train a model with a GBS of 4M tokens and a sequence length of 4k. This means our batch size will be 1024 samples (we pick powers of two). We observe that a single one of our GPUs can fit MBS=2 in memory and we have 128 GPUs available for training. This means with 4 gradient accumulation steps we’ll achieve our goal of 1024 samples or 4M tokens per training step. Now what if we suddenly have 1024 GPUs available? We can achieve the same GBS and thus identical training by setting both MBS and gradient accumulation to 1 speeding up training significantly.</p>
|
340 |
+
|
341 |
+
<p>[EXPERIMENTS WHERE WE INCREASE DP AND SHOW THROUGHPUT FOR SEVERAL MODELS]</p>
|
342 |
+
|
343 |
+
<p>We’ve explored data parallelism, a simple strategy to scale training across more GPUs and gives consistent speed improvements. The keen reader might have noticed however that it rests on the assumption that we can fit at least one input sample forward pass (<em>MBS=1</em>) into our GPU memory. This is not always the case! In particular for larger models which often don’t fit into a single GPU anymore even with activation recomputations activated.</p>
|
344 |
+
|
345 |
+
<p>In such case, we need to shard the model across devices! We’ll now study two complementary sharding methods, tensor and pipeline parallelism which are doing that. Let’s start by the simplest, tensor parallelism!</p>
|
346 |
+
|
347 |
+
<h3>Tensor Parallelism</h3>
|
348 |
+
|
349 |
+
<p>So you’ve exhausted all the previous textbook tricks to try to fit your model on a single GPU but it still doesn’t fit? Let’s try to distribute this model across several GPUs. Unlike DP we will not simply duplicate the model but various parts of the model instance will be living on various GPUs.</p>
|
350 |
+
|
351 |
+
<p>If we take a look at a typical matrix multiplication (the core of a neural network), we can get an idea about how we could split the model:</p>
|
352 |
+
|
353 |
+
<img src="assets/images/image%209.png" alt="Matrix Multiplication Example">
|
354 |
+
|
355 |
+
<p>Tensor parallelism is a technique in which a tensor is split into N shards along a particular dimension across N GPUs. Matrices can be split either on the column part or row part leading to row and column parallelism. Depending on which splitting strategy we choose will require different communications primitives.</p>
|
356 |
+
|
357 |
+
<p><strong>Column linear:</strong></p>
|
358 |
+
<ul>
|
359 |
+
<li>Splitting by column or row involves different synchronization primitives:
|
360 |
+
<ul>
|
361 |
+
<li>column:
|
362 |
+
<ul>
|
363 |
+
<li>A <strong>Broadcast</strong> operation is used to send the same input to different GPUs,</li>
|
364 |
+
<li>Multiplications are done independently on the GPUs, and finally</li>
|
365 |
+
<li>An <strong>All-gather</strong> operation is used to gather the output results.</li>
|
366 |
+
</ul>
|
367 |
+
</li>
|
368 |
+
<li>Row:
|
369 |
+
<ul>
|
370 |
+
<li>A <strong>Scatter</strong> operation is used to split the input and send it to different GPUs (we split the weight row-wise),</li>
|
371 |
+
<li>Multiplications are done independently on the GPUs, and finally</li>
|
372 |
+
<li>An <strong>All-reduce</strong> operation is used to add the results together and the full output results.</li>
|
373 |
+
</ul>
|
374 |
+
</li>
|
375 |
+
</ul>
|
376 |
+
</li>
|
377 |
+
</ul>
|
378 |
+
|
379 |
+
<p>This was for an example matrix multiplication. How do we apply this in practice to a real model? In the Transformer, there are 2 basic building blocks where tensor parallel can be applied:</p>
|
380 |
+
<ul>
|
381 |
+
<li>Feedforward layers (MLP)</li>
|
382 |
+
<li>Multi-Head Attention (MHA)</li>
|
383 |
+
</ul>
|
384 |
+
|
385 |
+
<p>Feedforward layers comprise 2 successive MLPs with a non-linearity in-between. Here is the first part of it:</p>
|
386 |
+
|
387 |
+
<img src="assets/images/image%2012.png" alt="Feedforward Layers">
|
388 |
+
|
389 |
+
<p>Should we use row or column parallelization for the first MLP?</p>
|
390 |
+
|
391 |
+
<p>Well it turns out parallelized GeLU only works in Column schema:</p>
|
392 |
+
|
393 |
+
<p>In column schema:</p>
|
394 |
+
<d-math>
|
395 |
+
GeLU(cat([XW1, XW2])) = cat([GeLU(XW1), GeLU(XW2)])
|
396 |
+
</d-math>
|
397 |
+
|
398 |
+
<p>In row schema:</p>
|
399 |
+
<d-math>
|
400 |
+
GeLU(XW1 + XW2) \neq GeLU(XW1) + GeLU(XW2)
|
401 |
+
</d-math>
|
402 |
+
|
403 |
+
<p>If you rather like code, note that we can prove this with the following snippet as well:</p>
|
404 |
+
|
405 |
+
<pre>
|
406 |
+
def example_gelu(device="cuda"):
    """Show that GeLU commutes with a column-wise split but not a row-wise one.

    Column-parallel: applying GeLU to each output shard independently matches
    applying GeLU to the concatenated output, because GeLU is element-wise.
    Row-parallel: GeLU(a) + GeLU(b) != GeLU(a + b) since GeLU is non-linear,
    so the partial sums must be all-reduced *before* the activation.

    Args:
        device: torch device to run on. Defaults to "cuda"; pass "cpu" to run
            the demo without a GPU.
    """
    import torch
    from torch.nn.functional import gelu

    X = torch.randn(4, 2, device=device, dtype=torch.float32)
    W = torch.randn(2, 2, device=device, dtype=torch.float32)

    # Column linear: split the weight along its output (column) dimension.
    W_0, W_1 = W.chunk(2, dim=1)

    y_col_1 = torch.cat([gelu(X @ W_0), gelu(X @ W_1)], dim=1)
    y_col_2 = gelu(torch.cat([X @ W_0, X @ W_1], dim=1))

    # All match: GeLU can be applied shard-by-shard before gathering.
    torch.testing.assert_close(y_col_1, y_col_2, rtol=1e-5, atol=1e-5)

    # Row linear: split the input along columns and the weight along rows.
    X_0, X_1 = X.chunk(2, dim=1)
    W_0, W_1 = W.chunk(2, dim=0)
    y_row_1 = gelu(X_0 @ W_0) + gelu(X_1 @ W_1)
    y_row_2 = gelu(X_0 @ W_0 + X_1 @ W_1)

    # Mismatch: summing per-shard GeLU outputs is NOT GeLU of the summed partials.
    # (The original snippet asserted closeness here, which raises and aborts the
    # demo; we assert the mismatch explicitly so the function runs to completion.)
    assert not torch.allclose(y_row_1, y_row_2, rtol=1e-5, atol=1e-5)
|
429 |
+
</pre>
|
430 |
+
|
431 |
+
<p>To avoid a synchronization step directly after the first MLP, we’ll thus start with Column Parallel and be able to directly perform parallel GELU.</p>
|
432 |
+
|
433 |
+
<p>Now, what about the second MLP? Should it be column or row parallel? Let’s draft both options:</p>
|
434 |
+
<ul>
|
435 |
+
<li>Column Parallel followed by Column Parallel</li>
|
436 |
+
<img src="assets/images/image%2013.png" alt="Column Parallel Schema 1">
|
437 |
+
<li>Column Parallel followed by Row Parallel</li>
|
438 |
+
<img src="assets/images/image%2014.png" alt="Column Parallel Schema 2">
|
439 |
+
</ul>
|
440 |
+
|
441 |
+
<p>We see that the “Column Parallel followed by Row Parallel” schema only involves two communications instead of four. It’s thus the most efficient schema in terms of communications.</p>
|
442 |
+
|
443 |
+
<p>Let’s take a quick look at the backward pass:</p>
|
444 |
+
<img src="assets/images/image%2015.png" alt="Backward Pass 1">
|
445 |
+
<img src="assets/images/image%2016.png" alt="Backward Pass 2">
|
446 |
+
|
447 |
+
<pre>
|
448 |
+
def column_linear_forward(X, local_W, group):
    """Column-parallel forward: each rank holds a row-shard of W (column-shard
    after the transpose), producing a column-shard of Y. No communication here."""
    Y_local = X @ local_W.t()
    return Y_local

def column_linear_backward(local_grad_Y, X, local_W, group):
    """Column-parallel backward: grad wrt the local weight shard is complete;
    grad wrt X is only this rank's partial contribution."""
    local_grad_X = local_grad_Y @ local_W
    grad_W = local_grad_Y.t() @ X
    return local_grad_X, grad_W

def row_linear_forward(local_X, local_W, group):
    """Row-parallel forward: each rank computes a partial product from its input
    shard, then an all-reduce sums the partials into the full output on every rank."""
    Y_local = local_X @ local_W.t()
    dist.all_reduce(Y_local, group=group)
    Y = Y_local
    return Y

def row_linear_backward(grad_Y, X, local_W, group):
    """Row-parallel backward: the incoming grad is replicated after the forward
    all-reduce, so each rank computes its local gradients without communication."""
    local_grad_X = grad_Y @ local_W
    grad_W = grad_Y.t() @ X
    return local_grad_X, grad_W

def example_column_row_linear():
    # torchrun --nproc_per_node=2 tp_all_reduce.py
    # NOTE(review): `split_tensor` is assumed to shard a tensor along `dim`
    # across the tensor-parallel group and return this rank's shard — it is
    # defined outside this snippet; confirm against the full script.
    group = dist.distributed_c10d._get_default_group()

    # Reference (unsharded) tensors; broadcast from rank 0 so all ranks agree.
    X_ref = torch.arange(4 * 2, device="cuda", dtype=torch.float32, requires_grad=True).reshape(4, 2)
    W_ref_layer1 = torch.arange(1, 5, device="cuda", dtype=torch.float32, requires_grad=True).reshape(2, 2) * 10
    W_ref_layer2 = torch.arange(1, 5, device="cuda", dtype=torch.float32, requires_grad=True).reshape(2, 2)

    # Keep gradients on non-leaf/broadcast tensors for the reference backward check.
    X_ref.retain_grad()
    W_ref_layer1.retain_grad()
    W_ref_layer2.retain_grad()

    dist.broadcast(X_ref, src=0, group=group)
    dist.broadcast(W_ref_layer1, src=0, group=group)
    dist.broadcast(W_ref_layer2, src=0, group=group)

    # Working copies used by the tensor-parallel path.
    X = X_ref.clone()
    W_layer1 = W_ref_layer1.clone()
    W_layer2 = W_ref_layer2.clone()

    # Forward
    Y_ref_linear1 = X_ref @ W_ref_layer1.t()
    Y_ref_linear1.retain_grad()

    # We will transpose for matrix multiplication. As a result, we need to split row-wise
    Y_local_linear1 = column_linear_forward(X, split_tensor(W_layer1, dim=0), group)

    torch.testing.assert_close(Y_local_linear1, split_tensor(Y_ref_linear1, dim=1), rtol=1e-5, atol=1e-5)

    # Use the working copy W_layer2 (not W_ref_layer2), consistent with the
    # backward pass below; both hold identical values after the broadcast+clone.
    Y_local_linear2 = row_linear_forward(Y_local_linear1, split_tensor(W_layer2, dim=1), group)
    Y_ref_linear2 = Y_ref_linear1 @ W_ref_layer2.t()
    torch.testing.assert_close(Y_local_linear2, Y_ref_linear2, rtol=1e-5, atol=1e-5)

    # Backward
    Y_ref_linear2.sum().backward()

    # d(sum)/dY is all-ones; feed it manually through the parallel backward helpers.
    grad_Y = torch.ones_like(Y_ref_linear2)
    grad_X_linear2, grad_W_linear2 = row_linear_backward(grad_Y, Y_local_linear1, split_tensor(W_layer2, dim=1), group)

    torch.testing.assert_close(grad_X_linear2, split_tensor(Y_ref_linear1.grad, dim=1), rtol=1e-5, atol=1e-5)
    torch.testing.assert_close(grad_W_linear2, split_tensor(W_ref_layer2.grad, dim=1), rtol=1e-5, atol=1e-5)

    grad_X, grad_W = column_linear_backward(grad_X_linear2, X, split_tensor(W_layer1, dim=0), group)

    # NOTE(review): column_linear_backward returns a *partial* grad wrt X; this
    # compares it against the full X_ref.grad without an all-reduce — confirm
    # whether an all-reduce over `group` is intended before this check.
    torch.testing.assert_close(grad_X, X_ref.grad, rtol=1e-5, atol=1e-5)
    torch.testing.assert_close(grad_W, split_tensor(W_ref_layer1.grad, dim=0), rtol=1e-5, atol=1e-5)

if __name__ == "__main__":
    # Expects the environment variables set by torchrun; NCCL requires GPUs.
    dist.init_process_group("nccl", rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
    torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))

    example_column_row_linear()
|
520 |
+
</pre>
|
521 |
+
|
522 |
+
<p>Now that we’ve found the most efficient schema for the Feedforward part of the transformer, let’s take a look at the multi-head attention block (MHA).</p>
|
523 |
+
|
524 |
+
<p>We can generally follow a similar approach where the Q, K, V will be split in a Column Parallel fashion and the output projection will be split along the Row dimension.</p>
|
525 |
+
|
526 |
+
<img src="assets/images/image%2017.png" alt="Multi-Head Attention Block">
|
527 |
+
|
528 |
+
<p>To dive in further particularities, a nice reference paper detailing TP is for instance <a href="https://arxiv.org/abs/2205.05198">Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism</a>.</p>
|
529 |
+
|
530 |
+
<p>Note: Sequence Parallel</p>
|
531 |
+
|
532 |
+
<h3>Sequence Parallelism</h3>
|
533 |
+
|
534 |
+
<p>Tensor parallelism has been a great help to parallelize some of our computation on several GPU nodes with the limited cost of a few communication operations.</p>
|
535 |
+
|
536 |
+
<p>It also had the additional benefit of reducing memory usage by splitting intermediate activations inside the feedforward elements across GPUs and thereby reducing the activations to store on each node.</p>
|
537 |
+
|
538 |
+
<p>Could we push this approach further?</p>
|
539 |
+
|
540 |
+
<p>Sequence parallelism applies this same idea to other parts of our model. We’ve applied tensor parallelism to the two main parts of our models where the combination of MLPs allowed us to naturally split the weights along a major axis.</p>
|
541 |
+
|
542 |
+
<p>The rest of the model mostly comprises layer norms, dropout and various summation of residuals, these contribute little to the computation but come with rather large forward activations to store.</p>
|
543 |
+
|
544 |
+
<p>[Add some illustration of the forward activations to store for each part]</p>
|
545 |
+
|
546 |
+
<h3>Context Parallelism</h3>
|
547 |
+
|
548 |
+
<p>Even though TP-SP mode helps reduce the memory used by activation values, it has two main drawbacks:</p>
|
549 |
+
<ol>
|
550 |
+
<li>Internode connections are usually slow, so the TP degree shouldn't typically exceed 8</li>
|
551 |
+
<li>The TP degree is limited by the number of Key/Value heads, which is 8 for LLaMA 3 8B.</li>
|
552 |
+
</ol>
|
553 |
+
|
554 |
+
<p>An empirical estimation is that with TP=8, you can only train an 8B model with a 20K context length. However, LLaMA 3.1 has managed to scale the context length to 128K by using context parallelism.</p>
|
555 |
+
|
556 |
+
<p>There are several ways to implement context parallelism. We used ring attention, which overlaps communication and computation. LLaMA3.1 uses all-gather along the sequence dimension because it is easier and more flexible to support different types of attention masks in all-gather based CP attention, such as the document mask.</p>
|
557 |
+
|
558 |
+
<h3>Pipeline Parallelism</h3>
|
559 |
+
|
560 |
+
<h3>Overlapping computation and communication</h3>
|
561 |
+
|
562 |
+
<h3>ZeRO</h3>
|
563 |
+
|
564 |
+
<h2>II – Architecture</h2>
|
565 |
+
|
566 |
+
<h3>Transformers</h3>
|
567 |
+
|
568 |
+
<h3>Choosing the right dimensions</h3>
|
569 |
+
|
570 |
+
<h3>Positional Embeddings (Learned, RoPE, ALiBi)</h3>
|
571 |
+
|
572 |
+
<h3>RoPE</h3>
|
573 |
+
|
574 |
+
<p>In the transformer model, tokens have no inherent information about their positional information. For these reasons, we need to use a positional encoding function.</p>
|
575 |
+
|
576 |
+
<p>Assuming that in the multi-head attention layer, <em>q_m</em> is the “position-aware” query vector corresponding to a token at position <em>m</em>, <em>k_n</em> the “position-aware” key vector corresponding to the token at position <em>n</em> and <em>f</em> is our position embedding function, we would like our position vector to be a function of the input vectors and absolute positions like this:</p>
|
577 |
+
|
578 |
+
<d-math>
|
579 |
+
q_m = f(q,m)
|
580 |
+
k_n = f(k,n)
|
581 |
+
</d-math>
|
582 |
+
|
583 |
+
<p>We may also want the positional encoding to model relative positional information between two input tokens. Relative positions help the model to operate across longer context spans and even context lengths not seen during training. The attention operation is generally a dot product operation between “position-aware” vectors <em>q</em> and <em>k</em>, so for a positional encoding that contains relative positional information, we’ll want to have:</p>
|
584 |
+
|
585 |
+
<d-math>
|
586 |
+
\langle q_m, k_n \rangle = g(q, k, m-n)
|
587 |
+
</d-math>
|
588 |
+
|
589 |
+
<p>In other words, we want the result of <em>⟨ 𝑞_𝑚 , 𝑘_𝑛 ⟩</em> to depend on the values of <em>q</em> and <em>k</em> themselves, as well as their relative position <em>m − n</em>, but not <em>m</em> and <em>n</em>. This way, the model can focus on the relative difference between two tokens rather than their absolute positions.</p>
|
590 |
+
|
591 |
+
<p>Let’s show that the RoPE positional embedding formulation satisfies the above formula.</p>
|
592 |
+
|
593 |
+
<p><strong>Rotation matrix</strong></p>
|
594 |
+
|
595 |
+
<p>RoPE are based on rotation matrices which have simple and interesting properties for us. In a 2D space, a rotation matrix has the following form:</p>
|
596 |
+
|
597 |
+
<d-math>
|
598 |
+
R(θ) =
|
599 |
+
\begin{pmatrix}
|
600 |
+
\cosθ & -\sinθ \\
|
601 |
+
\sinθ & \cosθ
|
602 |
+
\end{pmatrix}
|
603 |
+
</d-math>
|
604 |
+
|
605 |
+
<p>The rotation matrix has the following properties:</p>
|
606 |
+
<ul>
|
607 |
+
<li><em>R(θ)</em><sup>T</sup> = <em>R(-θ)</em></li>
|
608 |
+
<li><em>R(θ<sub>1</sub>)R(θ<sub>2</sub>) = R(θ<sub>1</sub>+θ<sub>2</sub>)</em></li>
|
609 |
+
</ul>
|
610 |
+
|
611 |
+
<img src="assets/images/rotation.jpeg" alt="Rotation Matrix">
|
612 |
+
|
613 |
+
<p><strong>RoPE in 2D space</strong></p>
|
614 |
+
|
615 |
+
<p>Assuming <em>q</em> and <em>k</em> are 2D column vectors, we can show that:</p>
|
616 |
+
|
617 |
+
<d-math>
|
618 |
+
\langle R(θ_1)q, R(θ_2)k \rangle = (R(θ_1)q)^T (R(θ_2)k) = q^T R(-θ_1)R(θ_2)k = q^T R(θ_2-θ_1)k = (R(θ_1-θ_2)q)^T k = \langle R(θ_1-θ_2)q, k \rangle
|
619 |
+
</d-math>
|
620 |
+
|
621 |
+
<p>Therefore, if we define our position embedding like this: <em>f(x, m) = R(mθ)x</em> where <em>R</em> is a 2D rotation matrix, we have <em>q_m = R(mθ)q</em> and <em>k_n = R(nθ)k</em> and then:</p>
|
622 |
+
|
623 |
+
<d-math>
|
624 |
+
\langle q_m, k_n \rangle = \langle R(mθ)q, R(nθ)k \rangle = \langle R((m-n)θ)q, k \rangle
|
625 |
+
</d-math>
|
626 |
+
|
627 |
+
<p>We can see that a multiplication with a rotation matrix is exactly the positional encoding we were looking for. The result of <em>⟨ 𝑞_𝑚 , 𝑘_𝑛 ⟩</em> only depends on <em>q</em>, <em>k</em> and <em>m-n</em>.</p>
|
628 |
+
|
629 |
+
<p><strong>Implementation</strong></p>
|
630 |
+
|
631 |
+
<p>In our case, our internal vectors (the activations in our model) have much more than two elements. Let’s pair elements to get 2D vectors and apply the 2D rotation operation on these pairs.</p>
|
632 |
+
|
633 |
+
<p>There are combinatorially many ways we can pair elements but generally two options are the most popular for implementing RoPE: we call them the <em>interleaved</em> and <em>non-interleaved</em> versions. (It’s still rather unfortunate to have two popular options)</p>
|
634 |
+
|
635 |
+
<ol>
|
636 |
+
<li>In the interleaved version, we pair consecutive elements <em>(x<sub>0</sub>, x<sub>1</sub>),(x<sub>2</sub>,x<sub>3</sub>),…</em> before applying the rotation matrix:</li>
|
637 |
+
<d-math>
|
638 |
+
R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
|
639 |
+
x_0 \\
|
640 |
+
x_1 \\
|
641 |
+
x_2 \\
|
642 |
+
x_3 \\
|
643 |
+
\vdots \\
|
644 |
+
x_{d-2} \\
|
645 |
+
x_{d-1}
|
646 |
+
\end{pmatrix}
|
647 |
+
\odot
|
648 |
+
\begin{pmatrix}
|
649 |
+
\cos mθ_0 \\
|
650 |
+
\cos mθ_0 \\
|
651 |
+
\cos mθ_1 \\
|
652 |
+
\cos mθ_1 \\
|
653 |
+
\vdots \\
|
654 |
+
\cos mθ_{d/2-1} \\
|
655 |
+
\cos mθ_{d/2-1}
|
656 |
+
\end{pmatrix}
|
657 |
+
+
|
658 |
+
\begin{pmatrix}
|
659 |
+
-x_1 \\
|
660 |
+
x_0 \\
|
661 |
+
-x_3 \\
|
662 |
+
x_2 \\
|
663 |
+
\vdots \\
|
664 |
+
-x_{d-1} \\
|
665 |
+
x_{d-2}
|
666 |
+
\end{pmatrix}
|
667 |
+
\odot
|
668 |
+
\begin{pmatrix}
|
669 |
+
\sin mθ_0 \\
|
670 |
+
\sin mθ_0 \\
|
671 |
+
\sin mθ_1 \\
|
672 |
+
\sin mθ_1 \\
|
673 |
+
\vdots \\
|
674 |
+
\sin mθ_{d/2-1} \\
|
675 |
+
\sin mθ_{d/2-1}
|
676 |
+
\end{pmatrix}
|
677 |
+
</d-math>
|
678 |
+
<d-math>
|
679 |
+
R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
|
680 |
+
x_0\cos mθ_0 - x_1\sin mθ_0 \\
|
681 |
+
x_1\cos mθ_0 + x_0\sin mθ_0 \\
|
682 |
+
x_2\cos mθ_1 - x_3\sin mθ_1 \\
|
683 |
+
x_3\cos mθ_1 + x_2\sin mθ_1 \\
|
684 |
+
\vdots \\
|
685 |
+
x_{d-2}\cos mθ_{d/2-1} - x_{d-1}\sin mθ_{d/2-1} \\
|
686 |
+
x_{d-1}\cos mθ_{d/2-1} + x_{d-2}\sin mθ_{d/2-1}
|
687 |
+
\end{pmatrix}
|
688 |
+
</d-math>
|
689 |
+
<li>In the non-interleaved version, we split the vector in two to pair elements as follows: <em>(x<sub>0</sub>, x<sub>d/2</sub>),(x<sub>1</sub>,x<sub>d/2+1</sub>),…</em> This is the implementation used in the <code>transformers</code> library:</li>
|
690 |
+
<d-math>
|
691 |
+
R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
|
692 |
+
x_0 \\
|
693 |
+
x_1 \\
|
694 |
+
\vdots \\
|
695 |
+
x_{d/2-1} \\
|
696 |
+
x_{d/2} \\
|
697 |
+
x_{d/2+1} \\
|
698 |
+
\vdots \\
|
699 |
+
x_{d-1}
|
700 |
+
\end{pmatrix}
|
701 |
+
\odot
|
702 |
+
\begin{pmatrix}
|
703 |
+
\cos mθ_0 \\
|
704 |
+
\cos mθ_1 \\
|
705 |
+
\vdots \\
|
706 |
+
\cos mθ_{d/2-1} \\
|
707 |
+
\cos mθ_{0} \\
|
708 |
+
\cos mθ_{1} \\
|
709 |
+
\vdots \\
|
710 |
+
\cos mθ_{d/2-1}
|
711 |
+
\end{pmatrix}
|
712 |
+
+
|
713 |
+
\begin{pmatrix}
|
714 |
+
-x_{d/2} \\
|
715 |
+
-x_{d/2+1} \\
|
716 |
+
\vdots \\
|
717 |
+
-x_{d-1} \\
|
718 |
+
x_{0} \\
|
719 |
+
x_{1} \\
|
720 |
+
\vdots \\
|
721 |
+
x_{d/2-1}
|
722 |
+
\end{pmatrix}
|
723 |
+
\odot
|
724 |
+
\begin{pmatrix}
|
725 |
+
\sin mθ_0 \\
|
726 |
+
\sin mθ_1 \\
|
727 |
+
\vdots \\
|
728 |
+
\sin mθ_{d/2-1} \\
|
729 |
+
\sin mθ_{0} \\
|
730 |
+
\sin mθ_{1} \\
|
731 |
+
\vdots \\
|
732 |
+
\sin mθ_{d/2-1}
|
733 |
+
\end{pmatrix}
|
734 |
+
</d-math>
|
735 |
+
<d-math>
|
736 |
+
R<sup>d</sup>_{θ,m}x=\begin{pmatrix}
|
737 |
+
x_0\cos mθ_0 - x_{d/2}\sin mθ_0 \\
|
738 |
+
x_1\cos mθ_1 - x_{d/2+1}\sin mθ_1 \\
|
739 |
+
\vdots \\
|
740 |
+
x_{d/2-1}\cos mθ_{d/2-1} - x_{d-1}\sin mθ_{d/2-1} \\
|
741 |
+
x_{d/2}\cos mθ_0 + x_0\sin mθ_0 \\
|
742 |
+
x_{d/2+1}\cos mθ_1 + x_1\sin mθ_1 \\
|
743 |
+
\vdots \\
|
744 |
+
x_{d-1}\cos mθ_{d/2-1} + x_{d/2-1}\sin mθ_{d/2-1} \\
|
745 |
+
\end{pmatrix}
|
746 |
+
</d-math>
|
747 |
+
<p>The angle of rotation, <em>θ<sub>i</sub></em> is defined as follows, where <em>d</em> is the dimension of the attention head:</p>
|
748 |
+
<d-math>
|
749 |
+
θ_i = \text{base}^{-2(i-1)/d}, \quad i \in [1, 2, ..., d/2]
|
750 |
+
</d-math>
|
751 |
+
<p>How does this look? When moving the same distance, vectors in some dimensions rotate faster than vectors in other dimensions.</p>
|
752 |
+
<img src="assets/images/rotation_speed.jpeg" alt="Rotation Speed">
|
753 |
+
</ol>
|
754 |
+
|
755 |
+
<h3>Attention (MHA, MQA, GQA)</h3>
|
756 |
+
|
757 |
+
<h2>Optimized Operations</h2>
|
758 |
+
|
759 |
+
<h3>Flash Attention 1&2&3</h3>
|
760 |
+
|
761 |
+
<h3>Fused Kernels</h3>
|
762 |
+
|
763 |
+
<h2>III – Training Recipe</h2>
|
764 |
+
|
765 |
+
<h3>Batch Size</h3>
|
766 |
+
|
767 |
+
<h3>Initialization + rescaling activations inside the model</h3>
|
768 |
+
|
769 |
+
<h3>Numerical Precision</h3>
|
770 |
+
|
771 |
+
<h4>FP16/BF16/FP8</h4>
|
772 |
+
|
773 |
+
<p>@Phuc Nguyen?</p>
|
774 |
+
|
775 |
+
<h3>Long Context Training</h3>
|
776 |
+
|
777 |
+
<h3>Evaluation</h3>
|
778 |
+
|
779 |
+
<p>@Haojun Zhao</p>
|
780 |
+
|
781 |
+
<h3>Infini-Attention</h3>
|
782 |
+
|
783 |
+
<p>@Phuc Nguyen</p>
|
784 |
+
|
785 |
+
<h3>Ring Attention</h3>
|
786 |
+
|
787 |
+
<p>@Haojun Zhao</p>
|
788 |
+
|
789 |
+
<h3>RoPE scaling / Yarn</h3>
|
790 |
+
|
791 |
+
<p>@Haojun Zhao maybe?</p>
|
792 |
+
|
793 |
+
<h2>References</h2>
|
794 |
+
|
795 |
+
<ul>
|
796 |
+
<li>Harm’s posts:
|
797 |
+
<ul>
|
798 |
+
<li><a href="https://www.harmdevries.com/post/context-length/">https://www.harmdevries.com/post/context-length/</a></li>
|
799 |
+
<li><a href="https://www.harmdevries.com/post/model-size-vs-compute-overhead/">https://www.harmdevries.com/post/model-size-vs-compute-overhead/</a></li>
|
800 |
+
</ul>
|
801 |
+
</li>
|
802 |
+
<li>Stas’ guides:
|
803 |
+
<ul>
|
804 |
+
<li><a href="https://github.com/stas00/ml-engineering">https://github.com/stas00/ml-engineering</a></li>
|
805 |
+
<li><a href="https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md">https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md</a></li>
|
806 |
+
</ul>
|
807 |
+
</li>
|
808 |
+
<li>data parallel: <a href="https://siboehm.com/articles/22/data-parallel-training">https://siboehm.com/articles/22/data-parallel-training</a></li>
|
809 |
+
<li>ZeRO: <a href="https://arxiv.org/abs/1910.02054">https://arxiv.org/abs/1910.02054</a></li>
|
810 |
+
<li>TP/SP + Selective Recomputation: <a href="https://arxiv.org/abs/2205.05198">https://arxiv.org/abs/2205.05198</a></li>
|
811 |
+
</ul>
|
812 |
+
<h2>Conclusion and looking forward</h2>
|
813 |
+
<p>Through our open science efforts we hope to keep shining a light on the black box that is the training of high performance large language models as well as to give every model trainer the ability to create state-of-the-art LLMs. We are excited to continue iterating on FineWeb and to release increasingly better filtered subsets of web data, in a fully open and reproducible manner.</p>
|
814 |
+
<p>In the short term, we are looking forward to applying the learnings from (English) FineWeb to other languages. While English currently dominates the LLM landscape, we believe that making high quality web data in other languages as accessible as possible would be incredibly impactful.</p>
|
815 |
+
<p>In a nutshell: the future is bright and exciting for studying the science of creating datasets at scale and in the open 🤗.</p>
|
816 |
+
</d-article>
|
817 |
+
|
818 |
+
<d-appendix>
|
819 |
+
<d-bibliography src="bibliography.bib"></d-bibliography>
|
820 |
+
<style>
|
821 |
+
d-appendix .citation {
|
822 |
+
font-size: 11px;
|
823 |
+
line-height: 15px;
|
824 |
+
border-left: 1px solid rgba(0, 0, 0, 0.1);
|
825 |
+
padding-left: 18px;
|
826 |
+
border: 1px solid rgba(0,0,0,0.1);
|
827 |
+
background: rgba(0, 0, 0, 0.02);
|
828 |
+
padding: 10px 18px;
|
829 |
+
border-radius: 3px;
|
830 |
+
color: rgba(150, 150, 150, 1);
|
831 |
+
overflow: hidden;
|
832 |
+
margin-top: -12px;
|
833 |
+
white-space: pre-wrap;
|
834 |
+
word-wrap: break-word;
|
835 |
+
}
|
836 |
+
</style>
|
837 |
+
|
838 |
+
<h3 id="citation">Citation</h3>
|
839 |
+
<p>For attribution in academic contexts, please cite this work as</p>
|
840 |
+
<pre class="citation short">Penedo, et al., "The FineWeb Datasets: Decanting the Web for the Finest Text Data at Scale", 2024.</pre>
|
841 |
+
<p>BibTeX citation</p>
|
842 |
+
<pre class="citation long">@misc{penedo2024finewebdatasetsdecantingweb,
|
843 |
+
title={The FineWeb Datasets: Decanting the Web for the Finest Text Data at Scale},
|
844 |
+
author={Guilherme Penedo and Hynek Kydlíček and Loubna Ben allal and Anton Lozhkov and Margaret Mitchell and Colin Raffel and Leandro Von Werra and Thomas Wolf},
|
845 |
+
year={2024},
|
846 |
+
eprint={2406.17557},
|
847 |
+
archivePrefix={arXiv},
|
848 |
+
primaryClass={cs.CL},
|
849 |
+
url={https://arxiv.org/abs/2406.17557},
|
850 |
+
}</pre>
|
851 |
+
</d-appendix>
|
852 |
+
|
853 |
+
<script>
|
854 |
+
// Build the table of contents for the article and keep the entry matching the
// current scroll position highlighted.
const article = document.querySelector('d-article');
const toc = document.querySelector('d-contents');
if (toc) {
    // All candidate headings, in document order.
    const headings = article.querySelectorAll('h2, h3, h4');
    let ToC = `<nav role="navigation" class="l-text figcaption"><h3>Table of contents</h3>`;
    // Nesting depth of the <ul> list currently being emitted (0 = top level).
    let prevLevel = 0;

    for (const el of headings) {
        // Skip headings that should not appear in the TOC: the article title
        // and any heading explicitly tagged with a `no-toc` attribute.
        const isInTitle = el.parentElement.tagName == 'D-TITLE';
        const isException = el.getAttribute('no-toc');
        if (isInTitle || isException) continue;
        // Derive an anchor id from the heading text (lowercased, spaces -> underscores).
        el.setAttribute('id', el.textContent.toLowerCase().replaceAll(" ", "_"))
        const link = '<a target="_self" href="' + '#' + el.getAttribute('id') + '">' + el.textContent + '</a>';

        // h2 -> level 0, h3 -> level 1, everything else (h4) -> level 2.
        const level = el.tagName === 'H2' ? 0 : (el.tagName === 'H3' ? 1 : 2);
        // Open or close <ul> lists until the emitted depth matches this heading's level.
        while (prevLevel < level) {
            ToC += '<ul>'
            prevLevel++;
        }
        while (prevLevel > level) {
            ToC += '</ul>'
            prevLevel--;
        }
        if (level === 0)
            ToC += '<div>' + link + '</div>';
        else
            ToC += '<li>' + link + '</li>';
    }

    // Close any lists still open after the last heading.
    while (prevLevel > 0) {
        ToC += '</ul>'
        prevLevel--;
    }
    ToC += '</nav>';
    toc.innerHTML = ToC;
    toc.setAttribute('prerendered', 'true');
    // NOTE(review): the scroll handler below indexes `toc_links` with the same
    // `i` as `headings`, but headings skipped above (title / no-toc) produce no
    // link, so the two collections can fall out of alignment — confirm no
    // headings are actually skipped in this document.
    const toc_links = document.querySelectorAll('d-contents > nav a');

    window.addEventListener('scroll', (_event) => {
        if (typeof (headings) != 'undefined' && headings != null && typeof (toc_links) != 'undefined' && toc_links != null) {
            // Iterate backwards over the headings; highlight the first one found
            // above the viewport (with a 50px offset) and stop.
            find_active: {
                for (let i = headings.length - 1; i >= 0; i--) {
                    if (headings[i].getBoundingClientRect().top - 50 <= 0) {
                        if (!toc_links[i].classList.contains("active")) {
                            toc_links.forEach((link, _index) => {
                                link.classList.remove("active");
                            });
                            toc_links[i].classList.add('active');
                        }
                        break find_active;
                    }
                }
                // No heading has scrolled past the top yet: clear any highlight.
                toc_links.forEach((link, _index) => {
                    link.classList.remove("active");
                });
            }
        }
    });
}
|
915 |
+
</script>
|
916 |
+
</body>
|
917 |
+
</html>
|
src/index.js
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Entry point for the article page bundle: wires up interactive plots on load.
// import { plotClusters } from './clusters'
import { init_ablation_plot } from './plotting'
// Pull in KaTeX styles so math rendered on the page displays correctly.
import 'katex/dist/katex.min.css';
// NOTE(review): the default `katex` import is not referenced below —
// presumably kept for module side effects or future use; confirm it is needed.
import katex from 'katex';

// Initialize the ablation plots once the DOM is ready; `once: true` removes
// the listener after its first invocation.
document.addEventListener("DOMContentLoaded", () => {
    console.log("DOMContentLoaded");
    // plotClusters();
    init_ablation_plot();
}, { once: true });
|
src/plotting.js
ADDED
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import { getColor, COLORS } from "./colors.mjs"
|
2 |
+
import Plotly from "plotly.js-basic-dist-min"
|
3 |
+
import _ from "lodash"
|
4 |
+
|
5 |
+
// Base path (relative to the page) where the plot data files are fetched from.
const DATA_FOLDER = "assets/data/plots"


// Shared Plotly trace settings for line charts (scatter traces in "lines" mode).
// NOTE(review): a top-level `width` is not a standard scatter-trace attribute —
// presumably consumed by the plotting code before being handed to Plotly; confirm.
const LINE_SETTINGS = {
    width: 2.5,
    type: "scatter",
    mode: "lines",
}
// Shared Plotly trace settings for bar charts.
const BAR_SETTINGS = {
    width: 0.5,
    type: "bar",
    opacity: 0.9,
    marker: {
        line: {
            width: 1.0  // outline width drawn around each bar
        }
    }
}
|
23 |
+
|
24 |
+
// Display/sort priority per metric id (lower value = listed first).
// Two independent groups share this map — ablation benchmark metrics and
// document statistics; the repeated priority values (0, 1, 2) presumably never
// clash because only one group is shown per plot — TODO confirm.
const METRIC_ID_TO_PRIORITY = {
    "agg_score": 0,
    "hellaswag/acc_norm": 1,
    "arc/acc_norm": 2,
    "mmlu/acc_norm": 3,
    "openbookqa/acc_norm": 4,
    "commonsense_qa/acc_norm": 5,
    "piqa/acc_norm": 6,
    "siqa/acc_norm": 7,
    "winogrande/acc_norm": 8,


    // Stats
    "lines_ended_with_punct": 0,
    "lines_chars": 1,
    "short_lines": 2,
}
|
41 |
+
|
42 |
+
// Human-readable display names for metric/stat ids, used for dropdown
// entries and the y-axis title. Also acts as the allow-list of metrics
// shown in the dropdown (ids absent here are filtered out).
const TASK_ID_TO_NAME = {
    // Ablations
    agg_score: "Aggregate Score",
    "commonsense_qa/acc_norm": "Commonsense QA",
    "hellaswag/acc_norm": "HellaSwag",
    "openbookqa/acc_norm": "OpenBook QA",
    "piqa/acc_norm": "PIQA",
    "siqa/acc_norm": "Social IQA",
    "winogrande/acc_norm": "WinoGrande",
    "arc/acc_norm": "ARC",
    "mmlu/acc_norm": "MMLU",

    // Stats
    "lines_ended_with_punct": "Lines Ended With Punctuation",
    "lines_chars": "Lines Chars",
    "short_lines": "Short Lines",
};
|
59 |
+
|
60 |
+
// Maps raw dataset/processing-run ids (as they appear in the data files)
// to human-readable legend names.
// Fix: the original literal declared "dedup_minhash_CC-MAIN-2013-48_output"
// and "dedup_minhash_independent_output_CC-MAIN-2013-48" twice each; the
// later duplicates silently overwrote the earlier (identical) values, so
// they have been removed.
const DATASET_ID_TO_NAME = {
    pii_removed: "Fineweb",
    allenai_c4_en: "C4",
    "tiiuae_falcon-refinedweb_data": "RefinedWeb",
    "red-pajama-v2_jsonl-deduplicated-extract": "RedPajamaV2",
    "dolma-sample": "Dolma1.6",
    dedup_minhash_independent_output: "Independent Dedup MinHash",
    "dedup_minhash_CC-MAIN-2013-48_output": "Full MinHash CC-MAIN-2013-48",
    "dedup_minhash_independent_output_CC-MAIN-2013-48": "Independent MinHash CC-MAIN-2013-48",
    "ind_minhash-CC-MAIN-2019-18": "Independent MinHash CC-MAIN-2019-18",
    "wet-extraction-2019-18": "WET Extraction 2019-18",
};
|
75 |
+
|
76 |
+
// Per-plot settings defaults; each plot's index.json `settings` entry is
// deep-merged on top of this object at init time.
const DEFAULT_SETTINGS = {
    // Bounds for the rolling-window slider (window size in data points).
    slider: {
        max: 30,
        min: 0,
        default: 0,
    },
    // Metric selected in the dropdown on first render.
    defaultMetric: "agg_score",
    // Trace style: "line" or "bar" (selects LINE_SETTINGS vs BAR_SETTINGS).
    type: "line"
};
|
85 |
+
|
86 |
+
// Base Plotly layout shared by every ablation plot; the per-metric
// `layout` object from each data file is deep-merged on top of this
// at render time.
const DEFAULT_LAYOUT = {
    font: {
        family: "apple-system, Arial, sans-serif",
    },
    title: {
        text: "Plot Title",
        font: {
            size: 19,
            family: "apple-system, Arial, sans-serif",
        },
    },
    xaxis: {
        title: {
            text: "Training tokens (billions)",
            font: {
                size: 15,
                family: "apple-system, Arial, sans-serif",
            },
        },
        tickfont: {
            size: 14,
            family: "apple-system, Arial, sans-serif",
        },
        showgrid: false,
        mirror: true,
        ticks: "outside",
        showline: true,
    },
    yaxis: {
        title: {
            text: "Agg Score",
            font: {
                size: 15,
                family: "apple-system, Arial, sans-serif",
            },
            standoff: 10,
        },
        showgrid: false,
        mirror: true,
        ticks: "outside",
        showline: true,
        tickfont: {
            size: 14,
            family: "apple-system, Arial, sans-serif",
        },
    },
    // Secondary y-axis for traces that set `yaxis: "y2"`.
    yaxis2: {
        title: {
            text: "Words Contamination",
            font: {
                size: 15,
                family: "apple-system, Arial, sans-serif",
            },
            standoff: 10,
        },
        tickfont: {
            size: 14,
            family: "apple-system, Arial, sans-serif",
        },
        showgrid: false,
        ticks: "outside",
    },
    // Legend pinned to the bottom-right corner inside the plot area,
    // transparent so it does not hide the traces behind it.
    legend: {
        orientation: "v",
        xanchor: "right",
        yanchor: "bottom",
        x: 1,
        y: 0,
        font: {
            size: 14,
            family: "apple-system, Arial, sans-serif",
        },
        bgcolor: "rgba(0,0,0,0)",
    },
    margin: {
        t: 30,
        b: 50,
    },
    height: 400,
};
|
166 |
+
|
167 |
+
// Compute an x-axis range that covers every trace's x values, padded
// multiplicatively by 5% on each side.
const getAutoRange = (traces) => {
    const allX = traces.flatMap((trace) => trace.x);
    const lowest = Math.min(...allX);
    const highest = Math.max(...allX);
    return [lowest * 0.95, highest * 1.05];
};
|
172 |
+
|
173 |
+
// Return a stable color for `traceName`.
// `colorsMapping` is a Map of traceName -> color: if the trace already has
// a color assigned, reuse it; otherwise pick the first palette color not
// already assigned to another trace (starting from `index`) and record it.
const getColorForTrace = (traceName, colorsMapping, index) => {
    // First check if the color already exists in colorsMapping and if so return it
    const reusedColor = colorsMapping.get(traceName);
    if (reusedColor) {
        return reusedColor;
    }

    // Fix: the map is keyed by trace NAME, so the original check
    // `colorsMapping.has(color)` never detected color collisions.
    // Compare against the assigned color VALUES instead.
    const usedColors = new Set(colorsMapping.values());
    let color = getColor(index);
    while (usedColors.has(color) && index < COLORS.length) {
        index += 1;
        color = getColor(index);
    }
    colorsMapping.set(traceName, color);
    return color;
};
|
188 |
+
|
189 |
+
|
190 |
+
// Build the DOM scaffolding for one ablation plot inside `plotElement`:
// the <figure> that will host Plotly, a controls bar with an optional
// metric dropdown and rolling-window slider, and an optional caption.
// Returns the created elements (dropdown/slider/caption are undefined
// when not created).
const createAblationPlottingElements = (
    plotElement,
    indexMapping,
    settings
) => {
    const figure = document.createElement("figure");
    figure.classList.add("plotly");
    const controlsBar = document.createElement("div");
    controlsBar.classList.add("plotly_controls");
    plotElement.appendChild(figure);
    plotElement.appendChild(controlsBar);

    // Only metrics with a known display name are offered.
    const metricOptions = Object.keys(indexMapping).filter(
        (metric) => metric in TASK_ID_TO_NAME
    );

    // Dropdown — only useful when there is more than one metric.
    let dropdown = undefined;
    if (metricOptions.length > 1) {
        dropdown = document.createElement("select");
        const sortedOptions = metricOptions.sort(
            (a, b) => (METRIC_ID_TO_PRIORITY[a] ?? 0) - (METRIC_ID_TO_PRIORITY[b] ?? 0)
        );
        dropdown.innerHTML = sortedOptions
            .map(
                (option) =>
                    `<option value="${option}">${TASK_ID_TO_NAME[option]}</option>`
            )
            .join("");
        dropdown.value = settings.defaultMetric;

        const dropdownLabel = document.createElement("label");
        dropdownLabel.textContent = "Metric:";
        const dropdownBox = document.createElement("div");
        dropdownBox.classList.add("plotly_input_container");
        dropdownBox.appendChild(dropdownLabel);
        dropdownBox.appendChild(dropdown);
        controlsBar.appendChild(dropdownBox);
    }

    // Rolling-window slider (skipped when settings.slider is null).
    let slider = undefined;
    if (settings.slider !== null) {
        slider = document.createElement("input");
        slider.type = "range";
        slider.min = settings.slider.min;
        slider.max = settings.slider.max;
        slider.value = settings.slider.default;

        // Live readout of the current window size next to the slider.
        const readout = document.createElement("span");
        readout.textContent = slider.value;
        slider.addEventListener("input", () => {
            readout.textContent = slider.value;
        });

        const sliderBox = document.createElement("div");
        sliderBox.classList.add("plotly_slider");
        sliderBox.appendChild(slider);
        sliderBox.appendChild(readout);

        const sliderLabel = document.createElement("label");
        sliderLabel.textContent = "Rolling window:";
        const sliderContainer = document.createElement("div");
        sliderContainer.classList.add("plotly_input_container");
        sliderContainer.appendChild(sliderLabel);
        sliderContainer.appendChild(sliderBox);
        controlsBar.appendChild(sliderContainer);
    }

    // Optional figure caption below the plot.
    let caption = undefined;
    if (settings.caption) {
        caption = document.createElement("figcaption");
        caption.classList.add("plotly_caption");
        caption.textContent = settings.caption;
        plotElement.appendChild(caption);
    }

    return { dropdown, slider, plot: figure, caption };
};
|
265 |
+
|
266 |
+
// Smooth `data` with a trailing moving average over `windowSize` points.
// A windowSize of 0 disables smoothing and returns the input untouched.
// Only full windows are emitted, so the output has
// data.length - windowSize + 1 points.
const rollingWindow = function (data, windowSize) {
    if (windowSize === 0) {
        return data;
    }
    const rollingData = [];

    // `i` is the exclusive end of the window. Fix: the original loop used
    // `i < data.length`, which dropped the final window and therefore lost
    // the last data point of every smoothed series; `i <= data.length`
    // includes it while still only averaging full windows.
    for (let i = windowSize; i <= data.length; i++) {
        const windowData = data.slice(i - windowSize, i);
        const windowAverage =
            windowData.reduce((acc, value) => acc + value, 0) /
            windowData.length;
        rollingData.push(windowAverage);
    }

    return rollingData;
};
|
286 |
+
|
287 |
+
// Convert the raw per-dataset series in `data` into Plotly trace objects:
// applies rolling-window smoothing, assigns consistent per-dataset colors
// via `colorsMapping`, and merges in the line/bar style defaults.
const createTraces = (data, settings, colorsMapping, sliderValue) => {
    if (!data) {
        return [];
    }
    return Object.entries(data).map(([key, traceData], index) => {
        const smoothedY = rollingWindow(traceData.y, sliderValue);
        // Smoothing shortens the series, so trim x to match.
        const trimmedX = traceData.x.slice(0, smoothedY.length);
        const styleDefaults = settings?.type === "bar" ? BAR_SETTINGS : LINE_SETTINGS;
        const traceColor = traceData.color ?? getColorForTrace(key, colorsMapping, index);
        const baseTrace = {
            x: trimmedX,
            y: smoothedY,
            name: traceData.label ?? DATASET_ID_TO_NAME[key] ?? key,
            marker: {
                color: traceColor,
            },
            line: {
                color: traceColor,
            },
            yaxis: traceData.yaxis ?? "y1"
        };
        return _.merge({}, baseTrace, styleDefaults);
    });
}
|
312 |
+
|
313 |
+
// Find every element whose id starts with "plot-" and turn it into an
// interactive ablation plot backed by the JSON files under
// DATA_FOLDER/<plotName>/ (index.json lists metrics + settings; one data
// file per metric).
export const init_ablation_plot = function () {
    const plotElements = document.querySelectorAll('[id^="plot-"]');
    plotElements.forEach(async (plotElement) => {
        const plotName = plotElement.id.replace("plot-", "");
        const indexData = await fetch(`${DATA_FOLDER}/${plotName}/index.json`).then(
            (response) => response.json()
        );
        const settings = _.merge({}, DEFAULT_SETTINGS, indexData.settings);
        const indexMapping = indexData.files;
        const { dropdown, slider, plot } = createAblationPlottingElements(
            plotElement,
            indexMapping,
            settings
        );
        plot.id = `graph-${plotName}`;
        if (dropdown !== undefined) {
            dropdown.addEventListener("change", () => updatePlot(dropdown, slider));
        }
        let timeoutId;
        // Debounce the slider so we don't refetch/redraw on every tick.
        if (slider !== undefined) {
            slider.addEventListener("input", () => {
                clearTimeout(timeoutId);
                timeoutId = setTimeout(() => {
                    updatePlot(dropdown, slider);
                }, 500);
            });
        }
        // Shared plot
        Plotly.newPlot(plot, []);

        // Keep the plot sized to its container. Fix: this listener was
        // previously registered inside updatePlot, leaking one additional
        // window listener per metric/slider change; register it once per plot.
        window.addEventListener("resize", () => {
            // If the window size is smaller than 768, we don't care as it's not shown
            if (window.innerWidth < 768) {
                return;
            }
            Plotly.relayout(plot, {
                width: plot.parentElement.offsetWidth,
            });
        });

        // This is to ensure that the colors are consistent across different metrics
        const colorsMapping = new Map();

        // Fetch the selected metric's data file and (re)render the plot.
        async function updatePlot(dropdown, slider) {
            const metricName = dropdown?.value ?? settings.defaultMetric;
            const sliderValue = parseInt(slider?.value ?? "0", 10);
            const metricData = await fetch(
                `${DATA_FOLDER}/${plotName}/${indexMapping[metricName]["file"]}`
            ).then((response) => response.json());
            // Pre-baked traces from the data file come first, then computed ones.
            const traces = (metricData?.traces ?? []).concat(
                createTraces(metricData.data, settings, colorsMapping, sliderValue)
            );
            const width = plot.parentElement.offsetWidth;
            const layout = _.merge(
                {},
                DEFAULT_LAYOUT,
                {
                    width: width,
                    yaxis: { title: { text: TASK_ID_TO_NAME[metricName] } },
                    xaxis: {
                        range: null
                    },
                },
                metricData.layout
            );
            Plotly.react(plot, traces, layout);
        }

        // Initial plot
        updatePlot(dropdown, slider);
    });
};
|
{dist → src}/style.css
RENAMED
File without changes
|
webpack.config.js
ADDED
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
const path = require("path");
|
2 |
+
const { CleanWebpackPlugin } = require("clean-webpack-plugin");
|
3 |
+
const CopyPlugin = require("copy-webpack-plugin");
|
4 |
+
const BundleAnalyzerPlugin = require("webpack-bundle-analyzer").BundleAnalyzerPlugin;
|
5 |
+
|
6 |
+
// JSON keys whose string values may hold a "<namedColor> <opacity>" spec.
const COLOR_KEYS = ["color", "bgColor", "fillcolor"];

// webpack CopyPlugin `transform` hook: rewrites "<namedColor> <opacity>"
// strings under color keys inside copied JSON assets into concrete color
// values via getNamedColor. Non-JSON files pass through unchanged.
// Fixes: param renamed from `path` (shadowed the `path` module required at
// the top of this file); loose `== undefined` replaced with strict checks;
// guard added so non-string values under a color key no longer throw on
// `.trim()`; inner reduce no longer shadows the outer `key`.
const transformDataColors = async (data, filePath) => {
    const { getNamedColor } = await import('./src/colors.mjs');
    // if not a json file, return the content untouched
    if (!filePath.endsWith(".json")) {
        return data;
    }
    const parsedData = JSON.parse(data);
    // Recursively walk the parsed JSON, replacing color-valued strings.
    const deepIterateAndSetColor = (key, val) => {
        if (val === null || val === undefined) {
            return val;
        }
        if (Array.isArray(val)) {
            return val.map((item) => deepIterateAndSetColor(key, item));
        }
        if (typeof val === "object") {
            return Object.entries(val).reduce((newObj, [childKey, childVal]) => {
                newObj[childKey] = deepIterateAndSetColor(childKey, childVal);
                return newObj;
            }, {});
        }
        // Only string leaves under a color key can be "<name> <opacity>" specs.
        if (COLOR_KEYS.includes(key) && typeof val === "string") {
            const [colorName, opacity, ...rest] = val.trim().split(/\s+/);
            const floatOpacity = parseFloat(opacity);
            const newColor = getNamedColor(colorName, floatOpacity);
            if (newColor !== undefined && rest.length === 0 && !Number.isNaN(floatOpacity)) {
                console.log(`key: ${key} in file ${filePath} changed from ${val} to ${newColor}`);
                return newColor;
            }
            return val;
        }
        return val;
    };
    return JSON.stringify(deepIterateAndSetColor(undefined, parsedData));
};
|
47 |
+
|
48 |
+
// Webpack build configuration: two entry bundles (distill + main) emitted
// to dist/, static assets copied at build time with JSON color rewriting
// (see transformDataColors above). NODE_ENV toggles dev/prod behavior.
module.exports = {
  entry: {
    distill: "./src/distill.js",
    main: "./src/index.js",
  },
  output: {
    filename: "[name].bundle.js", // The output file
    path: path.resolve(__dirname, "dist"), // Output directory
  },
  module: {
    rules: [
      // Inline CSS into the bundles via style-loader/css-loader.
      { test: /\.css$/, use: ["style-loader", "css-loader"] },
      // Transpile project JS/MJS (not node_modules) with Babel.
      {
        test: /\.(js|mjs)$/,
        exclude: /node_modules/,
        use: {
          loader: "babel-loader",
          options: {
            presets: ["@babel/preset-env"],
          },
        },
      },
    ],
  },
  plugins: [
    // Wipe dist/ before each build.
    new CleanWebpackPlugin(),
    new CopyPlugin({
      patterns: [
        {
          from: "assets",
          to: "assets",
          // Rewrite named colors inside copied JSON data files.
          transform: transformDataColors,
        },
        { from: "src/style.css", to: "style.css" },
        { from: "src/bibliography.bib", to: "bibliography.bib" },
        { from: "src/index.html", to: "index.html" },
      ],
    }),
  ],
  devtool: process.env.NODE_ENV === 'production' ? 'source-map' : 'eval-source-map',
  devServer: {
    static: "./dist", // Serve files from the 'dist' directory
    open: process.env.NODE_ENV !== 'production', // Automatically open the browser unless in production
    hot: process.env.NODE_ENV !== 'production', // Enable hot module replacement unless in production
  },
  mode: process.env.NODE_ENV === 'production' ? 'production' : 'development',
};

// Debug: echo the active NODE_ENV when the config is loaded.
console.log(process.env.NODE_ENV)
|