diff --git a/.gitattributes b/.gitattributes index c1210df242f2e43d624451f3fbe47a0b26623e59..259276f104f9fd36d7d5d3f9dc20c03a7cb1c540 100644 --- a/.gitattributes +++ b/.gitattributes @@ -12334,3 +12334,40 @@ train/cc_en_head/cc_en_head_0047-tokenized-chunked-1024-512-128-backfill-nodups/ train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text 
+train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text +train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text +train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text +train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text +train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text +train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text +train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text +train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text +train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text +train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text +train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text diff --git a/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..2bf34f51f9b3f74e56fbf0362a15b915e0db8ffd --- /dev/null +++ b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d47a533bb776aafe6edc98cf53da24a0d6f54366d2ecff0b8499173f1d426dd +size 67107628 diff --git a/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..1301a8be653cd33eca6091fd9df7644613757f4a --- /dev/null +++ b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b150330f6f81c00ee478fa0028438420b3c6f73b4577d43ebaf0475cf0e0902 +size 67106789 diff --git a/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds new file mode 100644 index 0000000000000000000000000000000000000000..96d548cfe062807f4b66ef96f7244eb663190c55 --- /dev/null +++ b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ca992259e30e7f0b24678d6330d1ece2a6796f5a8d2969441f47e3b861d1d49 +size 67107992 diff --git a/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds new file mode 100644 index 0000000000000000000000000000000000000000..7d727740a456f72b5e7096de67bccd203f0e6294 --- /dev/null +++ b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffa40cb4a7bfbd39a8900f72427314595ae1d6bde85e9e8e12e155aec8ee440c +size 67108750 diff --git a/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds new file mode 100644 index 0000000000000000000000000000000000000000..476cdef5f9bf92204c8566c4728426d0fd93ef9e --- /dev/null +++ b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1fd7d79ec1fd593b4bbf4d01185fee101c2facc823cc734ca68aafecbbccdf1 +size 67107830 diff --git a/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds new file mode 100644 index 0000000000000000000000000000000000000000..fdb730e971d47cb4d0b9a84340608c9a99889e5e --- /dev/null +++ b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5ef9b7329a343ec666e073d0db3b584af80328f1b6bd59f8fc249f95cdf5882 +size 67107548 diff --git a/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds new file mode 100644 index 0000000000000000000000000000000000000000..0ea7a2d9493ffb85356b72fb6c99d740161e033d --- /dev/null +++ b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9dead742d304e3e0a23a72cf1938570067d0c728f5aeda8f3bffff7806d0a4d +size 67108104 diff --git a/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds new file mode 100644 index 0000000000000000000000000000000000000000..915f53e031a9a03eaf21694984622993d50bc73c --- /dev/null +++ b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea0e11b77fa7b13a7d24a6b9fc168d9ffd2e7b60234989a451a490a988e8c043 +size 67107466 diff --git a/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..602261a377d2616f37a7cadca455577e1c63b36c --- /dev/null +++ b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b2154e48600fce1688c33acb5ae7db3d586eece3b0b841912ccbe1eb12c06e5 +size 67106817 diff --git 
a/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds new file mode 100644 index 0000000000000000000000000000000000000000..788eb085277dac1c6f64a8d3c3ecb3f99e66704b --- /dev/null +++ b/train/arxiv/arxiv_0052-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbf44e54160b9da2ce1f73d481822271b5b39130c37002499d433772d697b9d6 +size 2994601 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..ca12a6500c8d58d0efe6d3b5840baba4637f3497 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae55af23a82fbea611a38fa2b01ee081dd9d59cbaa3a2295ab871c98be3825c8 +size 67107567 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds new file mode 100644 index 0000000000000000000000000000000000000000..0b52f8692f15ebc9b6abb66174e33f66dc214bfb --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc6354f4c8e67a800716fc800c13cf1b49cd189ca146d204f2ce79fba20eb27b +size 67108021 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds new file mode 100644 index 0000000000000000000000000000000000000000..f214aa1f3d522a0d62971fe2e2fc6a093b51936d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dafae7957f3d690426b69e181b3397e1bb06f4af4eb83488e9326c64d5f92cf +size 67108841 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds new file mode 100644 index 0000000000000000000000000000000000000000..4052952fa9225f5e8aeaadd422c6b5cbfd0e2d03 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56824004e7c89106e3153a5b542a6722d8399d3de85d40c58c77b5c45b2e074a +size 67107723 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds new file mode 100644 index 0000000000000000000000000000000000000000..eff814eea2fffdd7b2cabca896cbc3851fe258ab --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:005a5b9e883eb5744740eb0836cf3a9e1aab0904b6a807d1a613ecaf036601a1 +size 67107620 diff --git 
a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..25c3d3b4c1d5d9c1bd0620da93cf93d62f8fb3e9 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcfe90c11e555b70cc5773d97bc71cb642a6cea8c233ef6681a3e578f027d2fb +size 67107580 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds new file mode 100644 index 0000000000000000000000000000000000000000..87a35a7f7426ba400f095b14bb33e1dcc390775f --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16c8168087bd422e3429184ba9d2be68d85853ec69a0f85056b65adb28589351 +size 67108557 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds new file mode 100644 index 0000000000000000000000000000000000000000..383f164859feee87a9460b6bbb908b653f003ed0 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c36506d6576748944a110fd50549e03f3120210238c9b8abea1813f422058ee +size 67108743 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds new file mode 100644 index 0000000000000000000000000000000000000000..bf66a7e473acfca1d999c2c547a4817223e20f98 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5325916c5565b313fb8eebd4ac7979b0f8f08bd84feb2720f457407ed769f2f +size 67107552 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds new file mode 100644 index 0000000000000000000000000000000000000000..cd8968ce313bb21a2f43f3eefc2dc72cc49e4047 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:836c118de47b21fac524019ad0a3bd6d09ec9613a37d01624243d64b4d0d99f5 +size 67108160 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds new file mode 100644 index 0000000000000000000000000000000000000000..c54d2775c0030a6dca6a51a536b2aa271c9f0899 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:925d46cc83416118b0e36003f790a117a74ed9a32f06ee8c06f1d77b14c39750 +size 67108203 diff --git 
a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds new file mode 100644 index 0000000000000000000000000000000000000000..679b3ebb6a6cff8a20789ab98609dcbc2836389e --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3b56559cf4159429c839fd2c8922dcc24759dfc7fe93cdbbe00ea833089d189 +size 67108476 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds new file mode 100644 index 0000000000000000000000000000000000000000..b71fe4b080bac5d6416dc159538aadc017a0cc54 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2660b4022d8ffe44d41253412a7e56227d1ff7628eed02661505465dfb01bb5 +size 67108799 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds new file mode 100644 index 0000000000000000000000000000000000000000..4fc4ec3a8f6a13994302c311ea7b4c6b2132d9fa --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18d487ea5418ca01b2f830aca0686a14903fcb38cbd285e386ac75cc676bf029 +size 67107629 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds new file mode 100644 index 0000000000000000000000000000000000000000..1287470fc9a9cad34c455b91e675b976c866b31e --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c73f3f1d6b0510ec627fc9a7ca3492f44c4bc27ad0c772ef33378c8de4a6a915 +size 67108733 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds new file mode 100644 index 0000000000000000000000000000000000000000..a261e8b4f09742ca10242d14921c914935547af9 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c327f89040d02cc88b20f9cf794e35f7e516d773c5dbc3504d1a9c1ea6f3042c +size 67108526 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds new file mode 100644 index 0000000000000000000000000000000000000000..23ed3913ac08de5187691f537887fe7ffaa72422 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de31bb531ef029f6c5195fabd9343b81e7da4e32011532b5e0e79cbdf7b7abef +size 67108488 diff --git 
a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds new file mode 100644 index 0000000000000000000000000000000000000000..0120d9c970243e9ce2cded4354345a2b966a6f11 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6fec6083148db166b3631428197f4b0c101ba3b6ba3a734c200756b59de451a +size 67108752 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds new file mode 100644 index 0000000000000000000000000000000000000000..30922beb2ab94929a0d32a558998ef8e92340d62 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12de27198818a1afae4490fc25f4d646a8b260be49774807623485f9d2bd60b8 +size 67108395 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds new file mode 100644 index 0000000000000000000000000000000000000000..5b9ab885b1f61590ce4a88b1dcc3c37fc3843b04 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ceeb2fcf67b9e1b2a1deea722d6277556e2d26b6cf017ba3a9e8006dd5bbc11d +size 67108364 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds new file mode 100644 index 0000000000000000000000000000000000000000..1d588d05da48b3b6ce1fc1d780dcf062a89747a3 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0e6332351f9b4cd1fc90f1e22a59d1f4999db0d0adb3a149b3124771a012724 +size 67108567 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds new file mode 100644 index 0000000000000000000000000000000000000000..493ead335e43e64424087dfa8b92b36b6563c0ce --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:721674f06640b2aef2ceec7a072eb86538573b403196e14248ea685b0c61ec53 +size 67106846 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds new file mode 100644 index 0000000000000000000000000000000000000000..a714c945290779d768018f49229ea3b915d265f6 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d5c45b53957240f0c352b38a98caddaca5c2cc2b03c771c0f4f0c7c746dbd88 +size 67108316 diff --git 
a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds new file mode 100644 index 0000000000000000000000000000000000000000..ebee88b122c289a069adeda0cc2072a805f0aae6 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0acd075d6abb9c557abbad61626a3413fc10c439d5be9e7b73251c0a85fae64b +size 67108434 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds new file mode 100644 index 0000000000000000000000000000000000000000..8a2f3d7848ff9187da34be6b5d123b73bec182fd --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dfd79c5de11fbd9509f1544010b99efba55898ea8070f761cbc0deec13f19d9 +size 67108523 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds new file mode 100644 index 0000000000000000000000000000000000000000..886300b89a46032dc4e3b037419ced638ea75125 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4062688bf7c7b760beaf131568b3af30b1e1ddf48c7dcf28273f7168733cdc0 +size 67108521 diff --git a/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds new file mode 100644 index 0000000000000000000000000000000000000000..8f0dbdc497f4a3c89c32edac2c8cd6f444c52b2d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0095-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a02ffee2e77a7b110b531496bd2f324a50833139e72c0c10084f077e0a1e7f4b +size 52101120 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17403-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17403-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e9de27cd5cf4fd4ee8bff9c9d14162f5c8f1313e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17403-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106922, "hashes": {}}, "samples": 44188, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47560976, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10222742, "hashes": {}}, "samples": 6706, "size_limit": 
67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7218041, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17403-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17403-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..aa6a93974bccea9ab5927c63ab425f8220b4f10d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17403-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37089729, + "num_truncated_tokens": 37061851 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17669-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17669-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a56d6c0d1259e7e3e920e97747bdd3aca5a4121c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17669-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107691, "hashes": {}}, "samples": 42755, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47528311, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20128144, "hashes": {}}, "samples": 12854, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14246086, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17669-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17669-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..58bd0baca0034dca2428eb33363d439c2963fb75 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17669-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41896574, + "num_truncated_tokens": 41861100 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19311-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19311-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..0e2b9857327d25ac843e9999887a3f4b10e89c3c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19311-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], 
"raw_data": {"basename": "shard.00000.mds", "bytes": 67108296, "hashes": {}}, "samples": 44360, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47566810, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8876585, "hashes": {}}, "samples": 5827, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6303046, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19311-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19311-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..f8ed2ba3d1d36bc498d9ad4408481d26411ed654 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19311-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36439114, + "num_truncated_tokens": 36412907 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22439-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22439-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5ceb0efb8bbbaabb9a41417441765bab149ec7ff --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22439-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107129, "hashes": {}}, "samples": 44194, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47584212, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10425353, "hashes": {}}, "samples": 6790, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7354388, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22439-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22439-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e09e26ee51e641fef0d7d7620a4bd04b53ed626a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22439-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37188273, + "num_truncated_tokens": 37160477 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22443-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22443-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a95e0938ad48badb9f78438964b949856de37638 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22443-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107943, "hashes": {}}, "samples": 43736, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47765820, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12336874, "hashes": {}}, "samples": 8087, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8766810, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22443-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22443-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..07b9dcb95e7b37d248ae95ffd06c124bea9c5846 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22443-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38118390, + "num_truncated_tokens": 38088575 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26575-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26575-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7c940b7e9f46ccd4db6e6414ec6ced94dc730bc5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26575-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37196489, + "num_truncated_tokens": 37167869 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28912-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28912-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..7a18a834b1bab555473c2e1b11ed33f1a9d9783f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28912-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106782, "hashes": {}}, "samples": 43128, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47633107, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", 
"input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17461359, "hashes": {}}, "samples": 11226, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12368042, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28912-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28912-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..385366e9c25417e647dedfc5af99df127397f40e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28912-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40601613, + "num_truncated_tokens": 40568184 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29664-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29664-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..287e8ea8ff056d8efd0f86ee2ebfdd1d09084eae --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29664-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108374, "hashes": {}}, "samples": 44343, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48010846, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9520723, "hashes": {}}, "samples": 6403, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6807734, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29664-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29664-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..b9e1da2bb586f5d92f72f07507e1b73c8b8add19 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29664-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36744072, + "num_truncated_tokens": 36715324 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31505-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31505-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a404a24c3c6491beffb65ed881a21dfbc485259a --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31505-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108440, "hashes": {}}, "samples": 43808, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47486903, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14656423, "hashes": {}}, "samples": 9463, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10440283, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31505-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31505-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..749ab4e343f6154b7def28b891bb45beb52848ec --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31505-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39233814, + "num_truncated_tokens": 39201503 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35742-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35742-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..849c5cf06878f10d3fe5019381db5307e9e2bf1a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35742-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107731, "hashes": {}}, "samples": 42420, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47799499, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 24324043, "hashes": {}}, "samples": 15228, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 17350546, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35742-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35742-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e9166f5bb78222d04fc2e3459ba1c9864ad8a995 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35742-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ 
+ "num_tokens": 43931333, + "num_truncated_tokens": 43891621 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3645-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3645-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..849182a75f51ad7c56e3ceda26cb639e1c544cff --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3645-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108653, "hashes": {}}, "samples": 43935, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47562185, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11557019, "hashes": {}}, "samples": 7586, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8204480, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3645-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3645-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..ca333a1ed5984e949aadbf26ae66ee290fecf571 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3645-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37738206, + "num_truncated_tokens": 37709396 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37017-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37017-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..65cbfac05fbdf0a6f4f77d6fdc1f98c8d83fe2a3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37017-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107694, "hashes": {}}, "samples": 44311, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47799314, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11098539, "hashes": {}}, "samples": 7222, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7874169, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37017-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37017-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..6e235d370258dbdbcf788f6bd63f1130eea9ad6c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37017-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37508326, + "num_truncated_tokens": 37478552 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40467-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40467-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..129c01a9879d0014885949f2c88521f7f674b01d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40467-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108493, "hashes": {}}, "samples": 44259, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47649654, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9652841, "hashes": {}}, "samples": 6391, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6890347, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40467-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40467-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c6c3da3eb43f321aef31358ec250a993324b0447 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40467-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36813142, + "num_truncated_tokens": 36784849 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4126-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4126-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..722f6af0e7f6d90cdd6461deb2ae2460983d8bf0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4126-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108254, "hashes": {}}, "samples": 42876, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 48027134, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18924220, "hashes": {}}, "samples": 12162, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13481754, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4126-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4126-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..26c797d3dd4c000e7662d2a62da68385d9358425 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4126-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41312497, + "num_truncated_tokens": 41277309 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41444-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41444-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..01b0e78977f71dfad9fb8d14228246cfdf4fa179 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41444-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108458, "hashes": {}}, "samples": 44107, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47801502, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11710836, "hashes": {}}, "samples": 7788, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8345356, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41444-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41444-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..806ffb53babcd5c2989cf1779576c3ca51c20059 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41444-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37803705, + "num_truncated_tokens": 37773586 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44590-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44590-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..995f2e9014d01655a0b37d5f256e91915806fd15 --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44590-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107391, "hashes": {}}, "samples": 44200, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47753423, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10009180, "hashes": {}}, "samples": 6638, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7155956, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44590-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44590-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..6197554a7a3240a999f868994bd0afe0b1339408 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44590-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36984906, + "num_truncated_tokens": 36957045 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45131-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45131-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..dd4e764b8e4eda1f301cf788c2817242ff9097af --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45131-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108718, "hashes": {}}, "samples": 43020, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47902635, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16779518, "hashes": {}}, "samples": 11033, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12033044, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45131-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45131-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..33085587b0b4f0aba9d060ea6490c6dabf9d9e8b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45131-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + 
"num_tokens": 40270958, + "num_truncated_tokens": 40237132 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45581-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45581-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..9c6e59b7b7f2a797154de626d968de2a34596157 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45581-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108283, "hashes": {}}, "samples": 43846, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47452936, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11358499, "hashes": {}}, "samples": 7407, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8030530, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45581-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45581-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..30403c6020b904cd7992c56b00a758d98779b42b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45581-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37646983, + "num_truncated_tokens": 37618881 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49416-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49416-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..94391d802ee1d004b4761b422a5dc02bd5a726c1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49416-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108317, "hashes": {}}, "samples": 43820, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47506323, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13109713, "hashes": {}}, "samples": 8416, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9278243, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49416-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49416-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..be6f63d4763b7b7600b3610468620026a18f8d19 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49416-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38492233, + "num_truncated_tokens": 38462544 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50012-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50012-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..b7edbd0591e0e89bcbfa6c5e1107379e65c49aa0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50012-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108205, "hashes": {}}, "samples": 43894, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48062246, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13966621, "hashes": {}}, "samples": 8964, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9975485, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50012-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50012-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..04c8296e70efd4ba2cfa872573b3595028f738e9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50012-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38901392, + "num_truncated_tokens": 38869294 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50026-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50026-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..97061ba31d0032bf42be916d8103ea00246c811a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50026-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108084, "hashes": {}}, "samples": 44140, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47860386, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11008047, "hashes": {}}, "samples": 7444, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7889933, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50026-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50026-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..068111cabae818760629ed73f7d84d896e334fe1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50026-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37461679, + "num_truncated_tokens": 37432744 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50282-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50282-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..40f4efcee248ff3dd55a3b150a3300832cd8e1f1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50282-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108681, "hashes": {}}, "samples": 43549, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48039830, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14712336, "hashes": {}}, "samples": 9625, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10554321, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50282-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50282-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..59e8c0362d7b0f2c2eed6ed46d91bc667e3a34fb --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_50282-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39264692, + "num_truncated_tokens": 39232423 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5049-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5049-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..84ae8cdfbe58173b1e2b80655679c66650068c4a --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5049-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107624, "hashes": {}}, "samples": 42744, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47738183, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19969022, "hashes": {}}, "samples": 12782, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14203717, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5049-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5049-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..fcc4250b84e492387ecf9af874e57d2946ce85b3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5049-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41819562, + "num_truncated_tokens": 41783757 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53099-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53099-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..32ac5aaa62ef72770781cf8abe32c2cf3808c263 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53099-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107826, "hashes": {}}, "samples": 44165, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47647979, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11712638, "hashes": {}}, "samples": 7830, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8329739, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53099-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53099-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..22165ce463bb0fa7ff732c334f5e54c58545634f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_53099-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + 
"num_tokens": 37801126, + "num_truncated_tokens": 37770879 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57176-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57176-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..456b23f068cf68c507f1068dd4d870069debbe37 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57176-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108393, "hashes": {}}, "samples": 43048, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47641398, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18385194, "hashes": {}}, "samples": 11778, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13080263, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57176-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57176-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..488993321a240e09d461cb0614985ea31d3ed3b5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57176-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41049796, + "num_truncated_tokens": 41015156 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57873-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57873-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..79f2180a8baed1e099132cbaf75b7a6787c0ca2b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57873-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108431, "hashes": {}}, "samples": 44564, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48040856, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8870685, "hashes": {}}, "samples": 5891, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6327464, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57873-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57873-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..0b9a23419a8fc313e7cddcedd00077229e637402 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57873-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36428123, + "num_truncated_tokens": 36400109 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61299-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61299-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..97b636b0a825359dd5517ed334ec7e7e93f9ccaf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61299-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108789, "hashes": {}}, "samples": 43003, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47716588, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18888340, "hashes": {}}, "samples": 12009, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13514677, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61299-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61299-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..fbdff750ec376f8cd21106d22735a04c41b15a05 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_61299-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41295648, + "num_truncated_tokens": 41260494 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63571-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63571-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f1896afd68a4254b9a5ccc9aa9f2140f448a2b91 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63571-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108615, "hashes": {}}, "samples": 43557, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47380140, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15833373, "hashes": {}}, "samples": 9834, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11191270, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63571-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63571-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d713535e427413c150774848097c054042947df1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63571-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39818318, + "num_truncated_tokens": 39786182 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63689-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63689-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a1812fc682d2e06863ca6ead0434a22420dc1918 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63689-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108044, "hashes": {}}, "samples": 44115, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47801892, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12241063, "hashes": {}}, "samples": 8206, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8682855, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63689-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63689-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1ff3f82bb44c9701fe9cab5d73fc1a2040494b08 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63689-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38055440, + "num_truncated_tokens": 38024823 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77212-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77212-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..608e71a17a19a3e196ccf4601b5f61887bee54ae --- /dev/null 
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77212-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107789, "hashes": {}}, "samples": 44848, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47842613, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 6100597, "hashes": {}}, "samples": 4143, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4365516, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77212-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77212-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..74cda2678651a04925f48c1738dca9baf3eb9533 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_77212-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35088010, + "num_truncated_tokens": 35063182 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80042-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80042-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..df7782545e975d71a48c033aed1247545fe9e233 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80042-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107531, "hashes": {}}, "samples": 44715, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47919631, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8495578, "hashes": {}}, "samples": 5588, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6054671, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80042-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80042-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e605394ae6372a9865b97404ce235adb7ba03524 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80042-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + 
"num_tokens": 36244851, + "num_truncated_tokens": 36217279 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80740-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80740-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..56b407dd1a624057c8d1ce42b7a9788b6a5c7761 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80740-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107263, "hashes": {}}, "samples": 43778, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47653853, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12455994, "hashes": {}}, "samples": 8250, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8874253, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80740-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80740-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..5de02979b86f18e719c550b461eacfa7a5968837 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80740-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38171318, + "num_truncated_tokens": 38141344 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83341-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83341-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..da45699871676312b766e549d424e8e3b8067398 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83341-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108579, "hashes": {}}, "samples": 44910, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47914388, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7437541, "hashes": {}}, "samples": 4894, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5279164, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83341-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83341-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..23aadb59b4229b85b58dcd596a4296628e3bdcdc --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83341-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35731832, + "num_truncated_tokens": 35705781 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8822-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8822-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..de207880c93a2445515810021940cce801833618 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8822-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107763, "hashes": {}}, "samples": 42335, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47552464, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 23695170, "hashes": {}}, "samples": 15098, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16882157, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8822-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8822-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a7d5cceb96d1ec5c0016cae3997fefb80b1e666b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_8822-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 43623741, + "num_truncated_tokens": 43584759 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90115-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90115-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..bf1b4c80a028f377da0a7a8b63b69352a5f26e39 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90115-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107604, "hashes": {}}, "samples": 43927, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47443373, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12109300, "hashes": {}}, "samples": 7863, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8550985, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90115-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90115-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a5b51acb7242ebd0a849de4da76e13cfce001611 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90115-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38005533, + "num_truncated_tokens": 37976679 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91283-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91283-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..0ce0b5ca43ba84766a1e80d031bcfbb93bbdca09 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91283-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108085, "hashes": {}}, "samples": 43774, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47694415, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13487627, "hashes": {}}, "samples": 8837, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9568824, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91283-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91283-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..106375eb10523f709e701b2fed32ad650ac407da --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91283-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38669631, + "num_truncated_tokens": 38638933 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94979-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94979-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..de418a8a41c94296ed7e2ba8958bc9f9f5f49eb5 --- /dev/null 
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94979-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107587, "hashes": {}}, "samples": 42657, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47690417, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20964856, "hashes": {}}, "samples": 13398, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14894043, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94979-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94979-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d1d509f2e004a54922883e543aa2be2c2413b8dc --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94979-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42301050, + "num_truncated_tokens": 42264272 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_95890-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_95890-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c72431de1dda41c3ae5ee0012f29a6abfd87f996 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_95890-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108566, "hashes": {}}, "samples": 43501, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47763162, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17076682, "hashes": {}}, "samples": 10860, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12079269, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_95890-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_95890-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..dfe30adf2431e7319c2ecfad4eef86a73603f745 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_95890-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 
@@ +{ + "num_tokens": 40410030, + "num_truncated_tokens": 40376134 +} \ No newline at end of file