"""Count Llama tokens in every split of each Proof-Pile-2 subset."""

import time

from datasets import load_dataset
from tqdm import tqdm

import sentencepiece as spm

# Llama's SentencePiece tokenizer model, expected in the working directory.
s = spm.SentencePieceProcessor(model_file="tokenizer.model")


def main():
    for subset in ["arxiv", "open-web-math", "algebraic-stack"]:
        for split in ["train", "validation", "test"]:
            # Build the subset with the local loading script and select one split.
            data = load_dataset("proof-pile-2.py", subset)[split]
            print(data)

            # Tokenize every document and accumulate the total token count.
            num_toks = 0
            start = time.time()
            for x in tqdm(data):
                num_toks += len(s.encode(x["text"]))
            total = time.time() - start

            print(f"Traversed {num_toks:.5e} tokens of {subset}-{split} in {total:.1f} seconds")


if __name__ == "__main__":
    main()