# Ref: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
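# Usage (the script name below is illustrative; use whatever this file is saved as):
#   python dump_vocab.py r50k_vocab.json
# Writes every UTF-8-decodable token of the chosen encoding to the given JSON file,
# mapping token string -> token id.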
import sys
import json
from collections import OrderedDict

import tiktoken
if len(sys.argv) <= 1:
    sys.stderr.write("Expected an output file name\n")
    sys.exit(-1)
vocab_json = sys.argv[1]
# cl100k_base: gpt-4, gpt-3.5-turbo, text-embedding-ada-002
# p50k_base: Codex models, text-davinci-002, text-davinci-003
# r50k_base (or gpt2): GPT-3 models like davinci
encoding = tiktoken.get_encoding("r50k_base")
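# Note: as the referenced cookbook shows, tiktoken can also resolve the encoding
# from a model name instead of hard-coding it, e.g.:
#   encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
# The model name above is only an example; this script keeps the explicit "r50k_base".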
# Map each token's string form to its integer id, in id order.
vocabulary = OrderedDict()
for i in range(encoding.n_vocab):
    try:
        token = encoding.decode_single_token_bytes(i).decode("utf-8")
        vocabulary[token] = i
    except Exception:
        # Some ids have no token or decode to bytes that are not valid UTF-8.
        sys.stderr.write("no token for %d\n" % i)

with open(vocab_json, "w", encoding="utf8") as f:
    json.dump(vocabulary, f, indent=4, ensure_ascii=False)