##################################################
# Data config for Shakespeare
##################################################
test_size = 0.1
seed = 110892
shuffle = True
dataset_key = 'train'
num_proc = -1  # -1 for all, 1 for single process, 2 for two processes, etc.
tokenizer = 'gpt2'  # 'gpt2' or 'cl100k_base' or 'gpt-4'

##################################################
# Training config for Shakespeare
##################################################
out_dir = 'gpt2'
eval_interval = 2000
log_interval = 1
eval_iters = 200
eval_only = False  # if True, script exits right after the first eval
always_save_checkpoint = True  # if True, always save a checkpoint after each eval
init_from = 'resume'  # 'scratch' or 'resume' or 'gpt2*'

# wandb logging
wandb_log = False  # disabled by default
wandb_project = 'SimpleLLM'
wandb_run_name = 'gpt2'  # 'run' + str(time.time())

# data
dataset = 'openwebtext'
gradient_accumulation_steps = 5 * 8  # used to simulate larger batch sizes
batch_size = 12  # if gradient_accumulation_steps > 1, this is the micro-batch size
block_size = 1024

# model
n_layer = 12
n_head = 12
n_embd = 768
dropout = 0.0  # for pretraining 0 is good, for finetuning try 0.1+
bias = False  # do we use bias inside LayerNorm and Linear layers?

# adamw optimizer
learning_rate = 6e-4  # max learning rate
max_iters = 600000  # total number of training iterations
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
grad_clip = 1.0  # clip gradients at this value, or disable if == 0.0

# learning rate decay settings
decay_lr = True  # whether to decay the learning rate
warmup_iters = 2000  # how many steps to warm up for
lr_decay_iters = 600000  # should be ~= max_iters per Chinchilla
min_lr = 6e-5  # minimum learning rate, should be ~= learning_rate/10 per Chinchilla

# DDP settings
backend = 'nccl'  # 'nccl', 'gloo', etc.

##################################################
# Generator config for Shakespeare
##################################################
# init_from = 'resume'  # either 'resume' (from an out_dir) or a gpt2 variant (e.g. 'gpt2-xl')
start = "\n"  # or "<|endoftext|>" or etc. Can also specify a file, use as: "FILE:prompt.txt"
num_samples = 10  # number of samples to draw
max_new_tokens = 500  # number of tokens generated in each sample
temperature = 0.8  # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
top_k = 200  # retain only the top_k most likely tokens, clamp others to have 0 probability
seed = 1337
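
# --- Data config sketch (illustrative, not part of the config) --------------
# A minimal sketch of how the split/tokenizer settings above could be consumed
# with the HuggingFace `datasets` library and `tiktoken`. The helper name
# `split_and_tokenize`, the dataset name argument, the 'text' column, and the
# num_proc == -1 -> os.cpu_count() mapping are assumptions of this sketch.
import os
import tiktoken
from datasets import load_dataset

def split_and_tokenize(dataset_name):
    try:
        enc = tiktoken.get_encoding(tokenizer)        # encoding names: 'gpt2', 'cl100k_base'
    except ValueError:
        enc = tiktoken.encoding_for_model(tokenizer)  # model names such as 'gpt-4'
    ds = load_dataset(dataset_name)[dataset_key]
    split = ds.train_test_split(test_size=test_size, seed=seed, shuffle=shuffle)
    procs = os.cpu_count() if num_proc == -1 else num_proc
    return split.map(lambda ex: {'ids': enc.encode_ordinary(ex['text'])},
                     num_proc=procs)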
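
# --- Learning-rate schedule sketch (illustrative) ----------------------------
# A minimal sketch of the linear warmup + cosine decay that the decay settings
# above are typically paired with; `get_lr` is an illustrative name, not
# something defined by this config.
import math

def get_lr(it):
    if not decay_lr:
        return learning_rate
    if it < warmup_iters:                                  # linear warmup
        return learning_rate * (it + 1) / (warmup_iters + 1)
    if it > lr_decay_iters:                                # past the decay horizon
        return min_lr
    decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # goes 1 -> 0
    return min_lr + coeff * (learning_rate - min_lr)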
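
# --- Sampling sketch (illustrative) ------------------------------------------
# A minimal sketch of how temperature and top_k are commonly applied at each
# decoding step; it assumes a PyTorch model whose forward pass returns
# (logits, loss). `model` and `idx` are hypothetical.
import torch
import torch.nn.functional as F

@torch.no_grad()
def sample_step(model, idx):
    logits, _ = model(idx[:, -block_size:])           # crop to the context window
    logits = logits[:, -1, :] / temperature           # sharpen/flatten the distribution
    if top_k is not None:
        v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
        logits[logits < v[:, [-1]]] = -float('Inf')   # keep only the top_k logits
    probs = F.softmax(logits, dim=-1)
    next_id = torch.multinomial(probs, num_samples=1)
    return torch.cat((idx, next_id), dim=1)           # append the sampled token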