name: gpt2_text_generation
config_type: model
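# Architecture hyperparameters (GPT-2 base scale: 12 layers, 12 heads, 768-dim embeddings).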
add_cross_attention: false
vocab_size: 42001
attn_pdrop: 0.1
bos_token_id: 5
embd_pdrop: 0.1
eos_token_id: 5
gradient_checkpointing: false
initializer_range: 0.02
layer_norm_epsilon: 1.0e-05
model_type: gpt2
n_ctx: 1024
n_embd: 768
n_head: 12
n_layer: 12
n_positions: 1024
resid_pdrop: 0.1
summary_activation: false
summary_first_dropout: 0.1
use_cache: true
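# Decoding defaults for inference: beam search (4 beams) combined with sampling,
# 3-gram repetition blocking, and a cap of 50 newly generated tokens.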
generation:
  config_type: model
  bos_token_id: 0
  do_sample: true
  decoder_start_token_id: 0
  early_stopping: true
  eos_token_id: 2
  length_penalty: 2.0
  max_new_tokens: 50
  no_repeat_ngram_size: 3
  num_beams: 4
  pad_token_id: 1
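
# Minimal loading sketch, kept in comments so the file stays valid YAML.
# This is a hedged example, not part of the config itself; the filename
# "gpt2_text_generation.yaml" is an assumption for illustration.
#
#   import yaml
#
#   with open("gpt2_text_generation.yaml") as f:
#       cfg = yaml.safe_load(f)
#
#   # Top-level keys hold the model hyperparameters; decoding settings
#   # nest under the "generation" mapping.
#   assert cfg["n_embd"] == 768
#   assert cfg["generation"]["num_beams"] == 4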