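# NeMo Megatron GPT pretraining config (see `target` at the bottom of this file)
# for a Llama-3-8B-shaped model at 128K context. All model-parallel sizes are 1,
# so the run is pure data parallel; gradient accumulation per optimizer step
# works out to global_batch_size / (micro_batch_size * data_parallel_size).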
mcore_gpt: true
micro_batch_size: 1
global_batch_size: 8
tensor_model_parallel_size: 1
pipeline_model_parallel_size: 1
virtual_pipeline_model_parallel_size: null
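
# 131072 tokens = 128K context, well beyond Meta-Llama-3-8B's native 8K window
# (hence the RoPE scaling settings near the end of this file). The geometry
# below (32 layers, 4096 hidden, 14336 FFN, 32 heads) matches Llama-3-8B.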
encoder_seq_length: 131072
max_position_embeddings: 131072
num_layers: 32
hidden_size: 4096
ffn_hidden_size: 14336
num_attention_heads: 32
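
# Llama-style regularization and normalization: dropout disabled everywhere,
# RMSNorm with eps 1e-5. kv_channels: null derives the head dimension as
# hidden_size / num_attention_heads = 128.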
init_method_std: 0.02
use_scaled_init_method: true
hidden_dropout: 0.0
attention_dropout: 0.0
ffn_dropout: 0.0
kv_channels: null
apply_query_key_layer_scaling: true
normalization: rmsnorm
layernorm_epsilon: 1.0e-05
do_layer_norm_weight_decay: false
make_vocab_size_divisible_by: 128
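
# Transformer block layout: pre-LN blocks with SwiGLU MLPs and no bias terms,
# again following the Llama recipe.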
pre_process: true
post_process: true
persist_layer_norm: true
bias: false
activation: fast-swiglu
headscale: false
transformer_block_type: pre_ln
openai_gelu: false
normalize_attention_scores: true
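
# RoPE applied over the full head dimension (rotary_percentage: 1.0). With 32
# attention heads and num_query_groups: 8, this is grouped-query attention:
# 4 query heads per shared KV head. Input embedding and output projection
# weights are untied (share_embeddings_and_output_weights: false).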
position_embedding_type: rope
rotary_percentage: 1.0
attention_type: multihead
share_embeddings_and_output_weights: false
overlap_p2p_comm: false
batch_p2p_comm: true
num_query_groups: 8
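
# Tokenizer is fetched from the Hugging Face Hub; the meta-llama repos are
# gated, so Hugging Face credentials are typically required.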
tokenizer:
  library: huggingface
  type: meta-llama/Meta-Llama-3-8B
  use_fast: true
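
# The loss-scaling knobs below (native_amp_*, hysteresis) only take effect
# under fp16 autocast; with precision: bf16 at the bottom of this file they are
# inert. Most optional kernel fusions are disabled; masked-softmax and
# grad-div all-reduce fusion stay on.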
native_amp_init_scale: 4294967296
native_amp_growth_interval: 1000
hysteresis: 2
fp32_residual_connection: false
fp16_lm_cross_entropy: false
megatron_amp_O2: false
grad_allreduce_chunk_size_mb: 125
grad_div_ar_fusion: true
gradient_accumulation_fusion: false
bias_activation_fusion: false
bias_dropout_add_fusion: false
masked_softmax_fusion: true
get_attention_mask_from_fusion: true
apply_rope_fusion: false
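
# No activation recomputation: every activations_checkpoint_* key is null, and
# sequence_parallel is off. At 128K sequence length this keeps full per-layer
# activations in memory; long-context runs commonly enable one or both if
# activation memory becomes the bottleneck.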
seed: 1234
resume_from_checkpoint: null
use_cpu_initialization: false
onnx_safe: false
apex_transformer_log_level: 30
gradient_as_bucket_view: true
sync_batch_comm: false
activations_checkpoint_granularity: null
activations_checkpoint_method: null
activations_checkpoint_num_layers: null
num_micro_batches_with_partial_activation_checkpoints: null
activations_checkpoint_layers_per_pipeline: null
sequence_parallel: false
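
# Transformer Engine kernels are enabled, but FP8 execution is off (fp8: false).
# The fp8_* recipe below (hybrid = E4M3 forward / E5M2 backward, 1024-step amax
# history with max reduction) stays dormant until fp8 is switched on.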
transformer_engine: true
fp8: false
fp8_e4m3: false
fp8_hybrid: true
fp8_margin: 0
fp8_interval: 1
fp8_amax_history_len: 1024
fp8_amax_compute_algo: max
reduce_amax: true
use_emha: false
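
# splits_string weights train/validation/test as 900:50:50, i.e. 90%/5%/5%.
# seq_length must match encoder_seq_length above.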
data:
  index_mapping_dir: null
  data_impl: mmap
  splits_string: 900,50,50
  seq_length: 131072
  skip_warmup: true
  num_workers: 2
  dataloader_type: single
  reset_position_ids: false
  reset_attention_mask: false
  eod_mask_loss: false
  validation_drop_last: true
  no_seqlen_plus_one_input_tokens: false
  pad_samples_to_global_batch_size: false
  shuffle_documents: true
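
# Nsight Systems profiling hooks, currently disabled: when enabled, capture
# runs from start_step to end_step on the listed ranks.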
nsys_profile:
  enabled: false
  start_step: 10
  end_step: 10
  ranks:
    - 0
  gen_shape: false
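
# Fused Adam with a 500-step warmup, cosine decay from lr 2e-4 to min_lr 2e-5;
# in NeMo's CosineAnnealing scheduler the final constant_steps are then held
# at min_lr.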
optim:
  name: fused_adam
  lr: 0.0002
  weight_decay: 0.01
  betas:
    - 0.9
    - 0.98
  sched:
    name: CosineAnnealing
    warmup_steps: 500
    constant_steps: 50000
    min_lr: 2.0e-05
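
# Long-context RoPE settings: a rotary base (theta) of 500000 is the Llama-3
# default, and scale_positional_embedding enables the Llama-3.1-style RoPE
# frequency scaling used to stretch the context to the 128K configured above.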
rotary_base: 500000.0
scale_positional_embedding: true
precision: bf16
target: nemo.collections.nlp.models.language_modeling.megatron_gpt_model.MegatronGPTModel
nemo_version: 2.0.0rc2