3outeille HF Staff committed
Commit 730c344 · verified · 1 Parent(s): c257140

Upload llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64

llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/bench.slurm ADDED
@@ -0,0 +1,111 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_cluster
+ #SBATCH --time=02:00:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=1
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/log.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/log.out
+
+ # Function to update status based on squeue output
+ update_status() {
+     job_id=$1
+     status_file=$2
+     # For unknown reasons, it doesn't update the status for pending jobs; it only works once the job is running.
+     while true; do
+         job_status=$(squeue --job $job_id --noheader --format=%T)
+         echo "Job status: $job_status"
+         if [ -z "$job_status" ]; then
+             # Job has finished or is not found
+             break
+         elif [ "$job_status" = "RUNNING" ]; then
+             printf "running" > $status_file
+             break
+         fi
+         sleep 10
+     done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
+ echo "python3 version = $(python3 --version)"
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+
+ huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+
+ NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
+ CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/config.yaml"
+
+ LAUNCHER="torchrun \
+     --nproc_per_node 8 \
+     --nnodes 1 \
+     --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+     --node_rank ${SLURM_PROCID}"
+
+ # Check out the bench_cluster branch
+ cd $NANOTRON_REPO
+ git checkout bench_cluster
+ cd ..
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "running" in the background (pending is never written; see the note in update_status)
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/status.txt &
+
+ # Run the main command
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+     printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/status.txt
+ else
+     if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/log.out; then
+         printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/status.txt
+     elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/log.out; then
+         # Illegal memory accesses are bucketed as "oom" as well
+         printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/status.txt
+     elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/log.out; then
+         printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/status.txt
+     else
+         printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/status.txt
+     fi
+ fi
+
+ # Run the report script if the job completed successfully
+ if [ $exit_status -eq 0 ]; then
+     python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64 --is_logs
+     python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64 --is_profiler
+ fi
+
+
+ # Push the results folder to the Hub using huggingface-cli
+ huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64 llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64 --commit-message "Upload llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64"
+
+ # Verify the upload
+ if [ $? -eq 0 ]; then
+     echo "Uploading to Huggingface Hub successful"
+ else
+     echo "Failed to upload to Huggingface Hub"
+ fi
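
Note: bench.slurm reduces each run to a single word in status.txt: "running" while the job is alive, then "completed", "oom", "timeout", or "fail" depending on srun's exit status and on which error string appears in log.out. As a minimal sketch of how a results collector might consume these files (the collect_status helper and the results root below are hypothetical, not part of this upload):

from pathlib import Path

# Hypothetical helper: gather the one-word status written by bench.slurm
# for every run found under a results tree.
def collect_status(results_root: str) -> dict[str, str]:
    statuses = {}
    for status_file in Path(results_root).rglob("status.txt"):
        # bench.slurm writes exactly one of:
        # "running", "completed", "oom", "timeout", "fail"
        statuses[str(status_file.parent)] = status_file.read_text().strip()
    return statuses

for run, status in collect_status("results/llama-1B/8_GPUS").items():
    print(f"{run}: {status}")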
llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/config.yaml ADDED
@@ -0,0 +1,90 @@
+ general:
+   project: bench_cluster
+   seed: 42
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.025
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 1
+     eos_token_id: 2
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 4096
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 24
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     rope_theta: 10000.0
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 50257
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0001
+     lr_decay_style: linear
+     lr_warmup_style: linear
+     lr_warmup_steps: 1
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 8
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 1
+   tp_linear_async_communication: false
+   tp_mode: REDUCE_SCATTER
+ profiler:
+   profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: openai-community/gpt2
+   tokenizer_revision: null
+ data_stages:
+ - name: Training Stage
+   start_training_step: 1
+   data:
+     dataset:
+       dataset_overwrite_cache: false
+       dataset_processing_num_proc_per_process: 64
+       hf_dataset_config_name: null
+       hf_dataset_or_datasets: roneneldan/TinyStories
+       hf_dataset_splits: train
+       text_column_name: text
+     num_loading_workers: 0
+     seed: 42
+ lighteval: null
+ tokens:
+   train_steps: 20
+   val_check_interval: -1
+   batch_accumulation_per_replica: 2
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 64
+   sequence_length: 4096
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ checkpoints:
+   checkpoint_interval: 100000
+   checkpoints_path: /dev/null
+   resume_checkpoint_path: null
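
Note: the parallelism and tokens sections above fully determine the effective batch. A quick back-of-envelope check in plain Python, with the values copied from this config:

# Values copied from the config above.
dp = 8                   # parallelism.dp
micro_batch_size = 64    # tokens.micro_batch_size
grad_accum = 2           # tokens.batch_accumulation_per_replica
sequence_length = 4096   # tokens.sequence_length

global_batch_size = dp * micro_batch_size * grad_accum
tokens_per_step = global_batch_size * sequence_length

print(global_batch_size)  # 1024, matching global_batch_size in log.out below
print(tokens_per_step)    # 4194304, i.e. ~4.2M tokens per training step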
llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/log.out ADDED
@@ -0,0 +1,437 @@
+ ========================
+ START TIME: Wed Jul 3 23:33:38 UTC 2024
+ python3 version = Python 3.10.14
+ ========================
+ The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
+ Token is valid (permission: write).
+ Your token has been saved to /admin/home/ferdinand_mom/.cache/huggingface/token
+ Login successful
+ Already on 'bench_cluster'
+ M examples/config_tiny_llama.py
+ M examples/config_tiny_llama.yaml
+ M examples/train_tiny_llama.sh
+ M src/nanotron/models/llama.py
+ M src/nanotron/trainer.py
+ Your branch is up to date with 'origin/bench_cluster'.
+ Job status: RUNNING
+ W0703 23:33:42.120000 140451258931008 torch/distributed/run.py:757]
+ W0703 23:33:42.120000 140451258931008 torch/distributed/run.py:757] *****************************************
+ W0703 23:33:42.120000 140451258931008 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0703 23:33:42.120000 140451258931008 torch/distributed/run.py:757] *****************************************
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Config:
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Config(general=GeneralArgs(project='bench_cluster',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: run='%date_%jobid',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: seed=42,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: step=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: consumed_train_samples=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: benchmark_csv_path=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: ignore_sanity_checks=True),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: parallelism=ParallelismArgs(dp=8,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: pp=1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: tp=1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f7cf6c70700>,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: tp_linear_async_communication=False,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: expert_parallel_size=1),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: eos_token_id=2,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: hidden_act='silu',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: hidden_size=2048,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: initializer_range=0.02,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: intermediate_size=4096,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: is_llama_config=True,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: max_position_embeddings=4096,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: num_attention_heads=32,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: num_hidden_layers=24,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: num_key_value_heads=32,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: pad_token_id=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: pretraining_tp=1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: rms_norm_eps=1e-05,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: rope_scaling=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: rope_theta=10000.0,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: tie_word_embeddings=True,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: use_cache=True,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: vocab_size=50257),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: init_method=RandomInit(std=0.025),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: dtype=torch.bfloat16,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: make_vocab_size_divisible_by=1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: ddp_bucket_cap_mb=25),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: tokenizer=TokenizerArgs(tokenizer_name_or_path='openai-community/gpt2',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: tokenizer_revision=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: tokenizer_max_length=None),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: checkpoints=CheckpointsArgs(checkpoints_path=Path('/dev/null'),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: checkpoint_interval=100000,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: save_initial_state=False,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: resume_checkpoint_path=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: checkpoints_path_is_shared_file_system=False),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: logging=LoggingArgs(log_level='info',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: log_level_replica='info',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: iteration_step_info_interval=1),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: tokens=TokensArgs(sequence_length=4096,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: train_steps=20,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: micro_batch_size=64,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: batch_accumulation_per_replica=2,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: val_check_interval=-1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: limit_val_batches=0,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: limit_test_batches=0),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: adam_beta1=0.9,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: adam_beta2=0.95,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: torch_adam_is_fused=True,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: name='adamW'),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: zero_stage=1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: weight_decay=0.01,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: clip_grad=1.0,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: accumulate_grad_in_fp32=True,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0001,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: lr_warmup_steps=1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: lr_warmup_style='linear',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: lr_decay_style='linear',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: lr_decay_steps=19,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: lr_decay_starting_step=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: min_decay_lr=1e-05)),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: data_stages=[DatasetStageArgs(name='Training Stage',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: start_training_step=1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: data=DataArgs(dataset=PretrainDatasetsArgs(hf_dataset_or_datasets='roneneldan/TinyStories',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: hf_dataset_splits='train',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: hf_dataset_config_name=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: dataset_processing_num_proc_per_process=64,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: dataset_overwrite_cache=False,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: text_column_name='text'),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: seed=42,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: num_loading_workers=0))],
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: profiler=ProfilerArgs(profiler_export_path=Path('/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64')),
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: lighteval=None)
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Model Config:
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: LlamaConfig(bos_token_id=1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: eos_token_id=2,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: hidden_act='silu',
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: hidden_size=2048,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: initializer_range=0.02,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: intermediate_size=4096,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: is_llama_config=True,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: max_position_embeddings=4096,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: num_attention_heads=32,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: num_hidden_layers=24,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: num_key_value_heads=32,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: pad_token_id=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: pretraining_tp=1,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: rms_norm_eps=1e-05,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: rope_scaling=None,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: rope_theta=10000.0,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: tie_word_embeddings=True,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: use_cache=True,
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: vocab_size=50257)
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Building model..
+ [default0]:07/03/2024 23:34:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Setting PP block ranks...
+ [default6]:07/03/2024 23:34:10 [INFO|DP=6|PP=0|TP=0|ip-26-0-171-88]: No checkpoint path provided.
+ [default1]:07/03/2024 23:34:10 [INFO|DP=1|PP=0|TP=0|ip-26-0-171-88]: No checkpoint path provided.
+ [default4]:07/03/2024 23:34:10 [INFO|DP=4|PP=0|TP=0|ip-26-0-171-88]: No checkpoint path provided.
+ [default7]:07/03/2024 23:34:10 [INFO|DP=7|PP=0|TP=0|ip-26-0-171-88]: No checkpoint path provided.
+ [default5]:07/03/2024 23:34:10 [INFO|DP=5|PP=0|TP=0|ip-26-0-171-88]: No checkpoint path provided.
+ [default0]:07/03/2024 23:34:10 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Total number of parameters: 1.11G (2116.51MiB)
+ [default0]:07/03/2024 23:34:10 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Local number of parameters: 1.11G (2116.51MiB)
+ [default0]:07/03/2024 23:34:10 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [After model building] Memory usage: 2140.53MiB. Peak allocated: 2338.88MiB Peak reserved: 2392.00MiB
+ [default3]:07/03/2024 23:34:10 [INFO|DP=3|PP=0|TP=0|ip-26-0-171-88]: No checkpoint path provided.
+ [default0]:07/03/2024 23:34:10 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: No checkpoint path provided.
+ [default0]:07/03/2024 23:34:10 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Parametrizing model parameters using StandardParametrizator
+ [default2]:07/03/2024 23:34:10 [INFO|DP=2|PP=0|TP=0|ip-26-0-171-88]: No checkpoint path provided.
+ [default0]:07/03/2024 23:34:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [Optimizer Building] Using LearningRateForSP as learning rate
+ [default0]:07/03/2024 23:34:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [ZeRO sharding] Size of optimizer params per rank:
+ [default0]:07/03/2024 23:34:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [ZeRO sharding] DP Rank 0 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 23:34:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [ZeRO sharding] DP Rank 1 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 23:34:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [ZeRO sharding] DP Rank 2 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 23:34:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [ZeRO sharding] DP Rank 3 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 23:34:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [ZeRO sharding] DP Rank 4 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 23:34:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [ZeRO sharding] DP Rank 5 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 23:34:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [ZeRO sharding] DP Rank 6 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 23:34:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [ZeRO sharding] DP Rank 7 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 23:34:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [Training Plan] Stage Training Stage has 19 remaining training steps and has consumed 0 samples
+ [default0]:07/03/2024 23:34:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Using `datasets` library
+ [default0]:07/03/2024 23:34:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Loading tokenizer from openai-community/gpt2 and transformers/hf_hub versions ('4.41.2', '0.23.4')
+ [default0]:07/03/2024 23:34:19 [WARNING|DP=0|PP=0|TP=0|ip-26-0-171-88]: Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:07/03/2024 23:34:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [Training Plan] There are 1 training stages
+ [default0]:07/03/2024 23:34:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [Stage Training Stage] start from step 1
+ [default0]:07/03/2024 23:34:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]:
+ [default0]:07/03/2024 23:34:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: [Start training] datetime: 2024-07-03 23:34:21.053076 | mbs: 64 | grad_accum: 2 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
+ [default0]:07/03/2024 23:34:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
+ [default0]:07/03/2024 23:34:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-171-88]: Memory usage: 6904.53MiB. Peak allocated 6904.53MiB. Peak reserved: 7156.00MiB
+ [default7]:07/03/2024 23:34:21 [WARNING|DP=7|PP=0|TP=0|ip-26-0-171-88]: Repo card metadata block was not found. Setting CardData to empty.
+ [default7]:Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:Repo card metadata block was not found. Setting CardData to empty.
+ [default3]:07/03/2024 23:34:21 [WARNING|DP=3|PP=0|TP=0|ip-26-0-171-88]: Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:07/03/2024 23:34:21 [WARNING|DP=4|PP=0|TP=0|ip-26-0-171-88]: Repo card metadata block was not found. Setting CardData to empty.
+ [default1]:07/03/2024 23:34:21 [WARNING|DP=1|PP=0|TP=0|ip-26-0-171-88]: Repo card metadata block was not found. Setting CardData to empty.
+ [default6]:07/03/2024 23:34:21 [WARNING|DP=6|PP=0|TP=0|ip-26-0-171-88]: Repo card metadata block was not found. Setting CardData to empty.
+ [default5]:07/03/2024 23:34:21 [WARNING|DP=5|PP=0|TP=0|ip-26-0-171-88]: Repo card metadata block was not found. Setting CardData to empty.
+ [default3]:Repo card metadata block was not found. Setting CardData to empty.
+ [default6]:Repo card metadata block was not found. Setting CardData to empty.
+ [default5]:Repo card metadata block was not found. Setting CardData to empty.
+ [default1]:Repo card metadata block was not found. Setting CardData to empty.
+ [default2]:07/03/2024 23:34:21 [WARNING|DP=2|PP=0|TP=0|ip-26-0-171-88]: Repo card metadata block was not found. Setting CardData to empty.
+ [default2]:Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:[rank0]: Traceback (most recent call last):
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default0]:[rank0]: trainer.train(dataloader)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default0]:[rank0]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default0]:[rank0]: outputs = self.pipeline_engine.train_batch_iter(
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default0]:[rank0]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default0]:[rank0]: output = model(**micro_batch)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default0]:[rank0]: sharded_logits = self.model(
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default0]:[rank0]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default0]:[rank0]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default0]:[rank0]: output = self.pp_block(**new_kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
+ [default0]:[rank0]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 360, in forward
+ [default0]:[rank0]: qkv_states = self.qkv_proj(
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 87, in forward
+ [default0]:[rank0]: return column_linear(
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 359, in column_linear
+ [default0]:[rank0]: return F.linear(input, weight, bias)
+ [default0]:[rank0]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 3.00 GiB. GPU
+ [default3]:[rank3]: Traceback (most recent call last):
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default3]:[rank3]: trainer.train(dataloader)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default3]:[rank3]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default3]:[rank3]: outputs = self.pipeline_engine.train_batch_iter(
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default3]:[rank3]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default3]:[rank3]: output = model(**micro_batch)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: Traceback (most recent call last):
+ [default5]:[rank5]: Traceback (most recent call last):
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default5]:[rank5]: trainer.train(dataloader)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default3]:[rank3]: sharded_logits = self.model(
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default5]:[rank5]: outputs = self.pipeline_engine.train_batch_iter(
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default5]:[rank5]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default3]:[rank3]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default5]:[rank5]: output = model(**micro_batch)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default6]:[rank6]: trainer.train(dataloader)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default6]:[rank6]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default6]:[rank6]: outputs = self.pipeline_engine.train_batch_iter(
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default5]:[rank5]: sharded_logits = self.model(
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default3]:[rank3]: output = self.pp_block(**new_kwargs)
+ [default5]:[rank5]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default6]:[rank6]: output = model(**micro_batch)
+ [default5]:[rank5]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
+ [default5]:[rank5]: output = self.pp_block(**new_kwargs)
+ [default3]:[rank3]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: sharded_logits = self.model(
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 360, in forward
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank3]: qkv_states = self.qkv_proj(
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 87, in forward
+ [default5]:[rank5]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
+ [default3]:[rank3]: return column_linear(
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 359, in column_linear
+ [default3]:[rank3]: return F.linear(input, weight, bias)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 3.00 GiB. GPU  has a total capacity of 79.33 GiB of which 549.94 MiB is free. Including non-PyTorch memory, this process has 78.78 GiB memory in use. Of the allocated memory 65.88 GiB is allocated by PyTorch, and 1.16 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+ [default6]:[rank6]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default6]:[rank6]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 360, in forward
+ [default5]:[rank5]: qkv_states = self.qkv_proj(
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default6]:[rank6]: output = self.pp_block(**new_kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 87, in forward
+ [default5]:[rank5]: return column_linear(
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 359, in column_linear
+ [default5]:[rank5]: return F.linear(input, weight, bias)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 3.00 GiB. GPU  has a total capacity of 79.33 GiB of which 549.94 MiB is free. Including non-PyTorch memory, this process has 78.78 GiB memory in use. Of the allocated memory 65.88 GiB is allocated by PyTorch, and 1.16 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
+ [default6]:[rank6]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 360, in forward
+ [default6]:[rank6]: qkv_states = self.qkv_proj(
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/tensor_parallel/nn.py", line 87, in forward
+ [default6]:[rank6]: return column_linear(
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/tensor_parallel/functional.py", line 359, in column_linear
+ [default6]:[rank6]: return F.linear(input, weight, bias)
+ [default6]:[rank6]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 3.00 GiB. GPU  has a total capacity of 79.33 GiB of which 549.94 MiB is free. Including non-PyTorch memory, this process has 78.78 GiB memory in use. Of the allocated memory 65.88 GiB is allocated by PyTorch, and 1.16 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+ W0703 23:34:27.464000 140451258931008 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 1102601 closing signal SIGTERM
+ W0703 23:34:27.464000 140451258931008 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 1102602 closing signal SIGTERM
+ W0703 23:34:27.464000 140451258931008 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 1102604 closing signal SIGTERM
+ W0703 23:34:27.466000 140451258931008 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 1102607 closing signal SIGTERM
+ E0703 23:34:28.680000 140451258931008 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 0 (pid: 1102600) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
+ Traceback (most recent call last):
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
+ sys.exit(main())
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
+ return f(*args, **kwargs)
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
+ run(args)
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
+ elastic_launch(
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
+ return launch_agent(self._config, self._entrypoint, list(args))
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
+ raise ChildFailedError(
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+ ============================================================
+ /fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
+ ------------------------------------------------------------
+ Failures:
+ [1]:
+ time : 2024-07-03_23:34:27
+ host : ip-26-0-171-88.ec2.internal
+ rank : 3 (local_rank: 3)
+ exitcode : 1 (pid: 1102603)
+ error_file: <N/A>
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+ [2]:
+ time : 2024-07-03_23:34:27
+ host : ip-26-0-171-88.ec2.internal
+ rank : 5 (local_rank: 5)
+ exitcode : 1 (pid: 1102605)
+ error_file: <N/A>
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+ [3]:
+ time : 2024-07-03_23:34:27
+ host : ip-26-0-171-88.ec2.internal
+ rank : 6 (local_rank: 6)
+ exitcode : 1 (pid: 1102606)
+ error_file: <N/A>
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+ ------------------------------------------------------------
+ Root Cause (first observed failure):
+ [0]:
+ time : 2024-07-03_23:34:27
+ host : ip-26-0-171-88.ec2.internal
+ rank : 0 (local_rank: 0)
+ exitcode : 1 (pid: 1102600)
+ error_file: <N/A>
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+ ============================================================
+ srun: error: ip-26-0-171-88: task 0: Exited with exit code 1
+ Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
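
Note: the failed 3.00 GiB allocation in the traceback is consistent with the output of the fused QKV projection where every rank crashed. Assuming qkv_proj produces a [micro_batch_size, sequence_length, 3 * hidden_size] tensor in bf16 at 2 bytes per element (this shape is our inference from the config and the stack trace, since num_key_value_heads equals num_attention_heads), the arithmetic works out exactly:

# Sanity check of the 3.00 GiB allocation reported at qkv_proj's F.linear.
# Shape assumption (ours): fused QKV output is [mbz, seq, 3 * hidden] in bf16.
mbz, seq, hidden = 64, 4096, 2048
qkv_out_bytes = mbz * seq * 3 * hidden * 2   # 3,221,225,472 bytes
print(qkv_out_bytes / 2**30)                 # 3.0 GiB, the exact failed allocation

Hence the "oom" status recorded below.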
llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-64/status.txt ADDED
@@ -0,0 +1 @@
+ oom