3outeille HF staff commited on
Commit
7718b3d
·
verified ·
1 Parent(s): 0647d56

Upload llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2

Browse files
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/bench.slurm ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=bench_cluster
4
+ #SBATCH --time=00:59:00
5
+ #SBATCH --partition=hopper-prod
6
+ #SBATCH --nodes=2
7
+ #SBATCH --gres=gpu:8
8
+ #SBATCH --qos=high
9
+ #SBATCH --ntasks-per-node=1
10
+ #SBATCH --cpus-per-task=96
11
+ #SBATCH --exclusive
12
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/log.out
13
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/log.out
14
+
15
+ # Function to update status based on squeue output
16
+ update_status() {
17
+ job_id=$1
18
+ status_file=$2
19
+ # For unknown reasons, it doenst update status for pending. It only works for running
20
+ while true; do
21
+ job_status=$(squeue --job $job_id --noheader --format=%T)
22
+ echo "Job status: $job_status"
23
+ if [ -z "$job_status" ]; then
24
+ # Job has finished or is not found
25
+ break
26
+ elif [ "$job_status" = "RUNNING" ]; then
27
+ printf "running" > $status_file
28
+ break
29
+ fi
30
+ sleep 10
31
+ done
32
+ }
33
+
34
+ # Misc initializations.
35
+ echo "========================"
36
+ echo "START TIME: $(date)"
37
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
38
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
39
+ echo python3 version = $(python3 --version)
40
+ echo "========================"
41
+
42
+ # Slurm stuff
43
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
44
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
45
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
46
+
47
+ export TMPDIR=/scratch
48
+ export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
49
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
50
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
51
+
52
+ huggingface-cli login --token $HUGGINGFACE_TOKEN
53
+
54
+
55
+ NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
56
+ CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/config.yaml"
57
+
58
+ LAUNCHER="torchrun \
59
+ --nproc_per_node 8 \
60
+ --nnodes 2 \
61
+ --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
62
+ --rdzv_backend c10d \
63
+ --max_restarts 0 \
64
+ --tee 3 \
65
+ --node_rank ${SLURM_PROCID}"
66
+
67
+ # Checkout the bench_cluster branch
68
+ cd $NANOTRON_REPO
69
+ git checkout bench_cluster
70
+ cd ..
71
+ # Get the current job ID
72
+ job_id=${SLURM_JOB_ID}
73
+
74
+ # Update status to "pending" or "running" in the background
75
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/status.txt &
76
+
77
+ # Run the main command
78
+ srun -u $LAUNCHER $CMD
79
+ exit_status=$?
80
+
81
+ # Update status based on the exit status of `srun`
82
+ if [ $exit_status -eq 0 ]; then
83
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/status.txt
84
+ else
85
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/log.out; then
86
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/status.txt
87
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/log.out; then
88
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/status.txt
89
+ elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/log.out; then
90
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/status.txt
91
+ else
92
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/status.txt
93
+ fi
94
+ fi
95
+
96
+ # Run the report script if the job completed successfully
97
+ if [ $exit_status -eq 0 ]; then
98
+ python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2 --is_logs
99
+ python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2 --is_profiler
100
+ fi
101
+
102
+
103
+ # Push to hub the folder using huggingface_cli
104
+ huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2 llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2 --commit-message "Upload llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2"
105
+
106
+ # Verify the upload
107
+ if [ $? -eq 0 ]; then
108
+ echo "Uploading to Huggingface Hub successful"
109
+ else
110
+ echo "Failed to upload to Huggingface Hub"
111
+ fi
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/config.yaml ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ general:
2
+ project: bench_cluster
3
+ seed: 42
4
+ model:
5
+ ddp_bucket_cap_mb: 25
6
+ dtype: bfloat16
7
+ init_method:
8
+ std: 0.025
9
+ make_vocab_size_divisible_by: 1
10
+ model_config:
11
+ bos_token_id: 1
12
+ eos_token_id: 2
13
+ hidden_act: silu
14
+ hidden_size: 2048
15
+ initializer_range: 0.02
16
+ intermediate_size: 4096
17
+ is_llama_config: true
18
+ max_position_embeddings: 4096
19
+ num_attention_heads: 32
20
+ num_hidden_layers: 24
21
+ num_key_value_heads: 32
22
+ pad_token_id: null
23
+ pretraining_tp: 1
24
+ rms_norm_eps: 1.0e-05
25
+ rope_scaling: null
26
+ rope_theta: 10000.0
27
+ tie_word_embeddings: true
28
+ use_cache: true
29
+ vocab_size: 50257
30
+ optimizer:
31
+ accumulate_grad_in_fp32: true
32
+ clip_grad: 1.0
33
+ learning_rate_scheduler:
34
+ learning_rate: 0.0001
35
+ lr_decay_style: linear
36
+ lr_warmup_style: linear
37
+ lr_warmup_steps: 1
38
+ min_decay_lr: 1.0e-05
39
+ optimizer_factory:
40
+ adam_beta1: 0.9
41
+ adam_beta2: 0.95
42
+ adam_eps: 1.0e-08
43
+ name: adamW
44
+ torch_adam_is_fused: true
45
+ weight_decay: 0.01
46
+ zero_stage: 1
47
+ parallelism:
48
+ dp: 4
49
+ expert_parallel_size: 1
50
+ pp: 1
51
+ pp_engine: 1f1b
52
+ tp: 4
53
+ tp_linear_async_communication: false
54
+ tp_mode: REDUCE_SCATTER
55
+ profiler:
56
+ profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2
57
+ tokenizer:
58
+ tokenizer_max_length: null
59
+ tokenizer_name_or_path: openai-community/gpt2
60
+ tokenizer_revision: null
61
+ data_stages:
62
+ - name: Training Stage
63
+ start_training_step: 1
64
+ data:
65
+ dataset:
66
+ dataset_overwrite_cache: false
67
+ dataset_processing_num_proc_per_process: 64
68
+ hf_dataset_config_name: null
69
+ hf_dataset_or_datasets: roneneldan/TinyStories
70
+ hf_dataset_splits: train
71
+ text_column_name: text
72
+ num_loading_workers: 32
73
+ seed: 42
74
+ lighteval: null
75
+ tokens:
76
+ train_steps: 20
77
+ val_check_interval: -1
78
+ batch_accumulation_per_replica: 128
79
+ limit_test_batches: 0
80
+ limit_val_batches: 0
81
+ micro_batch_size: 2
82
+ sequence_length: 4096
83
+ logging:
84
+ iteration_step_info_interval: 1
85
+ log_level: info
86
+ log_level_replica: info
87
+ checkpoints:
88
+ checkpoint_interval: 100000
89
+ checkpoints_path: /dev/null
90
+ resume_checkpoint_path: null
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/log.out ADDED
@@ -0,0 +1,868 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ========================
2
+ START TIME: Tue Jul 2 16:32:34 UTC 2024
3
+ python3 version = Python 3.10.14
4
+ ========================
5
+ The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
6
+ Token is valid (permission: write).
7
+ OSError: [Errno 122] Disk quota exceeded
8
+
9
+ During handling of the above exception, another exception occurred:
10
+
11
+ Traceback (most recent call last):
12
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/huggingface-cli", line 8, in <module>
13
+ sys.exit(main())
14
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/huggingface_hub/commands/huggingface_cli.py", line 51, in main
15
+ service.run()
16
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/huggingface_hub/commands/user.py", line 98, in run
17
+ login(token=self.args.token, add_to_git_credential=self.args.add_to_git_credential)
18
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/huggingface_hub/_login.py", line 111, in login
19
+ _login(token, add_to_git_credential=add_to_git_credential, write_permission=write_permission)
20
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/huggingface_hub/_login.py", line 328, in _login
21
+ path.write_text(token)
22
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/pathlib.py", line 1154, in write_text
23
+ with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f:
24
+ OSError: [Errno 122] Disk quota exceeded
25
+ Already on 'bench_cluster'
26
+ M examples/config_tiny_llama.py
27
+ M examples/config_tiny_llama.yaml
28
+ M examples/train_tiny_llama.sh
29
+ M src/nanotron/models/llama.py
30
+ M src/nanotron/trainer.py
31
+ Your branch is up to date with 'origin/bench_cluster'.
32
+ Job status: RUNNING
33
+ W0702 16:32:37.194000 140146465437504 torch/distributed/run.py:757]
34
+ W0702 16:32:37.194000 140146465437504 torch/distributed/run.py:757] *****************************************
35
+ W0702 16:32:37.194000 140146465437504 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
36
+ W0702 16:32:37.194000 140146465437504 torch/distributed/run.py:757] *****************************************
37
+ W0702 16:32:37.303000 140548878317376 torch/distributed/run.py:757]
38
+ W0702 16:32:37.303000 140548878317376 torch/distributed/run.py:757] *****************************************
39
+ W0702 16:32:37.303000 140548878317376 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
40
+ W0702 16:32:37.303000 140548878317376 torch/distributed/run.py:757] *****************************************
41
+ [default0]:07/02/2024 16:32:55 [WARNING|DP=0|PP=0|TP=0|ip-26-0-167-177]: [Vocab Size Padding] Padded vocab (size: 50257) with 3 dummy tokens (new size: 50260)
42
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Config:
43
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Config(general=GeneralArgs(project='bench_cluster',
44
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: run='%date_%jobid',
45
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: seed=42,
46
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: step=None,
47
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: consumed_train_samples=None,
48
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: benchmark_csv_path=None,
49
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: ignore_sanity_checks=True),
50
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: parallelism=ParallelismArgs(dp=4,
51
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: pp=1,
52
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: tp=4,
53
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7fc860d68790>,
54
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
55
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: tp_linear_async_communication=False,
56
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: expert_parallel_size=1),
57
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=1,
58
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: eos_token_id=2,
59
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: hidden_act='silu',
60
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: hidden_size=2048,
61
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: initializer_range=0.02,
62
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: intermediate_size=4096,
63
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: is_llama_config=True,
64
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: max_position_embeddings=4096,
65
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: num_attention_heads=32,
66
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: num_hidden_layers=24,
67
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: num_key_value_heads=32,
68
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: pad_token_id=None,
69
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: pretraining_tp=1,
70
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: rms_norm_eps=1e-05,
71
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: rope_scaling=None,
72
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: rope_theta=10000.0,
73
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: tie_word_embeddings=True,
74
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: use_cache=True,
75
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: vocab_size=50260),
76
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: init_method=RandomInit(std=0.025),
77
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: dtype=torch.bfloat16,
78
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: make_vocab_size_divisible_by=1,
79
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: ddp_bucket_cap_mb=25),
80
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: tokenizer=TokenizerArgs(tokenizer_name_or_path='openai-community/gpt2',
81
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: tokenizer_revision=None,
82
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: tokenizer_max_length=None),
83
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: checkpoints=CheckpointsArgs(checkpoints_path=Path('/dev/null'),
84
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: checkpoint_interval=100000,
85
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: save_initial_state=False,
86
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: resume_checkpoint_path=None,
87
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: checkpoints_path_is_shared_file_system=False),
88
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: logging=LoggingArgs(log_level='info',
89
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: log_level_replica='info',
90
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: iteration_step_info_interval=1),
91
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: tokens=TokensArgs(sequence_length=4096,
92
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: train_steps=20,
93
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: micro_batch_size=2,
94
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: batch_accumulation_per_replica=128,
95
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: val_check_interval=-1,
96
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: limit_val_batches=0,
97
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: limit_test_batches=0),
98
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
99
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: adam_beta1=0.9,
100
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: adam_beta2=0.95,
101
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: torch_adam_is_fused=True,
102
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: name='adamW'),
103
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: zero_stage=1,
104
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: weight_decay=0.01,
105
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: clip_grad=1.0,
106
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: accumulate_grad_in_fp32=True,
107
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0001,
108
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: lr_warmup_steps=1,
109
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: lr_warmup_style='linear',
110
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: lr_decay_style='linear',
111
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: lr_decay_steps=19,
112
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: lr_decay_starting_step=None,
113
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: min_decay_lr=1e-05)),
114
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: data_stages=[DatasetStageArgs(name='Training Stage',
115
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: start_training_step=1,
116
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: data=DataArgs(dataset=PretrainDatasetsArgs(hf_dataset_or_datasets='roneneldan/TinyStories',
117
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: hf_dataset_splits='train',
118
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: hf_dataset_config_name=None,
119
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: dataset_processing_num_proc_per_process=64,
120
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: dataset_overwrite_cache=False,
121
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: text_column_name='text'),
122
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: seed=42,
123
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: num_loading_workers=32))],
124
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: profiler=ProfilerArgs(profiler_export_path=Path('/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2')),
125
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: lighteval=None)
126
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Model Config:
127
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: LlamaConfig(bos_token_id=1,
128
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: eos_token_id=2,
129
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: hidden_act='silu',
130
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: hidden_size=2048,
131
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: initializer_range=0.02,
132
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: intermediate_size=4096,
133
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: is_llama_config=True,
134
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: max_position_embeddings=4096,
135
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: num_attention_heads=32,
136
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: num_hidden_layers=24,
137
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: num_key_value_heads=32,
138
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: pad_token_id=None,
139
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: pretraining_tp=1,
140
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: rms_norm_eps=1e-05,
141
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: rope_scaling=None,
142
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: rope_theta=10000.0,
143
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: tie_word_embeddings=True,
144
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: use_cache=True,
145
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: vocab_size=50260)
146
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Building model..
147
+ [default0]:07/02/2024 16:32:55 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Setting PP block ranks...
148
+ [default0]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Total number of parameters: 1.11G (2117.09MiB)
149
+ [default0]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Local number of parameters: 277M (529.27MiB)
150
+ [default0]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
151
+ [default0]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: No checkpoint path provided.
152
+ [default0]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Parametrizing model parameters using StandardParametrizator
153
+ [default1]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=1|ip-26-0-167-177]: Local number of parameters: 277M (529.27MiB)
154
+ [default1]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=1|ip-26-0-167-177]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
155
+ [default1]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=1|ip-26-0-167-177]: No checkpoint path provided.
156
+ [default6]:07/02/2024 16:33:09 [INFO|DP=1|PP=0|TP=2|ip-26-0-167-177]: No checkpoint path provided.
157
+ [default2]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=2|ip-26-0-167-177]: Local number of parameters: 277M (529.27MiB)
158
+ [default2]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=2|ip-26-0-167-177]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
159
+ [default2]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=2|ip-26-0-167-177]: No checkpoint path provided.
160
+ [default4]:07/02/2024 16:33:09 [INFO|DP=1|PP=0|TP=0|ip-26-0-167-177]: No checkpoint path provided.
161
+ [default5]:07/02/2024 16:33:09 [INFO|DP=1|PP=0|TP=1|ip-26-0-167-177]: No checkpoint path provided.
162
+ [default7]:07/02/2024 16:33:09 [INFO|DP=1|PP=0|TP=3|ip-26-0-167-177]: No checkpoint path provided.
163
+ [default3]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=3|ip-26-0-167-177]: Local number of parameters: 277M (529.27MiB)
164
+ [default3]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=3|ip-26-0-167-177]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
165
+ [default3]:07/02/2024 16:33:09 [INFO|DP=0|PP=0|TP=3|ip-26-0-167-177]: No checkpoint path provided.
166
+ [default1]:07/02/2024 16:33:09 [INFO|DP=2|PP=0|TP=1|ip-26-0-170-31]: No checkpoint path provided.
167
+ [default2]:07/02/2024 16:33:09 [INFO|DP=2|PP=0|TP=2|ip-26-0-170-31]: No checkpoint path provided.
168
+ [default3]:07/02/2024 16:33:09 [INFO|DP=2|PP=0|TP=3|ip-26-0-170-31]: No checkpoint path provided.
169
+ [default0]:07/02/2024 16:33:09 [INFO|DP=2|PP=0|TP=0|ip-26-0-170-31]: No checkpoint path provided.
170
+ [default4]:07/02/2024 16:33:09 [INFO|DP=3|PP=0|TP=0|ip-26-0-170-31]: No checkpoint path provided.
171
+ [default6]:07/02/2024 16:33:09 [INFO|DP=3|PP=0|TP=2|ip-26-0-170-31]: No checkpoint path provided.
172
+ [default7]:07/02/2024 16:33:09 [INFO|DP=3|PP=0|TP=3|ip-26-0-170-31]: No checkpoint path provided.
173
+ [default5]:07/02/2024 16:33:09 [INFO|DP=3|PP=0|TP=1|ip-26-0-170-31]: No checkpoint path provided.
174
+ [default0]:07/02/2024 16:33:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [Optimizer Building] Using LearningRateForSP as learning rate
175
+ [default0]:07/02/2024 16:33:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [ZeRO sharding] Size of optimizer params per rank:
176
+ [default0]:07/02/2024 16:33:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [ZeRO sharding] DP Rank 0 has 69.4M out of 277M (25.00%) params' optimizer states
177
+ [default0]:07/02/2024 16:33:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [ZeRO sharding] DP Rank 1 has 69.4M out of 277M (25.00%) params' optimizer states
178
+ [default0]:07/02/2024 16:33:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [ZeRO sharding] DP Rank 2 has 69.4M out of 277M (25.00%) params' optimizer states
179
+ [default0]:07/02/2024 16:33:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [ZeRO sharding] DP Rank 3 has 69.4M out of 277M (25.00%) params' optimizer states
180
+ [default0]:07/02/2024 16:33:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [Training Plan] Stage Training Stage has 19 remaining training steps and has consumed 0 samples
181
+ [default0]:07/02/2024 16:33:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Using `datasets` library
182
+ [default0]:07/02/2024 16:33:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Loading tokenizer from openai-community/gpt2 and transformers/hf_hub versions ('4.41.2', '0.23.4')
183
+ [default0]:07/02/2024 16:33:13 [WARNING|DP=0|PP=0|TP=0|ip-26-0-167-177]: Repo card metadata block was not found. Setting CardData to empty.
184
+ [default0]:Repo card metadata block was not found. Setting CardData to empty.
185
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [Training Plan] There are 1 training stages
186
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [Stage Training Stage] start from step 1
187
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]:
188
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: [Start training] datetime: 2024-07-02 16:33:14.575571 | mbs: 2 | grad_accum: 128 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
189
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
190
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-167-177]: Memory usage: 1877.40MiB. Peak allocated 1877.40MiB. Peak reserved: 1934.00MiB
191
+ [default3]:Repo card metadata block was not found. Setting CardData to empty.
192
+ [default3]:07/02/2024 16:33:14 [WARNING|DP=2|PP=0|TP=3|ip-26-0-170-31]: Repo card metadata block was not found. Setting CardData to empty.
193
+ [default5]:Repo card metadata block was not found. Setting CardData to empty.
194
+ [default4]:Repo card metadata block was not found. Setting CardData to empty.
195
+ [default5]:07/02/2024 16:33:14 [WARNING|DP=1|PP=0|TP=1|ip-26-0-167-177]: Repo card metadata block was not found. Setting CardData to empty.
196
+ [default4]:07/02/2024 16:33:14 [WARNING|DP=1|PP=0|TP=0|ip-26-0-167-177]: Repo card metadata block was not found. Setting CardData to empty.
197
+ [default7]:07/02/2024 16:33:14 [WARNING|DP=3|PP=0|TP=3|ip-26-0-170-31]: Repo card metadata block was not found. Setting CardData to empty.
198
+ [default2]:Repo card metadata block was not found. Setting CardData to empty.
199
+ [default7]:Repo card metadata block was not found. Setting CardData to empty.
200
+ [default4]:Repo card metadata block was not found. Setting CardData to empty.
201
+ [default5]:Repo card metadata block was not found. Setting CardData to empty.
202
+ [default5]:07/02/2024 16:33:14 [WARNING|DP=3|PP=0|TP=1|ip-26-0-170-31]: Repo card metadata block was not found. Setting CardData to empty.
203
+ [default4]:07/02/2024 16:33:14 [WARNING|DP=3|PP=0|TP=0|ip-26-0-170-31]: Repo card metadata block was not found. Setting CardData to empty.
204
+ [default1]:07/02/2024 16:33:14 [WARNING|DP=2|PP=0|TP=1|ip-26-0-170-31]: Repo card metadata block was not found. Setting CardData to empty.
205
+ [default2]:07/02/2024 16:33:14 [WARNING|DP=2|PP=0|TP=2|ip-26-0-170-31]: Repo card metadata block was not found. Setting CardData to empty.
206
+ [default6]:07/02/2024 16:33:14 [WARNING|DP=3|PP=0|TP=2|ip-26-0-170-31]: Repo card metadata block was not found. Setting CardData to empty.
207
+ [default0]:07/02/2024 16:33:14 [WARNING|DP=2|PP=0|TP=0|ip-26-0-170-31]: Repo card metadata block was not found. Setting CardData to empty.
208
+ [default1]:Repo card metadata block was not found. Setting CardData to empty.
209
+ [default6]:07/02/2024 16:33:14 [WARNING|DP=1|PP=0|TP=2|ip-26-0-167-177]: Repo card metadata block was not found. Setting CardData to empty.
210
+ [default2]:Repo card metadata block was not found. Setting CardData to empty.
211
+ [default7]:07/02/2024 16:33:14 [WARNING|DP=1|PP=0|TP=3|ip-26-0-167-177]: Repo card metadata block was not found. Setting CardData to empty.
212
+ [default6]:Repo card metadata block was not found. Setting CardData to empty.
213
+ [default7]:Repo card metadata block was not found. Setting CardData to empty.
214
+ [default3]:Repo card metadata block was not found. Setting CardData to empty.
215
+ [default3]:07/02/2024 16:33:14 [WARNING|DP=0|PP=0|TP=3|ip-26-0-167-177]: Repo card metadata block was not found. Setting CardData to empty.
216
+ [default2]:07/02/2024 16:33:14 [WARNING|DP=0|PP=0|TP=2|ip-26-0-167-177]: Repo card metadata block was not found. Setting CardData to empty.
217
+ [default6]:Repo card metadata block was not found. Setting CardData to empty.
218
+ [default0]:Repo card metadata block was not found. Setting CardData to empty.
219
+ [default1]:Repo card metadata block was not found. Setting CardData to empty.
220
+ [default1]:07/02/2024 16:33:14 [WARNING|DP=0|PP=0|TP=1|ip-26-0-167-177]: Repo card metadata block was not found. Setting CardData to empty.
221
+ [default6]:[rank14]: OSError: [Errno 122] Disk quota exceeded
222
+ [default6]:
223
+ [default6]:[rank14]: During handling of the above exception, another exception occurred:
224
+ [default6]:
225
+ [default6]:[rank14]: Traceback (most recent call last):
226
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
227
+ [default6]:[rank14]: trainer.train(dataloader)
228
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
229
+ [default6]:[rank14]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
230
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
231
+ [default6]:[rank14]: outputs = self.pipeline_engine.train_batch_iter(
232
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
233
+ [default6]:[rank14]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
234
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
235
+ [default6]:[rank14]: output = model(**micro_batch)
236
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
237
+ [default6]:[rank14]: return self._call_impl(*args, **kwargs)
238
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
239
+ [default6]:[rank14]: return forward_call(*args, **kwargs)
240
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
241
+ [default6]:[rank14]: sharded_logits = self.model(
242
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
243
+ [default6]:[rank14]: return self._call_impl(*args, **kwargs)
244
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
245
+ [default6]:[rank14]: return forward_call(*args, **kwargs)
246
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
247
+ [default6]:[rank14]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
248
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
249
+ [default6]:[rank14]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
250
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
251
+ [default6]:[rank14]: return self._call_impl(*args, **kwargs)
252
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
253
+ [default6]:[rank14]: return forward_call(*args, **kwargs)
254
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
255
+ [default6]:[rank14]: output = self.pp_block(**new_kwargs)
256
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
257
+ [default6]:[rank14]: return self._call_impl(*args, **kwargs)
258
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
259
+ [default6]:[rank14]: return forward_call(*args, **kwargs)
260
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
261
+ [default6]:[rank14]: hidden_states = self.input_layernorm(hidden_states)
262
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
263
+ [default6]:[rank14]: return self._call_impl(*args, **kwargs)
264
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
265
+ [default6]:[rank14]: return forward_call(*args, **kwargs)
266
+ [default6]:[rank14]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
267
+ [default6]:[rank14]: return layer_norm_fn(
268
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
269
+ [default6]:[rank14]: return LayerNormFn.apply(
270
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
271
+ [default6]:[rank14]: return super().apply(*args, **kwargs) # type: ignore[misc]
272
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
273
+ [default6]:[rank14]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
274
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
275
+ [default6]:[rank14]: _layer_norm_fwd_1pass_kernel[(M,)](
276
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
277
+ [default6]:[rank14]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
278
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
279
+ [default6]:[rank14]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
280
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
281
+ [default6]:[rank14]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
282
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
283
+ [default6]:[rank14]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
284
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
285
+ [default6]:[rank14]: fn()
286
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
287
+ [default6]:[rank14]: self.fn.run(
288
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
289
+ [default6]:[rank14]: return self.fn.run(*args, **kwargs)
290
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
291
+ [default6]:[rank14]: return self.fn.run(*args, **kwargs)
292
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
293
+ [default6]:[rank14]: return self.fn.run(*args, **kwargs)
294
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
295
+ [default6]:[rank14]: self.cache[device][key] = compile(
296
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
297
+ [default6]:[rank14]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
298
+ [default6]:[rank14]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
299
+ [default6]:[rank14]: with open(temp_path, mode) as f:
300
+ [default6]:[rank14]: OSError: [Errno 122] Disk quota exceeded
301
+ [default2]:[rank2]: OSError: [Errno 122] Disk quota exceeded
302
+ [default2]:
303
+ [default2]:[rank2]: During handling of the above exception, another exception occurred:
304
+ [default2]:
305
+ [default2]:[rank2]: Traceback (most recent call last):
306
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
307
+ [default2]:[rank2]: trainer.train(dataloader)
308
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
309
+ [default2]:[rank2]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
310
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
311
+ [default2]:[rank2]: outputs = self.pipeline_engine.train_batch_iter(
312
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
313
+ [default2]:[rank2]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
314
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
315
+ [default2]:[rank2]: output = model(**micro_batch)
316
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
317
+ [default2]:[rank2]: return self._call_impl(*args, **kwargs)
318
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
319
+ [default2]:[rank2]: return forward_call(*args, **kwargs)
320
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
321
+ [default2]:[rank2]: sharded_logits = self.model(
322
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
323
+ [default2]:[rank2]: return self._call_impl(*args, **kwargs)
324
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
325
+ [default2]:[rank2]: return forward_call(*args, **kwargs)
326
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
327
+ [default2]:[rank2]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
328
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
329
+ [default2]:[rank2]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
330
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
331
+ [default2]:[rank2]: return self._call_impl(*args, **kwargs)
332
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
333
+ [default2]:[rank2]: return forward_call(*args, **kwargs)
334
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
335
+ [default2]:[rank2]: output = self.pp_block(**new_kwargs)
336
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
337
+ [default2]:[rank2]: return self._call_impl(*args, **kwargs)
338
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
339
+ [default2]:[rank2]: return forward_call(*args, **kwargs)
340
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
341
+ [default2]:[rank2]: hidden_states = self.input_layernorm(hidden_states)
342
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
343
+ [default2]:[rank2]: return self._call_impl(*args, **kwargs)
344
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
345
+ [default2]:[rank2]: return forward_call(*args, **kwargs)
346
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
347
+ [default2]:[rank2]: return layer_norm_fn(
348
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
349
+ [default2]:[rank2]: return LayerNormFn.apply(
350
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
351
+ [default2]:[rank2]: return super().apply(*args, **kwargs) # type: ignore[misc]
352
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
353
+ [default2]:[rank2]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
354
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
355
+ [default2]:[rank2]: _layer_norm_fwd_1pass_kernel[(M,)](
356
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
357
+ [default2]:[rank2]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
358
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
359
+ [default2]:[rank2]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
360
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
361
+ [default2]:[rank2]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
362
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
363
+ [default2]:[rank2]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
364
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
365
+ [default2]:[rank2]: fn()
366
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
367
+ [default2]:[rank2]: self.fn.run(
368
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
369
+ [default2]:[rank2]: return self.fn.run(*args, **kwargs)
370
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
371
+ [default2]:[rank2]: return self.fn.run(*args, **kwargs)
372
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
373
+ [default2]:[rank2]: return self.fn.run(*args, **kwargs)
374
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
375
+ [default2]:[rank2]: self.cache[device][key] = compile(
376
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
377
+ [default2]:[rank2]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
378
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
379
+ [default2]:[rank2]: with open(temp_path, mode) as f:
380
+ [default2]:[rank2]: OSError: [Errno 122] Disk quota exceeded
381
+ [default1]:[rank1]: OSError: [Errno 122] Disk quota exceeded
382
+ [default1]:
383
+ [default1]:[rank1]: During handling of the above exception, another exception occurred:
384
+ [default1]:
385
+ [default1]:[rank1]: Traceback (most recent call last):
386
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
387
+ [default1]:[rank1]: trainer.train(dataloader)
388
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
389
+ [default1]:[rank1]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
390
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
391
+ [default1]:[rank1]: outputs = self.pipeline_engine.train_batch_iter(
392
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
393
+ [default1]:[rank1]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
394
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
395
+ [default1]:[rank1]: output = model(**micro_batch)
396
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
397
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
398
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
399
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
400
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
401
+ [default1]:[rank1]: sharded_logits = self.model(
402
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
403
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
404
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
405
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
406
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
407
+ [default1]:[rank1]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
408
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
409
+ [default1]:[rank1]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
410
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
411
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
412
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
413
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
414
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
415
+ [default1]:[rank1]: output = self.pp_block(**new_kwargs)
416
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
417
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
418
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
419
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
420
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
421
+ [default1]:[rank1]: hidden_states = self.input_layernorm(hidden_states)
422
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
423
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
424
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
425
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
426
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
427
+ [default1]:[rank1]: return layer_norm_fn(
428
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
429
+ [default1]:[rank1]: return LayerNormFn.apply(
430
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
431
+ [default1]:[rank1]: return super().apply(*args, **kwargs) # type: ignore[misc]
432
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
433
+ [default1]:[rank1]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
434
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
435
+ [default1]:[rank1]: _layer_norm_fwd_1pass_kernel[(M,)](
436
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
437
+ [default1]:[rank1]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
438
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
439
+ [default1]:[rank1]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
440
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
441
+ [default1]:[rank1]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
442
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
443
+ [default1]:[rank1]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
444
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
445
+ [default1]:[rank1]: fn()
446
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
447
+ [default1]:[rank1]: self.fn.run(
448
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
449
+ [default1]:[rank1]: return self.fn.run(*args, **kwargs)
450
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
451
+ [default1]:[rank1]: return self.fn.run(*args, **kwargs)
452
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
453
+ [default1]:[rank1]: return self.fn.run(*args, **kwargs)
454
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
455
+ [default1]:[rank1]: self.cache[device][key] = compile(
456
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
457
+ [default1]:[rank1]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
458
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
459
+ [default1]:[rank1]: with open(temp_path, mode) as f:
460
+ [default1]:[rank1]: OSError: [Errno 122] Disk quota exceeded
461
+ [default7]:[rank15]: OSError: [Errno 122] Disk quota exceeded
462
+ [default7]:
463
+ [default7]:[rank15]: During handling of the above exception, another exception occurred:
464
+ [default7]:
465
+ [default7]:[rank15]: Traceback (most recent call last):
466
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
467
+ [default7]:[rank15]: trainer.train(dataloader)
468
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
469
+ [default7]:[rank15]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
470
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
471
+ [default7]:[rank15]: outputs = self.pipeline_engine.train_batch_iter(
472
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
473
+ [default7]:[rank15]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
474
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
475
+ [default7]:[rank15]: output = model(**micro_batch)
476
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
477
+ [default7]:[rank15]: return self._call_impl(*args, **kwargs)
478
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
479
+ [default7]:[rank15]: return forward_call(*args, **kwargs)
480
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
481
+ [default7]:[rank15]: sharded_logits = self.model(
482
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
483
+ [default7]:[rank15]: return self._call_impl(*args, **kwargs)
484
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
485
+ [default7]:[rank15]: return forward_call(*args, **kwargs)
486
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
487
+ [default7]:[rank15]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
488
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
489
+ [default7]:[rank15]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
490
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
491
+ [default7]:[rank15]: return self._call_impl(*args, **kwargs)
492
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
493
+ [default7]:[rank15]: return forward_call(*args, **kwargs)
494
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
495
+ [default7]:[rank15]: output = self.pp_block(**new_kwargs)
496
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
497
+ [default7]:[rank15]: return self._call_impl(*args, **kwargs)
498
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
499
+ [default7]:[rank15]: return forward_call(*args, **kwargs)
500
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
501
+ [default7]:[rank15]: hidden_states = self.input_layernorm(hidden_states)
502
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
503
+ [default7]:[rank15]: return self._call_impl(*args, **kwargs)
504
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
505
+ [default7]:[rank15]: return forward_call(*args, **kwargs)
506
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
507
+ [default7]:[rank15]: return layer_norm_fn(
508
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
509
+ [default7]:[rank15]: return LayerNormFn.apply(
510
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
511
+ [default7]:[rank15]: return super().apply(*args, **kwargs) # type: ignore[misc]
512
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
513
+ [default7]:[rank15]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
514
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
515
+ [default7]:[rank15]: _layer_norm_fwd_1pass_kernel[(M,)](
516
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
517
+ [default7]:[rank15]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
518
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
519
+ [default7]:[rank15]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
520
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
521
+ [default7]:[rank15]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
522
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
523
+ [default7]:[rank15]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
524
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
525
+ [default7]:[rank15]: fn()
526
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
527
+ [default7]:[rank15]: self.fn.run(
528
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
529
+ [default7]:[rank15]: return self.fn.run(*args, **kwargs)
530
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
531
+ [default7]:[rank15]: return self.fn.run(*args, **kwargs)
532
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
533
+ [default7]:[rank15]: return self.fn.run(*args, **kwargs)
534
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
535
+ [default7]:[rank15]: self.cache[device][key] = compile(
536
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
537
+ [default7]:[rank15]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
538
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
539
+ [default7]:[rank15]: with open(temp_path, mode) as f:
540
+ [default7]:[rank15]: OSError: [Errno 122] Disk quota exceeded
541
+ [default0]:[rank8]: OSError: [Errno 122] Disk quota exceeded
542
+ [default0]:
543
+ [default0]:[rank8]: During handling of the above exception, another exception occurred:
544
+ [default0]:
545
+ [default0]:[rank8]: Traceback (most recent call last):
546
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
547
+ [default0]:[rank8]: trainer.train(dataloader)
548
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
549
+ [default0]:[rank8]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
550
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
551
+ [default0]:[rank8]: outputs = self.pipeline_engine.train_batch_iter(
552
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
553
+ [default0]:[rank8]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
554
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
555
+ [default0]:[rank8]: output = model(**micro_batch)
556
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
557
+ [default0]:[rank8]: return self._call_impl(*args, **kwargs)
558
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
559
+ [default0]:[rank8]: return forward_call(*args, **kwargs)
560
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
561
+ [default0]:[rank8]: sharded_logits = self.model(
562
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
563
+ [default0]:[rank8]: return self._call_impl(*args, **kwargs)
564
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
565
+ [default0]:[rank8]: return forward_call(*args, **kwargs)
566
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
567
+ [default0]:[rank8]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
568
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
569
+ [default0]:[rank8]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
570
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
571
+ [default0]:[rank8]: return self._call_impl(*args, **kwargs)
572
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
573
+ [default0]:[rank8]: return forward_call(*args, **kwargs)
574
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
575
+ [default0]:[rank8]: output = self.pp_block(**new_kwargs)
576
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
577
+ [default0]:[rank8]: return self._call_impl(*args, **kwargs)
578
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
579
+ [default0]:[rank8]: return forward_call(*args, **kwargs)
580
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
581
+ [default0]:[rank8]: hidden_states = self.input_layernorm(hidden_states)
582
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
583
+ [default0]:[rank8]: return self._call_impl(*args, **kwargs)
584
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
585
+ [default0]:[rank8]: return forward_call(*args, **kwargs)
586
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
587
+ [default0]:[rank8]: return layer_norm_fn(
588
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
589
+ [default0]:[rank8]: return LayerNormFn.apply(
590
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
591
+ [default0]:[rank8]: return super().apply(*args, **kwargs) # type: ignore[misc]
592
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
593
+ [default0]:[rank8]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
594
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
595
+ [default0]:[rank8]: _layer_norm_fwd_1pass_kernel[(M,)](
596
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
597
+ [default0]:[rank8]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
598
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
599
+ [default0]:[rank8]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
600
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
601
+ [default0]:[rank8]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
602
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
603
+ [default0]:[rank8]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
604
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
605
+ [default0]:[rank8]: fn()
606
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
607
+ [default0]:[rank8]: self.fn.run(
608
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
609
+ [default0]:[rank8]: return self.fn.run(*args, **kwargs)
610
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
611
+ [default0]:[rank8]: return self.fn.run(*args, **kwargs)
612
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
613
+ [default0]:[rank8]: return self.fn.run(*args, **kwargs)
614
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
615
+ [default0]:[rank8]: self.cache[device][key] = compile(
616
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
617
+ [default0]:[rank8]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
618
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
619
+ [default0]:[rank8]: with open(temp_path, mode) as f:
620
+ [default0]:[rank8]: OSError: [Errno 122] Disk quota exceeded
621
+ [default5]:[rank5]: OSError: [Errno 122] Disk quota exceeded
622
+ [default5]:
623
+ [default5]:[rank5]: During handling of the above exception, another exception occurred:
624
+ [default5]:
625
+ [default5]:[rank5]: Traceback (most recent call last):
626
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
627
+ [default5]:[rank5]: trainer.train(dataloader)
628
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
629
+ [default5]:[rank5]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
630
+ [default6]:[rank6]: OSError: [Errno 122] Disk quota exceeded
631
+ [default6]:
632
+ [default6]:[rank6]: During handling of the above exception, another exception occurred:
633
+ [default6]:
634
+ [default6]:[rank6]: Traceback (most recent call last):
635
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
636
+ [default6]:[rank6]: trainer.train(dataloader)
637
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
638
+ [default6]:[rank6]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
639
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
640
+ [default6]:[rank6]: outputs = self.pipeline_engine.train_batch_iter(
641
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
642
+ [default6]:[rank6]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
643
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
644
+ [default6]:[rank6]: output = model(**micro_batch)
645
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
646
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
647
+ [default5]:[rank5]: outputs = self.pipeline_engine.train_batch_iter(
648
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
649
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
650
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
651
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
652
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
653
+ [default6]:[rank6]: sharded_logits = self.model(
654
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
655
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
656
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
657
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
658
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
659
+ [default6]:[rank6]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
660
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
661
+ [default6]:[rank6]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
662
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
663
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
664
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
665
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
666
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
667
+ [default6]:[rank6]: output = self.pp_block(**new_kwargs)
668
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
669
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
670
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
671
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
672
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
673
+ [default6]:[rank6]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
674
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
675
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
676
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
677
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
678
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 566, in forward
679
+ [default6]:[rank6]: query_states, key_value_states = self.flash_rotary_embedding(query_states, kv=key_value_states)
680
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
681
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
682
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
683
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
684
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 457, in forward
685
+ [default6]:[rank6]: q = apply_rotary_emb_func(
686
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 122, in apply_rotary_emb
687
+ [default6]:[rank6]: return ApplyRotaryEmb.apply(
688
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
689
+ [default6]:[rank6]: return super().apply(*args, **kwargs) # type: ignore[misc]
690
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 48, in forward
691
+ [default6]:[rank6]: out = apply_rotary(
692
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/rotary.py", line 202, in apply_rotary
693
+ [default6]:[rank6]: rotary_kernel[grid](
694
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
695
+ [default6]:[rank6]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
696
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
697
+ [default6]:[rank6]: self.cache[device][key] = compile(
698
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
699
+ [default6]:[rank6]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
700
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
701
+ [default6]:[rank6]: with open(temp_path, mode) as f:
702
+ [default6]:[rank6]: OSError: [Errno 122] Disk quota exceeded
703
+ [default5]:[rank5]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
704
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
705
+ [default5]:[rank5]: output = model(**micro_batch)
706
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
707
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
708
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
709
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
710
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
711
+ [default5]:[rank5]: sharded_logits = self.model(
712
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
713
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
714
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
715
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
716
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
717
+ [default5]:[rank5]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
718
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
719
+ [default5]:[rank5]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
720
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
721
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
722
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
723
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
724
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
725
+ [default5]:[rank5]: output = self.pp_block(**new_kwargs)
726
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
727
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
728
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
729
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
730
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
731
+ [default5]:[rank5]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
732
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
733
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
734
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
735
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
736
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 566, in forward
737
+ [default5]:[rank5]: query_states, key_value_states = self.flash_rotary_embedding(query_states, kv=key_value_states)
738
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
739
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
740
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
741
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
742
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 457, in forward
743
+ [default5]:[rank5]: q = apply_rotary_emb_func(
744
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 122, in apply_rotary_emb
745
+ [default5]:[rank5]: return ApplyRotaryEmb.apply(
746
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
747
+ [default5]:[rank5]: return super().apply(*args, **kwargs) # type: ignore[misc]
748
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 48, in forward
749
+ [default5]:[rank5]: out = apply_rotary(
750
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/rotary.py", line 202, in apply_rotary
751
+ [default5]:[rank5]: rotary_kernel[grid](
752
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
753
+ [default5]:[rank5]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
754
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
755
+ [default5]:[rank5]: self.cache[device][key] = compile(
756
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
757
+ [default5]:[rank5]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
758
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
759
+ [default5]:[rank5]: with open(temp_path, mode) as f:
760
+ [default5]:[rank5]: OSError: [Errno 122] Disk quota exceeded
761
+ W0702 16:33:28.327000 140548878317376 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2973788 closing signal SIGTERM
762
+ W0702 16:33:28.331000 140146465437504 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 721346 closing signal SIGTERM
763
+ W0702 16:33:28.330000 140548878317376 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2973789 closing signal SIGTERM
764
+ W0702 16:33:28.334000 140146465437504 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 721349 closing signal SIGTERM
765
+ W0702 16:33:28.334000 140146465437504 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 721350 closing signal SIGTERM
766
+ W0702 16:33:28.331000 140548878317376 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2973790 closing signal SIGTERM
767
+ W0702 16:33:28.343000 140146465437504 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 721353 closing signal SIGTERM
768
+ W0702 16:33:28.340000 140548878317376 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2973791 closing signal SIGTERM
769
+ W0702 16:33:28.345000 140548878317376 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2973792 closing signal SIGTERM
770
+ E0702 16:33:29.863000 140146465437504 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 1 (pid: 721347) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
771
+ Traceback (most recent call last):
772
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
773
+ sys.exit(main())
774
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
775
+ return f(*args, **kwargs)
776
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
777
+ run(args)
778
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
779
+ elastic_launch(
780
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
781
+ return launch_agent(self._config, self._entrypoint, list(args))
782
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
783
+ raise ChildFailedError(
784
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
785
+ ============================================================
786
+ /fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
787
+ ------------------------------------------------------------
788
+ Failures:
789
+ [1]:
790
+ time : 2024-07-02_16:33:28
791
+ host : ip-26-0-167-177.ec2.internal
792
+ rank : 2 (local_rank: 2)
793
+ exitcode : 1 (pid: 721348)
794
+ error_file: <N/A>
795
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
796
+ [2]:
797
+ time : 2024-07-02_16:33:28
798
+ host : ip-26-0-167-177.ec2.internal
799
+ rank : 5 (local_rank: 5)
800
+ exitcode : 1 (pid: 721351)
801
+ error_file: <N/A>
802
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
803
+ [3]:
804
+ time : 2024-07-02_16:33:28
805
+ host : ip-26-0-167-177.ec2.internal
806
+ rank : 6 (local_rank: 6)
807
+ exitcode : 1 (pid: 721352)
808
+ error_file: <N/A>
809
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
810
+ ------------------------------------------------------------
811
+ Root Cause (first observed failure):
812
+ [0]:
813
+ time : 2024-07-02_16:33:28
814
+ host : ip-26-0-167-177.ec2.internal
815
+ rank : 1 (local_rank: 1)
816
+ exitcode : 1 (pid: 721347)
817
+ error_file: <N/A>
818
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
819
+ ============================================================
820
+ srun: error: ip-26-0-167-177: task 0: Exited with exit code 1
821
+ E0702 16:33:30.570000 140548878317376 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 0 (pid: 2973787) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
822
+ W0702 16:33:30.577000 140548878317376 torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1203] The node 'ip-26-0-170-31.ec2.internal_2973718_0' has failed to shutdown the rendezvous 'none' due to an error of type RendezvousConnectionError.
823
+ W0702 16:33:30.605000 140548878317376 torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1203] The node 'ip-26-0-170-31.ec2.internal_2973718_0' has failed to shutdown the rendezvous 'none' due to an error of type RendezvousConnectionError.
824
+ W0702 16:33:30.619000 140548878317376 torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1203] The node 'ip-26-0-170-31.ec2.internal_2973718_0' has failed to shutdown the rendezvous 'none' due to an error of type RendezvousConnectionError.
825
+ Traceback (most recent call last):
826
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
827
+ sys.exit(main())
828
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
829
+ return f(*args, **kwargs)
830
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
831
+ run(args)
832
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
833
+ elastic_launch(
834
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
835
+ return launch_agent(self._config, self._entrypoint, list(args))
836
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
837
+ raise ChildFailedError(
838
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
839
+ ============================================================
840
+ /fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
841
+ ------------------------------------------------------------
842
+ Failures:
843
+ [1]:
844
+ time : 2024-07-02_16:33:28
845
+ host : ip-26-0-170-31.ec2.internal
846
+ rank : 14 (local_rank: 6)
847
+ exitcode : 1 (pid: 2973793)
848
+ error_file: <N/A>
849
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
850
+ [2]:
851
+ time : 2024-07-02_16:33:28
852
+ host : ip-26-0-170-31.ec2.internal
853
+ rank : 15 (local_rank: 7)
854
+ exitcode : 1 (pid: 2973794)
855
+ error_file: <N/A>
856
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
857
+ ------------------------------------------------------------
858
+ Root Cause (first observed failure):
859
+ [0]:
860
+ time : 2024-07-02_16:33:28
861
+ host : ip-26-0-170-31.ec2.internal
862
+ rank : 8 (local_rank: 0)
863
+ exitcode : 1 (pid: 2973787)
864
+ error_file: <N/A>
865
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
866
+ ============================================================
867
+ srun: error: ip-26-0-170-31: task 1: Exited with exit code 1
868
+ Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
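+ 
+ The repeated `OSError: [Errno 122] Disk quota exceeded` above is raised while Triton writes its JIT compilation cache for the flash-attn layer-norm and rotary kernels, so every rank fails during the first forward pass rather than in the model itself. A minimal sketch of extra exports that could be added to the job script, assuming the /scratch filesystem (already used for TMPDIR) has room for the Triton cache and that `hf_transfer` is wanted for uploads; the cache path below is illustrative, not part of the original script:
+ 
+     # Redirect Triton's kernel-compilation cache away from the quota-limited filesystem
+     export TRITON_CACHE_DIR=/scratch/$USER/triton_cache   # hypothetical path on node-local scratch
+     mkdir -p "$TRITON_CACHE_DIR"
+ 
+     # Optional: enable hf_transfer for faster Hub uploads, as the log suggests
+     # (requires `pip install hf_transfer` in the environment)
+     export HF_HUB_ENABLE_HF_TRANSFER=1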
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-2/status.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ fail