Upload llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128
Browse files
llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128/bench.slurm
ADDED
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
|
2 |
+
|
3 |
+
#SBATCH --job-name=bench_cluster
|
4 |
+
#SBATCH --time=02:00:00
|
5 |
+
#SBATCH --partition=hopper-prod
|
6 |
+
#SBATCH --nodes=1
|
7 |
+
#SBATCH --gres=gpu:8
|
8 |
+
#SBATCH --qos=normal
|
9 |
+
#SBATCH --ntasks-per-node=1
|
10 |
+
#SBATCH --cpus-per-task=96
|
11 |
+
#SBATCH --exclusive
|
12 |
+
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128/log.out
|
13 |
+
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128/log.out
|
14 |
+
|
15 |
+
# Poll squeue until the watched job starts running (or disappears) and
# record the observed state in a status file.
#
# Arguments:
#   $1 - SLURM job id to watch
#   $2 - path of the status file to write
# Outputs:
#   writes "running" (no newline) to the status file once the job
#   reaches the RUNNING state; writes nothing if the job vanishes first
update_status() {
    job_id=$1
    status_file=$2
    # NOTE(review): recording a "pending" state never worked here; only
    # the RUNNING transition is written (behavior kept as-is).
    while true; do
        job_status=$(squeue --job "$job_id" --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Empty output: job has finished or is unknown to squeue.
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > "$status_file"
            break
        fi
        sleep 10
    done
}
|
33 |
+
|
34 |
+
# Misc initializations: banner, conda environment, and cluster-wide
# environment variables needed by torchrun/NCCL.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
# Quoted so the version string is printed as a single argument.
echo "python3 version = $(python3 --version)"
echo "========================"

# Slurm stuff: derive rendezvous endpoint from the allocated node list.
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
# Random unprivileged port in [1024, 65535).
export MASTER_PORT=$((1024 + RANDOM % 64511))

export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
# Deterministic cuBLAS workspaces (required for reproducible runs).
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
export CUDA_DEVICE_MAX_CONNECTIONS="1"

# Quoted: never let the token be word-split or glob-expanded.
# NOTE(review): the token is still visible in `ps` argv; prefer
# `huggingface-cli login` reading from stdin/env if this matters.
huggingface-cli login --token "$HUGGINGFACE_TOKEN"
|
53 |
+
|
54 |
+
|
55 |
+
# Path to the nanotron checkout used for this benchmark run.
NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
# Training entry point plus its config (dp=4, tp=1, pp=2, mbz=128).
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128/config.yaml"

# torchrun launcher prefix. Kept as a single flat string on purpose:
# it is expanded UNQUOTED later ("srun -u $LAUNCHER $CMD") so that
# word-splitting turns it back into individual CLI arguments.
LAUNCHER="torchrun \
--nproc_per_node 8 \
--nnodes 1 \
--rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
--rdzv_backend c10d \
--max_restarts 0 \
--tee 3 \
--node_rank ${SLURM_PROCID}"
|
66 |
+
|
67 |
+
# Checkout the bench_cluster branch. Abort if cd fails so git never
# runs against the wrong working directory.
cd "$NANOTRON_REPO" || exit 1
git checkout bench_cluster
cd ..
# Get the current job ID
job_id=${SLURM_JOB_ID}

# Update status to "pending" or "running" in the background; the
# watcher exits on its own once the job is running or gone.
update_status "$job_id" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128/status.txt &

# Run the main command. $LAUNCHER and $CMD hold whole command lines and
# are intentionally unquoted so word-splitting rebuilds the arguments.
# shellcheck disable=SC2086
srun -u $LAUNCHER $CMD
exit_status=$?
|
80 |
+
|
81 |
+
# Update status based on the exit status of `srun`, classifying known
# failure signatures from the captured log. Hoist the results dir so
# the eight path occurrences cannot drift out of sync.
RESULTS_DIR="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128"
if [ $exit_status -eq 0 ]; then
    printf "completed" > "$RESULTS_DIR/status.txt"
else
    if grep -q "OutOfMemoryError" "$RESULTS_DIR/log.out"; then
        printf "oom" > "$RESULTS_DIR/status.txt"
    elif grep -q " CUDA error: an illegal memory access" "$RESULTS_DIR/log.out"; then
        # Illegal memory access is deliberately bucketed as "oom" here.
        printf "oom" > "$RESULTS_DIR/status.txt"
    elif grep -q "Timeout at NCCL" "$RESULTS_DIR/log.out"; then
        printf "timeout" > "$RESULTS_DIR/status.txt"
    else
        printf "fail" > "$RESULTS_DIR/status.txt"
    fi
fi
|
95 |
+
|
96 |
+
# Run the report script only if the training job completed successfully.
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128 --is_profiler
fi


# Push to hub the folder using huggingface_cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128 llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128 --commit-message "Upload llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128"
# Capture the exit status immediately so no intervening command can
# clobber $? before the check below.
upload_status=$?

# Verify the upload
if [ $upload_status -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi
|
llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128/config.yaml
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
general:
|
2 |
+
project: bench_cluster
|
3 |
+
seed: 42
|
4 |
+
model:
|
5 |
+
ddp_bucket_cap_mb: 25
|
6 |
+
dtype: bfloat16
|
7 |
+
init_method:
|
8 |
+
std: 0.025
|
9 |
+
make_vocab_size_divisible_by: 1
|
10 |
+
model_config:
|
11 |
+
bos_token_id: 1
|
12 |
+
eos_token_id: 2
|
13 |
+
hidden_act: silu
|
14 |
+
hidden_size: 2048
|
15 |
+
initializer_range: 0.02
|
16 |
+
intermediate_size: 4096
|
17 |
+
is_llama_config: true
|
18 |
+
max_position_embeddings: 4096
|
19 |
+
num_attention_heads: 32
|
20 |
+
num_hidden_layers: 24
|
21 |
+
num_key_value_heads: 32
|
22 |
+
pad_token_id: null
|
23 |
+
pretraining_tp: 1
|
24 |
+
rms_norm_eps: 1.0e-05
|
25 |
+
rope_scaling: null
|
26 |
+
rope_theta: 10000.0
|
27 |
+
tie_word_embeddings: true
|
28 |
+
use_cache: true
|
29 |
+
vocab_size: 50257
|
30 |
+
optimizer:
|
31 |
+
accumulate_grad_in_fp32: true
|
32 |
+
clip_grad: 1.0
|
33 |
+
learning_rate_scheduler:
|
34 |
+
learning_rate: 0.0001
|
35 |
+
lr_decay_style: linear
|
36 |
+
lr_warmup_style: linear
|
37 |
+
lr_warmup_steps: 1
|
38 |
+
min_decay_lr: 1.0e-05
|
39 |
+
optimizer_factory:
|
40 |
+
adam_beta1: 0.9
|
41 |
+
adam_beta2: 0.95
|
42 |
+
adam_eps: 1.0e-08
|
43 |
+
name: adamW
|
44 |
+
torch_adam_is_fused: true
|
45 |
+
weight_decay: 0.01
|
46 |
+
zero_stage: 1
|
47 |
+
parallelism:
|
48 |
+
dp: 4
|
49 |
+
expert_parallel_size: 1
|
50 |
+
pp: 2
|
51 |
+
pp_engine: 1f1b
|
52 |
+
tp: 1
|
53 |
+
tp_linear_async_communication: false
|
54 |
+
tp_mode: REDUCE_SCATTER
|
55 |
+
profiler:
|
56 |
+
profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128
|
57 |
+
tokenizer:
|
58 |
+
tokenizer_max_length: null
|
59 |
+
tokenizer_name_or_path: openai-community/gpt2
|
60 |
+
tokenizer_revision: null
|
61 |
+
data_stages:
|
62 |
+
- name: Training Stage
|
63 |
+
start_training_step: 1
|
64 |
+
data:
|
65 |
+
dataset:
|
66 |
+
dataset_overwrite_cache: false
|
67 |
+
dataset_processing_num_proc_per_process: 64
|
68 |
+
hf_dataset_config_name: null
|
69 |
+
hf_dataset_or_datasets: roneneldan/TinyStories
|
70 |
+
hf_dataset_splits: train
|
71 |
+
text_column_name: text
|
72 |
+
num_loading_workers: 0
|
73 |
+
seed: 42
|
74 |
+
lighteval: null
|
75 |
+
tokens:
|
76 |
+
train_steps: 20
|
77 |
+
val_check_interval: -1
|
78 |
+
batch_accumulation_per_replica: 2
|
79 |
+
limit_test_batches: 0
|
80 |
+
limit_val_batches: 0
|
81 |
+
micro_batch_size: 128
|
82 |
+
sequence_length: 4096
|
83 |
+
logging:
|
84 |
+
iteration_step_info_interval: 1
|
85 |
+
log_level: info
|
86 |
+
log_level_replica: info
|
87 |
+
checkpoints:
|
88 |
+
checkpoint_interval: 100000
|
89 |
+
checkpoints_path: /dev/null
|
90 |
+
resume_checkpoint_path: null
|
llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128/log.out
ADDED
@@ -0,0 +1,368 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
========================
|
2 |
+
START TIME: Wed Jul 3 23:48:12 UTC 2024
|
3 |
+
python3 version = Python 3.10.14
|
4 |
+
========================
|
5 |
+
The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
|
6 |
+
Token is valid (permission: write).
|
7 |
+
Your token has been saved to /admin/home/ferdinand_mom/.cache/huggingface/token
|
8 |
+
Login successful
|
9 |
+
Already on 'bench_cluster'
|
10 |
+
M examples/config_tiny_llama.py
|
11 |
+
M examples/config_tiny_llama.yaml
|
12 |
+
M examples/train_tiny_llama.sh
|
13 |
+
M src/nanotron/models/llama.py
|
14 |
+
M src/nanotron/trainer.py
|
15 |
+
Your branch is up to date with 'origin/bench_cluster'.
|
16 |
+
Job status: RUNNING
|
17 |
+
W0703 23:48:16.055000 139776274089792 torch/distributed/run.py:757]
|
18 |
+
W0703 23:48:16.055000 139776274089792 torch/distributed/run.py:757] *****************************************
|
19 |
+
W0703 23:48:16.055000 139776274089792 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
20 |
+
W0703 23:48:16.055000 139776274089792 torch/distributed/run.py:757] *****************************************
|
21 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Config:
|
22 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Config(general=GeneralArgs(project='bench_cluster',
|
23 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: run='%date_%jobid',
|
24 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: seed=42,
|
25 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: step=None,
|
26 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: consumed_train_samples=None,
|
27 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: benchmark_csv_path=None,
|
28 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: ignore_sanity_checks=True),
|
29 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: parallelism=ParallelismArgs(dp=4,
|
30 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pp=2,
|
31 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tp=1,
|
32 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f33ccad4730>,
|
33 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
|
34 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tp_linear_async_communication=False,
|
35 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: expert_parallel_size=1),
|
36 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=1,
|
37 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: eos_token_id=2,
|
38 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_act='silu',
|
39 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_size=2048,
|
40 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: initializer_range=0.02,
|
41 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: intermediate_size=4096,
|
42 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: is_llama_config=True,
|
43 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: max_position_embeddings=4096,
|
44 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_attention_heads=32,
|
45 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_hidden_layers=24,
|
46 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_key_value_heads=32,
|
47 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pad_token_id=None,
|
48 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pretraining_tp=1,
|
49 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rms_norm_eps=1e-05,
|
50 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_scaling=None,
|
51 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_theta=10000.0,
|
52 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tie_word_embeddings=True,
|
53 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: use_cache=True,
|
54 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: vocab_size=50257),
|
55 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: init_method=RandomInit(std=0.025),
|
56 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: dtype=torch.bfloat16,
|
57 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: make_vocab_size_divisible_by=1,
|
58 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: ddp_bucket_cap_mb=25),
|
59 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokenizer=TokenizerArgs(tokenizer_name_or_path='openai-community/gpt2',
|
60 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokenizer_revision=None,
|
61 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokenizer_max_length=None),
|
62 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: checkpoints=CheckpointsArgs(checkpoints_path=Path('/dev/null'),
|
63 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: checkpoint_interval=100000,
|
64 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: save_initial_state=False,
|
65 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: resume_checkpoint_path=None,
|
66 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: checkpoints_path_is_shared_file_system=False),
|
67 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: logging=LoggingArgs(log_level='info',
|
68 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: log_level_replica='info',
|
69 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: iteration_step_info_interval=1),
|
70 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokens=TokensArgs(sequence_length=4096,
|
71 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: train_steps=20,
|
72 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: micro_batch_size=128,
|
73 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: batch_accumulation_per_replica=2,
|
74 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: val_check_interval=-1,
|
75 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: limit_val_batches=0,
|
76 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: limit_test_batches=0),
|
77 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
78 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: adam_beta1=0.9,
|
79 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: adam_beta2=0.95,
|
80 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: torch_adam_is_fused=True,
|
81 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: name='adamW'),
|
82 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: zero_stage=1,
|
83 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: weight_decay=0.01,
|
84 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: clip_grad=1.0,
|
85 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: accumulate_grad_in_fp32=True,
|
86 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0001,
|
87 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_warmup_steps=1,
|
88 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_warmup_style='linear',
|
89 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_decay_style='linear',
|
90 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_decay_steps=19,
|
91 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_decay_starting_step=None,
|
92 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: min_decay_lr=1e-05)),
|
93 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: data_stages=[DatasetStageArgs(name='Training Stage',
|
94 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: start_training_step=1,
|
95 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: data=DataArgs(dataset=PretrainDatasetsArgs(hf_dataset_or_datasets='roneneldan/TinyStories',
|
96 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hf_dataset_splits='train',
|
97 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hf_dataset_config_name=None,
|
98 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: dataset_processing_num_proc_per_process=64,
|
99 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: dataset_overwrite_cache=False,
|
100 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: text_column_name='text'),
|
101 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: seed=42,
|
102 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_loading_workers=0))],
|
103 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: profiler=ProfilerArgs(profiler_export_path=Path('/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128')),
|
104 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lighteval=None)
|
105 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Model Config:
|
106 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: LlamaConfig(bos_token_id=1,
|
107 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: eos_token_id=2,
|
108 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_act='silu',
|
109 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_size=2048,
|
110 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: initializer_range=0.02,
|
111 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: intermediate_size=4096,
|
112 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: is_llama_config=True,
|
113 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: max_position_embeddings=4096,
|
114 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_attention_heads=32,
|
115 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_hidden_layers=24,
|
116 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_key_value_heads=32,
|
117 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pad_token_id=None,
|
118 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pretraining_tp=1,
|
119 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rms_norm_eps=1e-05,
|
120 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_scaling=None,
|
121 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_theta=10000.0,
|
122 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tie_word_embeddings=True,
|
123 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: use_cache=True,
|
124 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: vocab_size=50257)
|
125 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Building model..
|
126 |
+
[default0]:07/03/2024 23:48:35 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Setting PP block ranks...
|
127 |
+
[default4]:07/03/2024 23:48:45 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: Local number of parameters: 522M (996.40MiB)
|
128 |
+
[default4]:07/03/2024 23:48:45 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: [After model building] Memory usage: 1006.41MiB. Peak allocated: 1008.44MiB Peak reserved: 1032.00MiB
|
129 |
+
[default4]:07/03/2024 23:48:45 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: No checkpoint path provided.
|
130 |
+
[default0]:07/03/2024 23:48:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Total number of parameters: 1.21G (2312.82MiB)
|
131 |
+
[default0]:07/03/2024 23:48:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Local number of parameters: 690M (1316.43MiB)
|
132 |
+
[default0]:07/03/2024 23:48:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [After model building] Memory usage: 1330.44MiB. Peak allocated: 1332.47MiB Peak reserved: 1364.00MiB
|
133 |
+
[default0]:07/03/2024 23:48:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
|
134 |
+
[default0]:07/03/2024 23:48:45 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Parametrizing model parameters using StandardParametrizator
|
135 |
+
[default2]:07/03/2024 23:48:45 [INFO|DP=2|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
|
136 |
+
[default1]:07/03/2024 23:48:45 [INFO|DP=1|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
|
137 |
+
[default3]:07/03/2024 23:48:45 [INFO|DP=3|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
|
138 |
+
[default6]:07/03/2024 23:48:45 [INFO|DP=2|PP=1|TP=0|ip-26-0-174-36]: No checkpoint path provided.
|
139 |
+
[default5]:07/03/2024 23:48:45 [INFO|DP=1|PP=1|TP=0|ip-26-0-174-36]: No checkpoint path provided.
|
140 |
+
[default7]:07/03/2024 23:48:45 [INFO|DP=3|PP=1|TP=0|ip-26-0-174-36]: No checkpoint path provided.
|
141 |
+
[default0]:07/03/2024 23:48:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Optimizer Building] Using LearningRateForSP as learning rate
|
142 |
+
[default0]:07/03/2024 23:48:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] Size of optimizer params per rank:
|
143 |
+
[default0]:07/03/2024 23:48:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 0 has 173M out of 690M (25.00%) params' optimizer states
|
144 |
+
[default0]:07/03/2024 23:48:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 1 has 173M out of 690M (25.00%) params' optimizer states
|
145 |
+
[default0]:07/03/2024 23:48:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 2 has 173M out of 690M (25.00%) params' optimizer states
|
146 |
+
[default0]:07/03/2024 23:48:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 3 has 173M out of 690M (25.00%) params' optimizer states
|
147 |
+
[default0]:07/03/2024 23:48:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Training Plan] Stage Training Stage has 19 remaining training steps and has consumed 0 samples
|
148 |
+
[default0]:07/03/2024 23:48:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Using `datasets` library
|
149 |
+
[default0]:07/03/2024 23:48:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Loading tokenizer from openai-community/gpt2 and transformers/hf_hub versions ('4.41.2', '0.23.4')
|
150 |
+
[default0]:07/03/2024 23:48:51 [WARNING|DP=0|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
|
151 |
+
[default0]:Repo card metadata block was not found. Setting CardData to empty.
|
152 |
+
[default0]:07/03/2024 23:48:52 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Training Plan] There are 1 training stages
|
153 |
+
[default0]:07/03/2024 23:48:52 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Stage Training Stage] start from step 1
|
154 |
+
[default0]:07/03/2024 23:48:52 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]:
|
155 |
+
[default0]:07/03/2024 23:48:52 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Start training] datetime: 2024-07-03 23:48:52.841307 | mbs: 128 | grad_accum: 2 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
|
156 |
+
[default0]:07/03/2024 23:48:52 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
|
157 |
+
[default0]:07/03/2024 23:48:52 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 4621.51MiB. Peak allocated 4621.51MiB. Peak reserved: 4658.00MiB
|
158 |
+
[default3]:07/03/2024 23:48:53 [WARNING|DP=3|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
|
159 |
+
[default5]:07/03/2024 23:48:53 [WARNING|DP=1|PP=1|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
|
160 |
+
[default5]:Repo card metadata block was not found. Setting CardData to empty.
|
161 |
+
[default3]:Repo card metadata block was not found. Setting CardData to empty.
|
162 |
+
[default1]:07/03/2024 23:48:53 [WARNING|DP=1|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
|
163 |
+
[default6]:07/03/2024 23:48:53 [WARNING|DP=2|PP=1|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
|
164 |
+
[default7]:07/03/2024 23:48:53 [WARNING|DP=3|PP=1|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
|
165 |
+
[default6]:Repo card metadata block was not found. Setting CardData to empty.
|
166 |
+
[default1]:Repo card metadata block was not found. Setting CardData to empty.
|
167 |
+
[default7]:Repo card metadata block was not found. Setting CardData to empty.
|
168 |
+
[default2]:07/03/2024 23:48:53 [WARNING|DP=2|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
|
169 |
+
[default2]:Repo card metadata block was not found. Setting CardData to empty.
|
170 |
+
[default4]:07/03/2024 23:48:53 [WARNING|DP=0|PP=1|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
|
171 |
+
[default4]:Repo card metadata block was not found. Setting CardData to empty.
|
172 |
+
[default0]:[rank0]: Traceback (most recent call last):
|
173 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
|
174 |
+
[default0]:[rank0]: trainer.train(dataloader)
|
175 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
|
176 |
+
[default0]:[rank0]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
177 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
|
178 |
+
[default0]:[rank0]: outputs = self.pipeline_engine.train_batch_iter(
|
179 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 252, in train_batch_iter
|
180 |
+
[default0]:[rank0]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
181 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
182 |
+
[default0]:[rank0]: output = model(**micro_batch)
|
183 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
184 |
+
[default0]:[rank0]: return self._call_impl(*args, **kwargs)
|
185 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
186 |
+
[default0]:[rank0]: return forward_call(*args, **kwargs)
|
187 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
|
188 |
+
[default0]:[rank0]: sharded_logits = self.model(
|
189 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
190 |
+
[default0]:[rank0]: return self._call_impl(*args, **kwargs)
|
191 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
192 |
+
[default0]:[rank0]: return forward_call(*args, **kwargs)
|
193 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
|
194 |
+
[default0]:[rank0]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
195 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
|
196 |
+
[default0]:[rank0]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
197 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
198 |
+
[default0]:[rank0]: return self._call_impl(*args, **kwargs)
|
199 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
200 |
+
[default0]:[rank0]: return forward_call(*args, **kwargs)
|
201 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
202 |
+
[default0]:[rank0]: output = self.pp_block(**new_kwargs)
|
203 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
204 |
+
[default0]:[rank0]: return self._call_impl(*args, **kwargs)
|
205 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
206 |
+
[default0]:[rank0]: return forward_call(*args, **kwargs)
|
207 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
|
208 |
+
[default0]:[rank0]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
|
209 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
210 |
+
[default0]:[rank0]: return self._call_impl(*args, **kwargs)
|
211 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
212 |
+
[default0]:[rank0]: return forward_call(*args, **kwargs)
|
213 |
+
[default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 565, in forward
|
214 |
+
[default0]:[rank0]: key_value_states = key_value_states.permute(1, 2, 0, 3, 4).contiguous()
|
215 |
+
[default0]:[rank0]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU
|
216 |
+
[default5]:[rank5]: Traceback (most recent call last):
|
217 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
|
218 |
+
[default5]:[rank5]: trainer.train(dataloader)
|
219 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
|
220 |
+
[default5]:[rank5]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
221 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
|
222 |
+
[default5]:[rank5]: outputs = self.pipeline_engine.train_batch_iter(
|
223 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
224 |
+
[default5]:[rank5]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
225 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
226 |
+
[default5]:[rank5]: output = model(**micro_batch)
|
227 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
228 |
+
[default5]:[rank5]: return self._call_impl(*args, **kwargs)
|
229 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
230 |
+
[default5]:[rank5]: return forward_call(*args, **kwargs)
|
231 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
|
232 |
+
[default5]:[rank5]: sharded_logits = self.model(
|
233 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
234 |
+
[default5]:[rank5]: return self._call_impl(*args, **kwargs)
|
235 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
236 |
+
[default5]:[rank5]: return forward_call(*args, **kwargs)
|
237 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
|
238 |
+
[default5]:[rank5]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
239 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
|
240 |
+
[default5]:[rank5]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
241 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
242 |
+
[default5]:[rank5]: return self._call_impl(*args, **kwargs)
|
243 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
244 |
+
[default5]:[rank5]: return forward_call(*args, **kwargs)
|
245 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 126, in forward
|
246 |
+
[default5]:[rank5]: new_kwargs[name] = recv_from_pipeline_state_buffer(
|
247 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/functional.py", line 117, in recv_from_pipeline_state_buffer
|
248 |
+
[default5]:[rank5]: pipeline_state.run_communication()
|
249 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 150, in run_communication
|
250 |
+
[default5]:[rank5]: recv_activation_tensor = recv_activation()
|
251 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 31, in __call__
|
252 |
+
[default5]:[rank5]: return self.p2p.recv_tensors(num_tensors=1, from_rank=self.from_rank)[0]
|
253 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 353, in recv_tensors
|
254 |
+
[default5]:[rank5]: buffers, futures = self.irecv_tensors(num_tensors=num_tensors, from_rank=from_rank, tag=tag)
|
255 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 326, in irecv_tensors
|
256 |
+
[default5]:[rank5]: meta = self._recv_meta(from_rank=from_rank, tag=tag)
|
257 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 246, in _recv_meta
|
258 |
+
[default5]:[rank5]: dist.recv(
|
259 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/c10d_logger.py", line 75, in wrapper
|
260 |
+
[default5]:[rank5]: return func(*args, **kwargs)
|
261 |
+
[default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py", line 1932, in recv
|
262 |
+
[default5]:[rank5]: pg.recv([tensor], group_src_rank, tag).wait()
|
263 |
+
[default5]:[rank5]: torch.distributed.DistBackendError: [1] is setting up NCCL communicator and retrieving ncclUniqueId from [0] via c10d key-value store by key '0:1', but store->get('0:1') got error: Connection reset by peer
|
264 |
+
[default5]:[rank5]: Exception raised from recvBytes at ../torch/csrc/distributed/c10d/Utils.hpp:672 (most recent call first):
|
265 |
+
[default5]:[rank5]: frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7fad6f05f897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
|
266 |
+
[default5]:[rank5]: frame #1: <unknown function> + 0x5b3a23e (0x7fada8b7c23e in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
267 |
+
[default5]:[rank5]: frame #2: c10d::TCPStore::doWait(c10::ArrayRef<std::string>, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x2c7 (0x7fada8b76c87 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
268 |
+
[default5]:[rank5]: frame #3: c10d::TCPStore::doGet(std::string const&) + 0x32 (0x7fada8b76f82 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
269 |
+
[default5]:[rank5]: frame #4: c10d::TCPStore::get(std::string const&) + 0xa1 (0x7fada8b77fd1 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
270 |
+
[default5]:[rank5]: frame #5: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7fada8b2c371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
271 |
+
[default5]:[rank5]: frame #6: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7fada8b2c371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
272 |
+
[default5]:[rank5]: frame #7: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7fada8b2c371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
273 |
+
[default5]:[rank5]: frame #8: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7fada8b2c371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
274 |
+
[default5]:[rank5]: frame #9: c10d::ProcessGroupNCCL::broadcastUniqueNCCLID(ncclUniqueId*, bool, std::string const&, int) + 0xa9 (0x7fad70339189 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
275 |
+
[default5]:[rank5]: frame #10: c10d::ProcessGroupNCCL::getNCCLComm(std::string const&, c10::Device&, c10d::OpType, int, bool) + 0xc50 (0x7fad70340610 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
276 |
+
[default5]:[rank5]: frame #11: c10d::ProcessGroupNCCL::recv(std::vector<at::Tensor, std::allocator<at::Tensor> >&, int, int) + 0x5f8 (0x7fad7035f978 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
277 |
+
[default5]:[rank5]: frame #12: <unknown function> + 0x5adc309 (0x7fada8b1e309 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
278 |
+
[default5]:[rank5]: frame #13: <unknown function> + 0x5ae6f10 (0x7fada8b28f10 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
279 |
+
[default5]:[rank5]: frame #14: <unknown function> + 0x5ae6fa5 (0x7fada8b28fa5 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
280 |
+
[default5]:[rank5]: frame #15: <unknown function> + 0x5124446 (0x7fada8166446 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
281 |
+
[default5]:[rank5]: frame #16: <unknown function> + 0x1acf4b8 (0x7fada4b114b8 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
282 |
+
[default5]:[rank5]: frame #17: <unknown function> + 0x5aee004 (0x7fada8b30004 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
283 |
+
[default5]:[rank5]: frame #18: <unknown function> + 0x5af36b5 (0x7fada8b356b5 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
|
284 |
+
[default5]:[rank5]: frame #19: <unknown function> + 0xd2631e (0x7fadbb71f31e in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_python.so)
|
285 |
+
[default5]:[rank5]: frame #20: <unknown function> + 0x47def4 (0x7fadbae76ef4 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_python.so)
|
286 |
+
[default5]:[rank5]: frame #21: <unknown function> + 0x1445a6 (0x55ef001ef5a6 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
287 |
+
[default5]:[rank5]: frame #22: _PyObject_MakeTpCall + 0x26b (0x55ef001e8a6b in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
288 |
+
[default5]:[rank5]: frame #23: <unknown function> + 0x150866 (0x55ef001fb866 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
289 |
+
[default5]:[rank5]: frame #24: _PyEval_EvalFrameDefault + 0x4c12 (0x55ef001e4142 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
290 |
+
[default5]:[rank5]: frame #25: _PyFunction_Vectorcall + 0x6c (0x55ef001efa2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
291 |
+
[default5]:[rank5]: frame #26: PyObject_Call + 0xbc (0x55ef001fbf1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
292 |
+
[default5]:[rank5]: frame #27: _PyEval_EvalFrameDefault + 0x2d83 (0x55ef001e22b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
293 |
+
[default5]:[rank5]: frame #28: _PyFunction_Vectorcall + 0x6c (0x55ef001efa2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
294 |
+
[default5]:[rank5]: frame #29: _PyEval_EvalFrameDefault + 0x13ca (0x55ef001e08fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
295 |
+
[default5]:[rank5]: frame #30: <unknown function> + 0x150582 (0x55ef001fb582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
296 |
+
[default5]:[rank5]: frame #31: _PyEval_EvalFrameDefault + 0x13ca (0x55ef001e08fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
297 |
+
[default5]:[rank5]: frame #32: <unknown function> + 0x150582 (0x55ef001fb582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
298 |
+
[default5]:[rank5]: frame #33: _PyEval_EvalFrameDefault + 0x13ca (0x55ef001e08fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
299 |
+
[default5]:[rank5]: frame #34: <unknown function> + 0x150582 (0x55ef001fb582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
300 |
+
[default5]:[rank5]: frame #35: _PyEval_EvalFrameDefault + 0x13ca (0x55ef001e08fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
301 |
+
[default5]:[rank5]: frame #36: _PyObject_FastCallDictTstate + 0xd0 (0x55ef001e7f50 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
302 |
+
[default5]:[rank5]: frame #37: _PyObject_Call_Prepend + 0x69 (0x55ef001f9c39 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
303 |
+
[default5]:[rank5]: frame #38: <unknown function> + 0x211239 (0x55ef002bc239 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
304 |
+
[default5]:[rank5]: frame #39: _PyObject_MakeTpCall + 0x26b (0x55ef001e8a6b in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
305 |
+
[default5]:[rank5]: frame #40: _PyEval_EvalFrameDefault + 0x4eb6 (0x55ef001e43e6 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
306 |
+
[default5]:[rank5]: frame #41: _PyFunction_Vectorcall + 0x6c (0x55ef001efa2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
307 |
+
[default5]:[rank5]: frame #42: _PyEval_EvalFrameDefault + 0x72c (0x55ef001dfc5c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
308 |
+
[default5]:[rank5]: frame #43: _PyFunction_Vectorcall + 0x6c (0x55ef001efa2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
309 |
+
[default5]:[rank5]: frame #44: _PyEval_EvalFrameDefault + 0x13ca (0x55ef001e08fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
310 |
+
[default5]:[rank5]: frame #45: <unknown function> + 0x150582 (0x55ef001fb582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
311 |
+
[default5]:[rank5]: frame #46: PyObject_Call + 0xbc (0x55ef001fbf1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
312 |
+
[default5]:[rank5]: frame #47: _PyEval_EvalFrameDefault + 0x2d83 (0x55ef001e22b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
313 |
+
[default5]:[rank5]: frame #48: <unknown function> + 0x150582 (0x55ef001fb582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
314 |
+
[default5]:[rank5]: frame #49: PyObject_Call + 0xbc (0x55ef001fbf1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
315 |
+
[default5]:[rank5]: frame #50: _PyEval_EvalFrameDefault + 0x2d83 (0x55ef001e22b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
316 |
+
[default5]:[rank5]: frame #51: _PyFunction_Vectorcall + 0x6c (0x55ef001efa2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
317 |
+
[default5]:[rank5]: frame #52: _PyObject_FastCallDictTstate + 0x187 (0x55ef001e8007 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
318 |
+
[default5]:[rank5]: frame #53: _PyObject_Call_Prepend + 0x69 (0x55ef001f9c39 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
319 |
+
[default5]:[rank5]: frame #54: <unknown function> + 0x211239 (0x55ef002bc239 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
320 |
+
[default5]:[rank5]: frame #55: PyObject_Call + 0x207 (0x55ef001fc067 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
321 |
+
[default5]:[rank5]: frame #56: _PyEval_EvalFrameDefault + 0x2d83 (0x55ef001e22b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
322 |
+
[default5]:[rank5]: frame #57: <unknown function> + 0x150582 (0x55ef001fb582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
323 |
+
[default5]:[rank5]: frame #58: _PyEval_EvalFrameDefault + 0x13ca (0x55ef001e08fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
324 |
+
[default5]:[rank5]: frame #59: <unknown function> + 0x150582 (0x55ef001fb582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
325 |
+
[default5]:[rank5]: frame #60: PyObject_Call + 0xbc (0x55ef001fbf1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
326 |
+
[default5]:[rank5]: frame #61: _PyEval_EvalFrameDefault + 0x2d83 (0x55ef001e22b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
327 |
+
[default5]:[rank5]: frame #62: <unknown function> + 0x150582 (0x55ef001fb582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
328 |
+
[default5]:[rank5]: frame #63: PyObject_Call + 0xbc (0x55ef001fbf1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
|
329 |
+
[default5]:[rank5]: . This may indicate a possible application crash on rank 0 or a network set up issue.
|
330 |
+
W0703 23:48:56.359000 139776274089792 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 491466 closing signal SIGTERM
|
331 |
+
W0703 23:48:56.360000 139776274089792 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 491467 closing signal SIGTERM
|
332 |
+
W0703 23:48:56.360000 139776274089792 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 491468 closing signal SIGTERM
|
333 |
+
W0703 23:48:56.360000 139776274089792 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 491469 closing signal SIGTERM
|
334 |
+
W0703 23:48:56.360000 139776274089792 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 491470 closing signal SIGTERM
|
335 |
+
W0703 23:48:56.361000 139776274089792 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 491471 closing signal SIGTERM
|
336 |
+
W0703 23:48:56.362000 139776274089792 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 491472 closing signal SIGTERM
|
337 |
+
E0703 23:48:57.975000 139776274089792 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 0 (pid: 491465) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
|
338 |
+
Traceback (most recent call last):
|
339 |
+
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
|
340 |
+
sys.exit(main())
|
341 |
+
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
|
342 |
+
return f(*args, **kwargs)
|
343 |
+
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
|
344 |
+
run(args)
|
345 |
+
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
|
346 |
+
elastic_launch(
|
347 |
+
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
|
348 |
+
return launch_agent(self._config, self._entrypoint, list(args))
|
349 |
+
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
|
350 |
+
raise ChildFailedError(
|
351 |
+
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
352 |
+
============================================================
|
353 |
+
/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
|
354 |
+
------------------------------------------------------------
|
355 |
+
Failures:
|
356 |
+
<NO_OTHER_FAILURES>
|
357 |
+
------------------------------------------------------------
|
358 |
+
Root Cause (first observed failure):
|
359 |
+
[0]:
|
360 |
+
time : 2024-07-03_23:48:56
|
361 |
+
host : ip-26-0-174-36.ec2.internal
|
362 |
+
rank : 0 (local_rank: 0)
|
363 |
+
exitcode : 1 (pid: 491465)
|
364 |
+
error_file: <N/A>
|
365 |
+
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
366 |
+
============================================================
|
367 |
+
srun: error: ip-26-0-174-36: task 0: Exited with exit code 1
|
368 |
+
Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
|
llama-1B/8_GPUS/dp-4_tp-1_pp-2_mbz-128/status.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
oom
|