nouamanetazi (HF staff) committed
Commit 7a0bb63 · verified · 1 Parent(s): 9413db9

Add files using upload-large-folder tool
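For context, the commit message indicates the files were pushed with huggingface_hub's upload-large-folder tool. Below is a minimal, hedged sketch of that workflow, assuming a recent huggingface_hub release that ships the upload-large-folder command; the repo id and local folder are illustrative placeholders, not values taken from this commit.

# Hedged sketch, not the exact command used for this commit
pip install -U huggingface_hub
huggingface-cli login                       # or export HF_TOKEN=...
# Resumable, chunked upload of a large local folder; the tool batches the
# files into one or more commits on the Hub.
huggingface-cli upload-large-folder <repo-id> <local-folder> --repo-type=dataset   # adjust --repo-type to the target repo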

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. scripts/run_1.14G_dp128_tp2_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
  2. scripts/run_1.14G_dp16_tp2_pp1_acc2_mbs16_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
  3. scripts/run_1.14G_dp16_tp2_pp1_acc64_mbs2_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
  4. scripts/run_1.14G_dp16_tp32_pp1_acc16_mbs2_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
  5. scripts/run_1.14G_dp2_tp16_pp1_acc32_mbs8_seq8192_zero1_tpmodeRED_vocab32k.sh +68 -0
  6. scripts/run_1.14G_dp2_tp1_pp4_acc16_mbs16_seq8192_zero1_tpmodeRED_vocab32k.sh +68 -0
  7. scripts/run_1.14G_dp2_tp8_pp1_acc32_mbs8_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
  8. scripts/run_1.14G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh +68 -0
  9. scripts/run_1.14G_dp32_tp1_pp2_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh +68 -0
  10. scripts/run_1.14G_dp4_tp16_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
  11. scripts/run_1.14G_dp4_tp2_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.sh +68 -0
  12. scripts/run_1.14G_dp4_tp4_pp1_acc32_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh +68 -0
  13. scripts/run_1.14G_dp4_tp64_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
  14. scripts/run_1.14G_dp64_tp1_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh +68 -0
  15. scripts/run_1.14G_dp8_tp16_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
  16. scripts/run_1.14G_dp8_tp16_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k.sh +68 -0
  17. scripts/run_1.14G_dp8_tp1_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
  18. scripts/run_1.14G_dp8_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
  19. scripts/run_1.14G_dp8_tp32_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh +68 -0
  20. scripts/run_1.14G_dp8_tp4_pp1_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab32k.sh +68 -0
  21. scripts/run_1.14G_dp8_tp64_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
  22. scripts/run_1.14G_dp8_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh +68 -0
  23. scripts/run_1.34G_dp128_tp1_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  24. scripts/run_1.34G_dp16_tp2_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
  25. scripts/run_1.34G_dp16_tp4_pp1_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  26. scripts/run_1.34G_dp256_tp1_pp1_acc2_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  27. scripts/run_1.34G_dp2_tp1_pp8_acc8_mbs128_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  28. scripts/run_1.34G_dp2_tp256_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab131k.sh +68 -0
  29. scripts/run_1.34G_dp2_tp256_pp1_acc4_mbs64_seq8192_zero1_tpmodeRED_vocab131k.sh +68 -0
  30. scripts/run_1.34G_dp2_tp64_pp1_acc32_mbs8_seq8192_zero1_tpmodeALL_vocab131k.sh +68 -0
  31. scripts/run_1.34G_dp2_tp64_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k.sh +68 -0
  32. scripts/run_1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
  33. scripts/run_1.34G_dp2_tp8_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh +159 -0
  34. scripts/run_1.34G_dp32_tp16_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
  35. scripts/run_1.34G_dp32_tp16_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  36. scripts/run_1.34G_dp32_tp16_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
  37. scripts/run_1.34G_dp32_tp1_pp2_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  38. scripts/run_1.34G_dp4_tp1_pp2_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh +68 -0
  39. scripts/run_1.34G_dp4_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.sh +68 -0
  40. scripts/run_1.34G_dp4_tp2_pp1_acc8_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  41. scripts/run_1.34G_dp4_tp32_pp1_acc32_mbs4_seq8192_zero1_tpmodeALL_vocab131k.sh +68 -0
  42. scripts/run_1.34G_dp64_tp2_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh +159 -0
  43. scripts/run_1.34G_dp64_tp8_pp1_acc8_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  44. scripts/run_1.34G_dp8_tp16_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  45. scripts/run_1.34G_dp8_tp1_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  46. scripts/run_1.34G_dp8_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  47. scripts/run_1.34G_dp8_tp4_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab131k.sh +68 -0
  48. scripts/run_1.34G_dp8_tp4_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
  49. scripts/run_3.27G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l28_h3072_heads24.sh +68 -0
  50. scripts/run_3.56G_dp32_tp4_pp1_acc8_mbs1_seq2048_zero0_l28_h3072_heads24.sh +57 -0
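Each filename encodes the benchmark configuration: model size, data/tensor/pipeline parallel degrees (dp/tp/pp), gradient-accumulation steps (acc), micro-batch size (mbs), sequence length (seq), ZeRO stage, TP mode, and vocabulary or layer/hidden/head sizes. The --nodes value in each script follows directly from the parallelism degrees (world size = dp × tp × pp, with 8 GPUs per node), as the quick check below illustrates for the first script.

# Sanity check for run_1.14G_dp128_tp2_pp1_...: dp*tp*pp = 128*2*1 = 256 GPUs
dp=128; tp=2; pp=1; gpus_per_node=8
echo $(( dp * tp * pp / gpus_per_node ))   # prints 32, matching #SBATCH --nodes=32 in that script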
scripts/run_1.14G_dp128_tp2_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp128_tp2_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=32 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp128_tp2_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml
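Each of these files is a standard SLURM batch script, so a benchmark run would normally be submitted from the nanotron repository root (where run_train.py and benchmark/configs/ are expected to live, judging by the relative paths above) with sbatch, for example:

# Submit the job; stdout/stderr go to the file given by #SBATCH -o
sbatch scripts/run_1.14G_dp128_tp2_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab32k.sh
# Monitor it
squeue -u $USER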
scripts/run_1.14G_dp16_tp2_pp1_acc2_mbs16_seq2048_zero1_tpmodeALL_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp16_tp2_pp1_acc2_mbs16_seq2048_zero1_tpmodeALL_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=4 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp16_tp2_pp1_acc2_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp16_tp2_pp1_acc64_mbs2_seq2048_zero1_tpmodeALL_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp16_tp2_pp1_acc64_mbs2_seq2048_zero1_tpmodeALL_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=4 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp16_tp2_pp1_acc64_mbs2_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp16_tp32_pp1_acc16_mbs2_seq2048_zero1_tpmodeALL_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp16_tp32_pp1_acc16_mbs2_seq2048_zero1_tpmodeALL_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp16_tp32_pp1_acc16_mbs2_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp2_tp16_pp1_acc32_mbs8_seq8192_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp2_tp16_pp1_acc32_mbs8_seq8192_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=4 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp2_tp16_pp1_acc32_mbs8_seq8192_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp2_tp1_pp4_acc16_mbs16_seq8192_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp2_tp1_pp4_acc16_mbs16_seq8192_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:15:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=1 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp2_tp1_pp4_acc16_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp2_tp8_pp1_acc32_mbs8_seq2048_zero1_tpmodeALL_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp2_tp8_pp1_acc32_mbs8_seq2048_zero1_tpmodeALL_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=2 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp2_tp8_pp1_acc32_mbs8_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp32_tp16_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp32_tp1_pp2_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp32_tp1_pp2_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=8 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp32_tp1_pp2_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp4_tp16_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp4_tp16_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=8 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp4_tp16_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp4_tp2_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp4_tp2_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32 # Job name
+ #SBATCH --time=00:15:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp4_tp2_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.yaml
scripts/run_1.14G_dp4_tp4_pp1_acc32_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp4_tp4_pp1_acc32_mbs1_seq32768_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=2 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp4_tp4_pp1_acc32_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp4_tp64_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp4_tp64_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=32 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp4_tp64_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp64_tp1_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp64_tp1_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=8 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp64_tp1_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp8_tp16_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp8_tp16_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=16 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp8_tp16_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp8_tp16_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp8_tp16_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=16 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp8_tp16_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp8_tp1_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp8_tp1_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=1 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp8_tp1_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp8_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp8_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=2 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp8_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp8_tp32_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp8_tp32_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=32 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp8_tp32_pp1_acc2_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp8_tp4_pp1_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp8_tp4_pp1_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=4 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp8_tp4_pp1_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp8_tp64_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp8_tp64_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp8_tp64_pp1_acc8_mbs32_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp8_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab32k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.14G_dp8_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab32k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=8 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.14G_dp8_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.34G_dp128_tp1_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp128_tp1_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=16 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp128_tp1_pp1_acc8_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp16_tp2_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp16_tp2_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=4 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp16_tp2_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp16_tp4_pp1_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp16_tp4_pp1_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=8 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp16_tp4_pp1_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp256_tp1_pp1_acc2_mbs1_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp256_tp1_pp1_acc2_mbs1_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=32 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp256_tp1_pp1_acc2_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp2_tp1_pp8_acc8_mbs128_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp2_tp1_pp8_acc8_mbs128_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:15:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=2 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp2_tp1_pp8_acc8_mbs128_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp2_tp256_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp2_tp256_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp2_tp256_pp1_acc1_mbs256_seq2048_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp2_tp256_pp1_acc4_mbs64_seq8192_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp2_tp256_pp1_acc4_mbs64_seq8192_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp2_tp256_pp1_acc4_mbs64_seq8192_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp2_tp64_pp1_acc32_mbs8_seq8192_zero1_tpmodeALL_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp2_tp64_pp1_acc32_mbs8_seq8192_zero1_tpmodeALL_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=16 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp2_tp64_pp1_acc32_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp2_tp64_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp2_tp64_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=16 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp2_tp64_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=2 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp2_tp8_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,159 @@
+ #!/bin/bash
+ #SBATCH --job-name=bench_1.34G_dp2_tp8_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=01:10:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=4 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+ #SBATCH --wait-all-nodes=1 # fail if any node is not ready
+
+ # run using
+ # sbatch --nodes=1 run_multinode.sh
+ # or
+ # SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
+
+ set -x -e
+
+ # If not running under SLURM, set default SLURM environment variables
+ if [ -z "${SLURM_JOB_ID}" ]; then
+ if [ -z "${SALLOC_JOBID}" ]; then
+ echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+ exit 1
+ fi
+ if [ -z "${NNODES}" ]; then
+ echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+ exit 1
+ fi
+ export SALLOC_MODE=1
+ export SLURM_JOB_ID=$SALLOC_JOBID
+ export SLURM_NNODES=$NNODES
+ export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+ fi
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+ # Unset FI_PROVIDER to avoid potential libfabric provider issues
+ # unset FI_PROVIDER
+
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+ else # srun mode
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+ fi
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ export NCCL_DEBUG=INFO # INFO, WARN
+ # export NCCL_DEBUG_SUBSYS=ALL
+ # export CUDA_LAUNCH_BLOCKING=1
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+ export WANDB_MODE=disabled
+
+ # export TORCH_NCCL_USE_COMM_NONBLOCKING=1
+
+ # Trying to avoid hangs
+ export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+ # debug
+ export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+ # export NCCL_P2P_LEVEL=NVL
+ # export CUDA_LAUNCH_BLOCKING=1
+ # export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
+ # export NCCL_NET_GDR_LEVEL=LOC
+ # Test Script - save as test_comm.sh
+
+ # Test 1 - Force TCP
+ # echo "Running with TCP only..."
+ # export NCCL_P2P_LEVEL=LOC
+
+ # # Match bandwidth patterns
+ # export NCCL_MAX_NCHANNELS=2
+ # export NCCL_MIN_NCHANNELS=2
+
+
+ # export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+ # export NCCL_SHM_DISABLE=0 # set to 1 to disable the Shared Memory (SHM) transport
+ # export NCCL_IB_DISABLE=0 # set to 1 to disable the InfiniBand (IB) transport
+ # export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+ # export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+ # Force SHM
+ # export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+ # export NCCL_SOCKET_NTHREADS=1
+ # export FI_PROVIDER="tcp"
+
+ # Print GPU topology information
+ if [ -z "${SALLOC_MODE}" ]; then
+ echo "=== GPU Topology ==="
+ nvidia-smi topo -m
+ echo "=================="
+ export SRUN_ALLOC_ARGS=""
+ else
+ export JOBNAME="bench_1.34G_dp2_tp8_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k"
+ export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+ export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+ fi
+
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun in background
+ if [ -n "${SALLOC_MODE}" ]; then # srun mode
+ srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ --max_restarts 0 \
+ --rdzv_conf timeout=60 \
+ /fsx/nouamane/projects/nanotron/run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp2_tp8_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+ # Store the process ID
+ SRUN_PID=$!
+ echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+ # Optionally, you can add:
+ echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+ echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+ else # sbatch mode
+ srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ --max_restarts 0 \
+ --rdzv_conf timeout=60 \
+ /fsx/nouamane/projects/nanotron/run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp2_tp8_pp2_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml
+ fi
scripts/run_1.34G_dp32_tp16_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp32_tp16_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp32_tp16_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp32_tp16_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp32_tp16_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp32_tp16_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp32_tp16_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp32_tp16_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp32_tp16_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp32_tp1_pp2_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp32_tp1_pp2_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=8 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp32_tp1_pp2_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp4_tp1_pp2_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp4_tp1_pp2_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=1 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp4_tp1_pp2_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp4_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp4_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=1 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+ --nnodes=$NNODES \
+ --nproc_per_node=$GPUS_PER_NODE \
+ --rdzv_id=$SLURM_JOB_ID \
+ --rdzv_backend=c10d \
+ --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+ run_train.py \
+ --config-file benchmark/configs/config_1.34G_dp4_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp4_tp2_pp1_acc8_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp4_tp2_pp1_acc8_mbs64_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=1 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+     --nnodes=$NNODES \
+     --nproc_per_node=$GPUS_PER_NODE \
+     --rdzv_id=$SLURM_JOB_ID \
+     --rdzv_backend=c10d \
+     --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+     run_train.py \
+     --config-file benchmark/configs/config_1.34G_dp4_tp2_pp1_acc8_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml
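Note (editorial, not part of the original scripts): the file names encode the benchmark configuration (dp = data-parallel degree, tp = tensor-parallel, pp = pipeline-parallel, acc = gradient-accumulation steps, mbs = micro-batch size, seq = sequence length). As a minimal sketch, assuming the usual relations WORLD_SIZE = dp × tp × pp and global batch = dp × mbs × acc, the snippet below shows how the `--nodes` value and tokens per step follow from the name; the parsing helper and variable names are illustrative, not from the repo.

# Hypothetical sanity check: derive node count and tokens/step from a config name.
NAME="1.34G_dp4_tp2_pp1_acc8_mbs64_seq2048_zero1_tpmodeRED_vocab131k"
for field in dp tp pp acc mbs seq; do
    declare "$field=$(echo "$NAME" | grep -oP "(?<=_${field})\d+")"
done
GPUS_PER_NODE=8
WORLD_SIZE=$((dp * tp * pp))                          # 4*2*1 = 8 GPUs -> --nodes=1
NODES=$(( (WORLD_SIZE + GPUS_PER_NODE - 1) / GPUS_PER_NODE ))
TOKENS_PER_STEP=$((dp * mbs * acc * seq))             # 4*64*8*2048 = 4,194,304 tokens/step
echo "nodes=$NODES world_size=$WORLD_SIZE tokens_per_step=$TOKENS_PER_STEP"

The same arithmetic checks out for the other scripts in this commit (e.g. dp64_tp2_pp2 needs 256 GPUs, hence --nodes=32 with 8 GPUs per node).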
scripts/run_1.34G_dp4_tp32_pp1_acc32_mbs4_seq8192_zero1_tpmodeALL_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp4_tp32_pp1_acc32_mbs4_seq8192_zero1_tpmodeALL_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=16 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+     --nnodes=$NNODES \
+     --nproc_per_node=$GPUS_PER_NODE \
+     --rdzv_id=$SLURM_JOB_ID \
+     --rdzv_backend=c10d \
+     --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+     run_train.py \
+     --config-file benchmark/configs/config_1.34G_dp4_tp32_pp1_acc32_mbs4_seq8192_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp64_tp2_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,159 @@
+ #!/bin/bash
+ #SBATCH --job-name=bench_1.34G_dp64_tp2_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=01:10:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=32 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+ #SBATCH --wait-all-nodes=1 # fail if any node is not ready
+
+ # run using
+ # sbatch --nodes=1 run_multinode.sh
+ # or
+ # SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
+
+ set -x -e
+
+ # If not running under SLURM, set default SLURM environment variables
+ if [ -z "${SLURM_JOB_ID}" ]; then
+     if [ -z "${SALLOC_JOBID}" ]; then
+         echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+         exit 1
+     fi
+     if [ -z "${NNODES}" ]; then
+         echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+         exit 1
+     fi
+     export SALLOC_MODE=1
+     export SLURM_JOB_ID=$SALLOC_JOBID
+     export SLURM_NNODES=$NNODES
+     export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+ fi
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+ # Unset FI_PROVIDER to avoid potential libfabric provider issues
+ # unset FI_PROVIDER
+
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+     export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+ else # srun mode
+     export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+ fi
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ export NCCL_DEBUG=INFO # INFO, WARN
+ # export NCCL_DEBUG_SUBSYS=ALL
+ # export CUDA_LAUNCH_BLOCKING=1
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+ export WANDB_MODE=disabled
+
+ # export TORCH_NCCL_USE_COMM_NONBLOCKING=1
+
+ # Trying to avoid hangs
+ export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+ # debug
+ export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+ # export NCCL_P2P_LEVEL=NVL
+ # export CUDA_LAUNCH_BLOCKING=1
+ # export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
+ # export NCCL_NET_GDR_LEVEL=LOC
+ # Test Script - save as test_comm.sh
+
+ # Test 1 - Force TCP
+ # echo "Running with TCP only..."
+ # export NCCL_P2P_LEVEL=LOC
+
+ # # Match bandwidth patterns
+ # export NCCL_MAX_NCHANNELS=2
+ # export NCCL_MIN_NCHANNELS=2
+
+
+ # export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+ # export NCCL_SHM_DISABLE=0 # 1 disables the Shared Memory (SHM) transport
+ # export NCCL_IB_DISABLE=0 # 1 disables the InfiniBand (IB) transport
+ # export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds, 21 = ~8 seconds, 22 = ~16 seconds
+ # export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+ # Force SHM
+ # export NCCL_NET_PLUGIN=none # fixes hang but doesn't work multi-node
+ # export NCCL_SOCKET_NTHREADS=1
+ # export FI_PROVIDER="tcp"
+
+ # Print GPU topology information
+ if [ -z "${SALLOC_MODE}" ]; then
+     echo "=== GPU Topology ==="
+     nvidia-smi topo -m
+     echo "=================="
+     export SRUN_ALLOC_ARGS=""
+ else
+     export JOBNAME="bench_1.34G_dp64_tp2_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k"
+     export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+     export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+ fi
+
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun in background
+ if [ -n "${SALLOC_MODE}" ]; then # srun mode
+     srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+         --nnodes=$NNODES \
+         --nproc_per_node=$GPUS_PER_NODE \
+         --rdzv_id=$SLURM_JOB_ID \
+         --rdzv_backend=c10d \
+         --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+         --max_restarts 0 \
+         --rdzv_conf timeout=60 \
+         /fsx/nouamane/projects/nanotron/run_train.py \
+         --config-file benchmark/configs/config_1.34G_dp64_tp2_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+     # Store the process ID
+     SRUN_PID=$!
+     echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+     # Optionally, you can add:
+     echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+     echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+ else # sbatch mode
+     srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+         --nnodes=$NNODES \
+         --nproc_per_node=$GPUS_PER_NODE \
+         --rdzv_id=$SLURM_JOB_ID \
+         --rdzv_backend=c10d \
+         --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+         --max_restarts 0 \
+         --rdzv_conf timeout=60 \
+         /fsx/nouamane/projects/nanotron/run_train.py \
+         --config-file benchmark/configs/config_1.34G_dp64_tp2_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml
+ fi
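Note (editorial, not part of the original script): unlike the plain sbatch-only scripts in this commit, this variant can also be launched from inside an existing salloc allocation, as its header comments state. A minimal usage sketch under that assumption (the salloc flags shown are illustrative; the SALLOC_JOBID value comes from the script's own comment):

# Option 1: submit as a batch job (sbatch mode)
sbatch --nodes=32 scripts/run_1.34G_dp64_tp2_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh

# Option 2: reuse an existing allocation (salloc mode)
# salloc --nodes=32 --gres=gpu:8 --partition=hopper-prod --time=01:10:00   # illustrative flags
SALLOC_JOBID=13482276 NNODES=32 bash scripts/run_1.34G_dp64_tp2_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh
# In salloc mode the script launches srun in the background and writes output to $OUTPUT_FILE.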
scripts/run_1.34G_dp64_tp8_pp1_acc8_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp64_tp8_pp1_acc8_mbs4_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+     --nnodes=$NNODES \
+     --nproc_per_node=$GPUS_PER_NODE \
+     --rdzv_id=$SLURM_JOB_ID \
+     --rdzv_backend=c10d \
+     --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+     run_train.py \
+     --config-file benchmark/configs/config_1.34G_dp64_tp8_pp1_acc8_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp8_tp16_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp8_tp16_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=16 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+     --nnodes=$NNODES \
+     --nproc_per_node=$GPUS_PER_NODE \
+     --rdzv_id=$SLURM_JOB_ID \
+     --rdzv_backend=c10d \
+     --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+     run_train.py \
+     --config-file benchmark/configs/config_1.34G_dp8_tp16_pp1_acc64_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp8_tp1_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp8_tp1_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=1 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+     --nnodes=$NNODES \
+     --nproc_per_node=$GPUS_PER_NODE \
+     --rdzv_id=$SLURM_JOB_ID \
+     --rdzv_backend=c10d \
+     --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+     run_train.py \
+     --config-file benchmark/configs/config_1.34G_dp8_tp1_pp1_acc4_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp8_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp8_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=2 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+     --nnodes=$NNODES \
+     --nproc_per_node=$GPUS_PER_NODE \
+     --rdzv_id=$SLURM_JOB_ID \
+     --rdzv_backend=c10d \
+     --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+     run_train.py \
+     --config-file benchmark/configs/config_1.34G_dp8_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp8_tp4_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp8_tp4_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=4 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+     --nnodes=$NNODES \
+     --nproc_per_node=$GPUS_PER_NODE \
+     --rdzv_id=$SLURM_JOB_ID \
+     --rdzv_backend=c10d \
+     --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+     run_train.py \
+     --config-file benchmark/configs/config_1.34G_dp8_tp4_pp1_acc4_mbs16_seq2048_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp8_tp4_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_1.34G_dp8_tp4_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab131k # Job name
+ #SBATCH --time=00:02:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=4 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+     --nnodes=$NNODES \
+     --nproc_per_node=$GPUS_PER_NODE \
+     --rdzv_id=$SLURM_JOB_ID \
+     --rdzv_backend=c10d \
+     --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+     run_train.py \
+     --config-file benchmark/configs/config_1.34G_dp8_tp4_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_3.27G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l28_h3072_heads24.sh ADDED
@@ -0,0 +1,68 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_3.27G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l28_h3072_heads24 # Job name
+ #SBATCH --time=00:15:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=64 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # # Disable EFA by changing the provider to tcp
+ # export FI_PROVIDER=tcp
+
+ # # Optionally, you can also unset these EFA-related variables
+ # unset FI_EFA_FORK_SAFE
+ # unset FI_EFA_ENABLE_SHM_TRANSFER
+
+ # # If you want to ensure NCCL uses TCP
+ # export NCCL_IB_DISABLE=1
+ # export NCCL_SOCKET_IFNAME=eth0
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+     --nnodes=$NNODES \
+     --nproc_per_node=$GPUS_PER_NODE \
+     --rdzv_id=$SLURM_JOB_ID \
+     --rdzv_backend=c10d \
+     --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+     run_train.py \
+     --config-file benchmark/configs/config_3.27G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l28_h3072_heads24.yaml
scripts/run_3.56G_dp32_tp4_pp1_acc8_mbs1_seq2048_zero0_l28_h3072_heads24.sh ADDED
@@ -0,0 +1,57 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_3.56G_dp32_tp4_pp1_acc8_mbs1_seq2048_zero0_l28_h3072_heads24 # Job name
+ #SBATCH --time=00:15:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --qos=high
+
+ #SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+ #SBATCH --nodes=16 # Number of nodes (modify as needed)
+ #SBATCH --ntasks-per-node=1 # Number of tasks per node
+ #SBATCH --cpus-per-task=60 # CPU cores per task
+ #SBATCH --gres=gpu:8 # Number of GPUs per node
+ #SBATCH --exclusive # Exclusive use of nodes
+
+ set -x -e
+
+ # Load any necessary modules for your system
+ source /etc/profile.d/modules.sh # for some reason module isn't loaded
+ module load cuda/12.1
+
+ # Activate your conda environment if needed
+ source /fsx/nouamane/miniconda/bin/activate
+ conda activate 2-1-cu121
+ export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+ # Get the node names from SLURM
+ export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+ export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+ export MASTER_PORT=12356
+
+ # Calculate total number of processes
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+ # Set some environment variables for better distributed training
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ # export NCCL_DEBUG=INFO
+
+ # Nanotron specific
+ export NANOTRON_BENCHMARK=1
+
+ # Print some debugging information
+ echo "Master node: $MASTER_NODE"
+ echo "All nodes: $NODELIST"
+ echo "World size: $WORLD_SIZE"
+
+ # Launch the training script using srun
+ srun torchrun \
+     --nnodes=$NNODES \
+     --nproc_per_node=$GPUS_PER_NODE \
+     --rdzv_id=$SLURM_JOB_ID \
+     --rdzv_backend=c10d \
+     --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+     run_train.py \
+     --config-file benchmark/configs/config_3.56G_dp32_tp4_pp1_acc8_mbs1_seq2048_zero0_l28_h3072_heads24.yaml