Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- scripts/run_1.14G_dp128_tp1_pp1_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp128_tp1_pp2_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp128_tp2_pp1_acc2_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp16_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp16_tp2_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp16_tp32_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp16_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp1_tp8_pp2_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp2_tp256_pp1_acc8_mbs128_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp2_tp256_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp2_tp4_pp1_acc16_mbs4_seq32768_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp2_tp64_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp2_tp8_pp1_acc64_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp32_tp8_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp1_pp2_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp64_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp64_pp1_acc1_mbs128_seq8192_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp64_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp4_tp8_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp8_tp16_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab32k.sh +68 -0
- scripts/run_1.14G_dp8_tp4_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh +68 -0
- scripts/run_1.34G_dp128_tp4_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp16_tp16_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp16_tp1_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp16_tp2_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp16_tp2_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh +159 -0
- scripts/run_1.34G_dp16_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp16_tp8_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp2_tp1_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh +159 -0
- scripts/run_1.34G_dp2_tp256_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh +68 -0
- scripts/run_1.34G_dp2_tp256_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp32_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh +124 -0
- scripts/run_1.34G_dp4_tp16_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp2_pp1_acc4_mbs32_seq8192_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp4_tp8_pp1_acc128_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp64_tp1_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh +159 -0
- scripts/run_1.34G_dp64_tp8_pp1_acc1_mbs8_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_1.34G_dp8_tp32_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.sh +68 -0
- scripts/run_187G_dp16_tp4_pp8_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128.sh +68 -0
- scripts/run_187G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128.sh +68 -0
- scripts/run_3.57G_dp1_tp16_pp1_acc1_mbs40_seq4096_zero0_tpmodeRED_vocab131k_cache.sh +161 -0
- scripts/run_3.57G_dp1_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.sh +161 -0
- scripts/run_3.57G_dp1_tp2_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh +73 -0
- scripts/run_3.57G_dp2_tp16_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh +159 -0
- scripts/run_3.57G_dp4_tp16_pp4_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh +159 -0
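Each script name encodes the benchmark's parallelism layout: dp (data-parallel), tp (tensor-parallel) and pp (pipeline-parallel) degrees, plus gradient-accumulation steps (acc), micro-batch size (mbs), sequence length (seq), ZeRO stage, tensor-parallel linear mode and vocabulary size. The `--nodes` value requested inside each script follows from that layout. Below is a minimal sketch, not part of this commit and with illustrative variable names, that decodes a name into its GPU and node requirement, assuming 8 GPUs per node as in the scripts themselves:

```bash
#!/bin/bash
# Sketch (assumption: dp/tp/pp in the name are the data-, tensor- and pipeline-parallel
# degrees, and every node exposes 8 GPUs as requested via --gres=gpu:8 below).
name="1.14G_dp128_tp1_pp1_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k"

dp=$(sed -E 's/.*_dp([0-9]+)_.*/\1/' <<<"$name")
tp=$(sed -E 's/.*_tp([0-9]+)_.*/\1/' <<<"$name")
pp=$(sed -E 's/.*_pp([0-9]+)_.*/\1/' <<<"$name")

gpus=$((dp * tp * pp))   # total ranks needed (one per GPU)
nodes=$((gpus / 8))      # nodes to request with --gres=gpu:8
echo "world size: $gpus GPUs -> --nodes=$nodes"   # here: 128 GPUs -> --nodes=16
```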
scripts/run_1.14G_dp128_tp1_pp1_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp128_tp1_pp1_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp128_tp1_pp1_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml
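These launchers are self-contained SLURM batch scripts; how they are submitted is not shown in this diff, but a typical invocation (an assumption: run from the nanotron repository root on a cluster with the partition and filesystem paths used above) looks like this:

```bash
# Submit the benchmark above; SLURM allocates 16 nodes x 8 GPUs and torchrun starts
# one rank per GPU, so WORLD_SIZE=128 for this dp128_tp1_pp1 layout.
sbatch scripts/run_1.14G_dp128_tp1_pp1_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh

# Job output goes where the #SBATCH -o line points (<jobid> is the SLURM job id):
tail -f /fsx/nouamane/projects/nanotron/logs/<jobid>-bench_1.14G_dp128_tp1_pp1_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k.out
```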
scripts/run_1.14G_dp128_tp1_pp2_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp128_tp1_pp2_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp128_tp1_pp2_acc4_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml
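Read with the usual meaning of the remaining knobs (an assumption here, since the referenced YAML configs are not part of this view): acc is the number of gradient-accumulation steps, mbs the micro-batch size per data-parallel rank, and seq the sequence length, so the name also fixes the effective batch per optimizer step. A small worked sketch for the dp128 / acc4 / mbs1 / seq2048 runs above:

```bash
# Hypothetical helper, not present in the scripts: effective batch implied by the name.
dp=128; acc=4; mbs=1; seq=2048
gbs=$((dp * acc * mbs))   # sequences per optimizer step -> 512
tokens=$((gbs * seq))     # tokens per optimizer step    -> 1048576
echo "global batch: $gbs sequences ($tokens tokens) per step"
```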
scripts/run_1.14G_dp128_tp2_pp1_acc2_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp128_tp2_pp1_acc2_mbs2_seq8192_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp128_tp2_pp1_acc2_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp16_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp16_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --nodes=4 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp16_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp16_tp2_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp16_tp2_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --nodes=4 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp16_tp2_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp16_tp32_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp16_tp32_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp16_tp32_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp16_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp16_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp16_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp1_tp8_pp2_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp1_tp8_pp2_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k # Job name
+#SBATCH --nodes=2 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp1_tp8_pp2_acc1_mbs64_seq2048_zero0_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp2_tp256_pp1_acc8_mbs128_seq2048_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp2_tp256_pp1_acc8_mbs128_seq2048_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp2_tp256_pp1_acc8_mbs128_seq2048_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp2_tp256_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp2_tp256_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp2_tp256_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp2_tp4_pp1_acc16_mbs4_seq32768_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp2_tp4_pp1_acc16_mbs4_seq32768_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --nodes=1 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp2_tp4_pp1_acc16_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp2_tp64_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp2_tp64_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp2_tp64_pp1_acc256_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp2_tp8_pp1_acc64_mbs4_seq8192_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp2_tp8_pp1_acc64_mbs4_seq8192_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --nodes=2 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp2_tp8_pp1_acc64_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp32_tp8_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp32_tp8_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp32_tp8_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp4_tp1_pp2_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp4_tp1_pp2_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --nodes=1 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp4_tp1_pp2_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.14G_dp4_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
(the remaining 67 added lines are identical to the first script above, apart from these three:)
+#SBATCH --job-name=bench_1.14G_dp4_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --nodes=1 # Number of nodes (modify as needed)
+    --config-file benchmark/configs/config_1.14G_dp4_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp4_tp64_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp4_tp64_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp4_tp64_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp4_tp64_pp1_acc1_mbs128_seq8192_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp4_tp64_pp1_acc1_mbs128_seq8192_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp4_tp64_pp1_acc1_mbs128_seq8192_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp4_tp64_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp4_tp64_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp4_tp64_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp4_tp8_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp4_tp8_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=4 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp4_tp8_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp8_tp16_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp8_tp16_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp8_tp16_pp1_acc64_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml
scripts/run_1.14G_dp8_tp4_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.14G_dp8_tp4_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=4 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.14G_dp8_tp4_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml
scripts/run_1.34G_dp128_tp4_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp128_tp4_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp128_tp4_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp16_tp16_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp16_tp16_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp16_tp16_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp16_tp1_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp16_tp1_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=2 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp16_tp1_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp16_tp2_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp16_tp2_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=4 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp16_tp2_pp1_acc1_mbs8_seq32768_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp16_tp2_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,159 @@
+#!/bin/bash
+#SBATCH --job-name=bench_1.34G_dp16_tp2_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=01:10:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+#SBATCH --wait-all-nodes=1 # fail if any node is not ready
+
+# run using
+# sbatch --nodes=1 run_multinode.sh
+# or
+# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
+
+set -x -e
+
+# If not running under SLURM, set default SLURM environment variables
+if [ -z "${SLURM_JOB_ID}" ]; then
+    if [ -z "${SALLOC_JOBID}" ]; then
+        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    if [ -z "${NNODES}" ]; then
+        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    export SALLOC_MODE=1
+    export SLURM_JOB_ID=$SALLOC_JOBID
+    export SLURM_NNODES=$NNODES
+    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+fi
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+# Unset FI_PROVIDER to avoid potential libfabric provider issues
+# unset FI_PROVIDER
+
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+else # srun mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+fi
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=INFO # INFO, WARN
+# export NCCL_DEBUG_SUBSYS=ALL
+# export CUDA_LAUNCH_BLOCKING=1
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+export WANDB_MODE=disabled
+
+# export TORCH_NCCL_USE_COMM_NONBLOCKING=1
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+# debug
+export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+# export NCCL_P2P_LEVEL=NVL
+# export CUDA_LAUNCH_BLOCKING=1
+# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
+# export NCCL_NET_GDR_LEVEL=LOC
+# Test Script - save as test_comm.sh
+
+# Test 1 - Force TCP
+# echo "Running with TCP only..."
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_1.34G_dp16_tp2_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+        --nnodes=$NNODES \
+        --nproc_per_node=$GPUS_PER_NODE \
+        --rdzv_id=$SLURM_JOB_ID \
+        --rdzv_backend=c10d \
+        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+        --max_restarts 0 \
+        --rdzv_conf timeout=60 \
+        /fsx/nouamane/projects/nanotron/run_train.py \
+        --config-file benchmark/configs/config_1.34G_dp16_tp2_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+    # Optionally, you can add:
+    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+else # sbatch mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+        --nnodes=$NNODES \
+        --nproc_per_node=$GPUS_PER_NODE \
+        --rdzv_id=$SLURM_JOB_ID \
+        --rdzv_backend=c10d \
+        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+        --max_restarts 0 \
+        --rdzv_conf timeout=60 \
+        /fsx/nouamane/projects/nanotron/run_train.py \
+        --config-file benchmark/configs/config_1.34G_dp16_tp2_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml
+fi
scripts/run_1.34G_dp16_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp16_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp16_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp16_tp8_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp16_tp8_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp16_tp8_pp1_acc16_mbs8_seq2048_zero1_tpmodeALL_vocab131k.yaml
scripts/run_1.34G_dp2_tp1_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,159 @@
+#!/bin/bash
+#SBATCH --job-name=bench_1.34G_dp2_tp1_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=01:10:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=2 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+#SBATCH --wait-all-nodes=1 # fail if any node is not ready
+
+# run using
+# sbatch --nodes=1 run_multinode.sh
+# or
+# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
+
+set -x -e
+
+# If not running under SLURM, set default SLURM environment variables
+if [ -z "${SLURM_JOB_ID}" ]; then
+    if [ -z "${SALLOC_JOBID}" ]; then
+        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    if [ -z "${NNODES}" ]; then
+        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    export SALLOC_MODE=1
+    export SLURM_JOB_ID=$SALLOC_JOBID
+    export SLURM_NNODES=$NNODES
+    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+fi
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+# Unset FI_PROVIDER to avoid potential libfabric provider issues
+# unset FI_PROVIDER
+
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+else # srun mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+fi
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=INFO # INFO, WARN
+# export NCCL_DEBUG_SUBSYS=ALL
+# export CUDA_LAUNCH_BLOCKING=1
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+export WANDB_MODE=disabled
+
+# export TORCH_NCCL_USE_COMM_NONBLOCKING=1
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+# debug
+export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+# export NCCL_P2P_LEVEL=NVL
+# export CUDA_LAUNCH_BLOCKING=1
+# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
+# export NCCL_NET_GDR_LEVEL=LOC
+# Test Script - save as test_comm.sh
+
+# Test 1 - Force TCP
+# echo "Running with TCP only..."
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_1.34G_dp2_tp1_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+        --nnodes=$NNODES \
+        --nproc_per_node=$GPUS_PER_NODE \
+        --rdzv_id=$SLURM_JOB_ID \
+        --rdzv_backend=c10d \
+        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+        --max_restarts 0 \
+        --rdzv_conf timeout=60 \
+        /fsx/nouamane/projects/nanotron/run_train.py \
+        --config-file benchmark/configs/config_1.34G_dp2_tp1_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+    # Optionally, you can add:
+    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+else # sbatch mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+        --nnodes=$NNODES \
+        --nproc_per_node=$GPUS_PER_NODE \
+        --rdzv_id=$SLURM_JOB_ID \
+        --rdzv_backend=c10d \
+        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+        --max_restarts 0 \
+        --rdzv_conf timeout=60 \
+        /fsx/nouamane/projects/nanotron/run_train.py \
+        --config-file benchmark/configs/config_1.34G_dp2_tp1_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml
+fi
scripts/run_1.34G_dp2_tp256_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp2_tp256_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp256_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml
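These scripts derive WORLD_SIZE only from the SLURM allocation, while the parallelism layout (dp, tp, pp) lives in the run name and the YAML config. A minimal sanity-check sketch, not part of the generated scripts, that could run just before the srun line; the DP/TP/PP values are hypothetical and would have to be kept in sync with the config by hand:

# Sketch only (not in the generated script): the product of the assumed
# parallel dimensions must equal the number of GPUs SLURM actually allocated.
DP=2; TP=256; PP=1                    # hypothetical values from the run name
REQUIRED_GPUS=$((DP * TP * PP))
if [ "$WORLD_SIZE" -ne "$REQUIRED_GPUS" ]; then
    echo "Layout mismatch: WORLD_SIZE=$WORLD_SIZE, dp*tp*pp=$REQUIRED_GPUS" >&2
    exit 1
fi

For this particular script, 64 nodes x 8 GPUs gives WORLD_SIZE=512, which matches dp2 x tp256 x pp1.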
scripts/run_1.34G_dp2_tp256_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp2_tp256_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp256_pp1_acc8_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp32_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp32_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=8 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp32_tp2_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,124 @@
+#!/bin/bash
+#SBATCH --job-name=bench_1.34G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k # Job name
+#SBATCH --time=01:10:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+#SBATCH --wait-all-nodes=1 # fail if any node is not ready
+
+# run using
+# sbatch --nodes=1 run_multinode.sh
+# or
+# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
+
+set -x -e
+
+# If not running under SLURM, set default SLURM environment variables
+if [ -z "${SLURM_JOB_ID}" ]; then
+    if [ -z "${SALLOC_JOBID}" ]; then
+        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    if [ -z "${NNODES}" ]; then
+        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    export SALLOC_MODE=1
+    export SLURM_JOB_ID=$SALLOC_JOBID
+    export SLURM_NNODES=$NNODES
+    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+fi
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+else # srun mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+fi
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=WARN # INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+export WANDB_MODE=disabled
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_1.34G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+    # Optionally, you can add:
+    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+else # sbatch mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp32_tp4_pp1_acc8_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml
+fi
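In salloc mode the script above backgrounds srun and only reports the PID, so the calling shell returns immediately. A hedged sketch of how one might block on the run and stream its log, reusing the script's own SRUN_PID and OUTPUT_FILE variables (this is not emitted by the script; GNU tail's --pid option is assumed to be available):

# Sketch only: follow the log until the backgrounded srun exits, then
# propagate its exit status.
tail --pid="$SRUN_PID" -f "$OUTPUT_FILE" &
wait "$SRUN_PID"
EXIT_CODE=$?
echo "srun exited with code $EXIT_CODE" | tee -a "$OUTPUT_FILE"
exit $EXIT_CODE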
scripts/run_1.34G_dp4_tp16_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp4_tp16_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=8 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp4_tp16_pp1_acc32_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp4_tp2_pp1_acc4_mbs32_seq8192_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp4_tp2_pp1_acc4_mbs32_seq8192_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=1 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp4_tp2_pp1_acc4_mbs32_seq8192_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp4_tp8_pp1_acc128_mbs4_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp4_tp8_pp1_acc128_mbs4_seq2048_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=4 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp4_tp8_pp1_acc128_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp64_tp1_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,159 @@
+#!/bin/bash
+#SBATCH --job-name=bench_1.34G_dp64_tp1_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=01:10:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=16 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+#SBATCH --wait-all-nodes=1 # fail if any node is not ready
+
+# run using
+# sbatch --nodes=1 run_multinode.sh
+# or
+# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
+
+set -x -e
+
+# If not running under SLURM, set default SLURM environment variables
+if [ -z "${SLURM_JOB_ID}" ]; then
+    if [ -z "${SALLOC_JOBID}" ]; then
+        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    if [ -z "${NNODES}" ]; then
+        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    export SALLOC_MODE=1
+    export SLURM_JOB_ID=$SALLOC_JOBID
+    export SLURM_NNODES=$NNODES
+    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+fi
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+# Unset FI_PROVIDER to avoid potential libfabric provider issues
+# unset FI_PROVIDER
+
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+else # srun mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+fi
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=INFO # INFO, WARN
+# export NCCL_DEBUG_SUBSYS=ALL
+# export CUDA_LAUNCH_BLOCKING=1
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+export WANDB_MODE=disabled
+
+# export TORCH_NCCL_USE_COMM_NONBLOCKING=1
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+# debug
+export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+# export NCCL_P2P_LEVEL=NVL
+# export CUDA_LAUNCH_BLOCKING=1
+# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
+# export NCCL_NET_GDR_LEVEL=LOC
+# Test Script - save as test_comm.sh
+
+# Test 1 - Force TCP
+# echo "Running with TCP only..."
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_1.34G_dp64_tp1_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp64_tp1_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+    # Optionally, you can add:
+    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+else # sbatch mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp64_tp1_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml
+fi
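This variant carries a long block of mostly commented-out NCCL, libfabric, and Torch debug knobs; when comparing benchmark runs it helps to record which of them were actually exported. A small sketch, not part of the generated script, that could be added just before the launch:

# Sketch only: log the communication-related variables that are really set,
# so each benchmark log records the active debug configuration.
env | grep -E '^(NCCL|TORCH|FI)_' | sort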
scripts/run_1.34G_dp64_tp8_pp1_acc1_mbs8_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp64_tp8_pp1_acc1_mbs8_seq2048_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp64_tp8_pp1_acc1_mbs8_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_1.34G_dp8_tp32_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_1.34G_dp8_tp32_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:02:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_1.34G_dp8_tp32_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml
scripts/run_187G_dp16_tp4_pp8_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_187G_dp16_tp4_pp8_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128 # Job name
+#SBATCH --time=00:15:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_187G_dp16_tp4_pp8_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128.yaml
scripts/run_187G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_187G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128 # Job name
+#SBATCH --time=00:15:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=64 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+# export NCCL_DEBUG=INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+
+# # Disable EFA by changing the provider to tcp
+# export FI_PROVIDER=tcp
+
+# # Optionally, you can also unset these EFA-related variables
+# unset FI_EFA_FORK_SAFE
+# unset FI_EFA_ENABLE_SHM_TRANSFER
+
+# # If you want to ensure NCCL uses TCP
+# export NCCL_IB_DISABLE=1
+# export NCCL_SOCKET_IFNAME=eth0
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    run_train.py \
+    --config-file benchmark/configs/config_187G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l126_h16384_heads128.yaml
scripts/run_3.57G_dp1_tp16_pp1_acc1_mbs40_seq4096_zero0_tpmodeRED_vocab131k_cache.sh
ADDED
@@ -0,0 +1,161 @@
+#!/bin/bash
+#SBATCH --job-name=bench_3.57G_dp1_tp16_pp1_acc1_mbs40_seq4096_zero0_tpmodeRED_vocab131k_cache # Job name
+#SBATCH --time=00:40:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=2 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+#SBATCH --wait-all-nodes=1 # fail if any node is not ready
+
+# run using
+# sbatch --nodes=1 run_multinode.sh
+# or
+# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
+
+set -x -e
+echo "Running script: $0"
+
+
+# If not running under SLURM, set default SLURM environment variables
+if [ -z "${SLURM_JOB_ID}" ]; then
+    if [ -z "${SALLOC_JOBID}" ]; then
+        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    if [ -z "${NNODES}" ]; then
+        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    export SALLOC_MODE=1
+    export SLURM_JOB_ID=$SALLOC_JOBID
+    export SLURM_NNODES=$NNODES
+    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+fi
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+# Unset FI_PROVIDER to avoid potential libfabric provider issues
+# unset FI_PROVIDER
+
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+else # srun mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+fi
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=WARN # INFO, WARN
+# export NCCL_DEBUG_SUBSYS=ALL
+# export CUDA_LAUNCH_BLOCKING=1
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+export WANDB_MODE=disabled
+
+# export TORCH_NCCL_USE_COMM_NONBLOCKING=1
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+# debug
+export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+# export NCCL_P2P_LEVEL=NVL
+# export CUDA_LAUNCH_BLOCKING=1
+# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
+# export NCCL_NET_GDR_LEVEL=LOC
+# Test Script - save as test_comm.sh
+
+# Test 1 - Force TCP
+# echo "Running with TCP only..."
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_3.57G_dp1_tp16_pp1_acc1_mbs40_seq4096_zero0_tpmodeRED_vocab131k_cache"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_3.57G_dp1_tp16_pp1_acc1_mbs40_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+    # Optionally, you can add:
+    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+else # sbatch mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_3.57G_dp1_tp16_pp1_acc1_mbs40_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml
+fi
scripts/run_3.57G_dp1_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.sh
ADDED
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
|
2 |
+
#SBATCH --job-name=bench_3.57G_dp1_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache # Job name
|
3 |
+
#SBATCH --time=00:40:00
|
4 |
+
#SBATCH --partition=hopper-prod
|
5 |
+
#SBATCH --qos=high
|
6 |
+
|
7 |
+
#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
|
8 |
+
|
9 |
+
#SBATCH --nodes=1 # Number of nodes (modify as needed)
|
10 |
+
#SBATCH --ntasks-per-node=1 # Number of tasks per node
|
11 |
+
#SBATCH --cpus-per-task=60 # CPU cores per task
|
12 |
+
#SBATCH --gres=gpu:8 # Number of GPUs per node
|
13 |
+
#SBATCH --exclusive # Exclusive use of nodes
|
14 |
+
#SBATCH --wait-all-nodes=1 # fail if any node is not ready
|
15 |
+
|
16 |
+
# run using
|
17 |
+
# sbatch --nodes=1 run_multinode.sh
|
18 |
+
# or
|
19 |
+
# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
|
20 |
+
|
21 |
+
set -x -e
|
22 |
+
echo "Running script: $0"
|
+
+
+# If not running under SLURM, set default SLURM environment variables
+if [ -z "${SLURM_JOB_ID}" ]; then
+    if [ -z "${SALLOC_JOBID}" ]; then
+        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    if [ -z "${NNODES}" ]; then
+        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    export SALLOC_MODE=1
+    export SLURM_JOB_ID=$SALLOC_JOBID
+    export SLURM_NNODES=$NNODES
+    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+fi
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+# Unset FI_PROVIDER to avoid potential libfabric provider issues
+# unset FI_PROVIDER
+
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+else # srun mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+fi
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=WARN # INFO, WARN
+# export NCCL_DEBUG_SUBSYS=ALL
+# export CUDA_LAUNCH_BLOCKING=1
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+export WANDB_MODE=disabled
+
+# export TORCH_NCCL_USE_COMM_NONBLOCKING=1
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+# debug
+export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+# export NCCL_P2P_LEVEL=NVL
+# export CUDA_LAUNCH_BLOCKING=1
+# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
+# export NCCL_NET_GDR_LEVEL=LOC
+# Test Script - save as test_comm.sh
+
+# Test 1 - Force TCP
+# echo "Running with TCP only..."
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_3.57G_dp1_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+        --nnodes=$NNODES \
+        --nproc_per_node=$GPUS_PER_NODE \
+        --rdzv_id=$SLURM_JOB_ID \
+        --rdzv_backend=c10d \
+        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+        --max_restarts 0 \
+        --rdzv_conf timeout=60 \
+        /fsx/nouamane/projects/nanotron/run_train.py \
+        --config-file benchmark/configs/config_3.57G_dp1_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+    # Optionally, you can add:
+    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+else # sbatch mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+        --nnodes=$NNODES \
+        --nproc_per_node=$GPUS_PER_NODE \
+        --rdzv_id=$SLURM_JOB_ID \
+        --rdzv_backend=c10d \
+        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+        --max_restarts 0 \
+        --rdzv_conf timeout=60 \
+        /fsx/nouamane/projects/nanotron/run_train.py \
+        --config-file benchmark/configs/config_3.57G_dp1_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml
+fi
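Note: the script above runs in either sbatch or salloc mode. A minimal launch sketch, assuming the file lives under scripts/ with the same naming pattern as the other benchmark scripts and that one node of 8 GPUs covers the dp1 x tp1 x pp8 layout (the job ID is illustrative):

    # sbatch mode: SLURM itself provides SLURM_JOB_ID and SLURM_NNODES
    sbatch --nodes=1 scripts/run_3.57G_dp1_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.sh

    # salloc mode: reuse an existing allocation; output goes to the timestamped $OUTPUT_FILE log
    SALLOC_JOBID=13482276 NNODES=1 bash scripts/run_3.57G_dp1_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k_cache.sh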
scripts/run_3.57G_dp1_tp2_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_3.57G_dp1_tp2_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k # Job name
+#SBATCH --time=00:10:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+#SBATCH --exclude=ip-26-0-160-192,ip-26-0-171-102
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=2 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+#SBATCH --wait-all-nodes=1 # fail if any node is not ready
+
+set -x -e
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=WARN # INFO
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+# Disable wandb
+export WANDB_MODE=disabled
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+
+# Print GPU topology information
+echo "=== GPU Topology ==="
+nvidia-smi topo -m
+echo "=================="
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun
+srun --wait=0 --kill-on-bad-exit=1 torchrun \
+    --nnodes=$NNODES \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --rdzv_id=$SLURM_JOB_ID \
+    --rdzv_backend=c10d \
+    --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+    --max_restarts 0 \
+    --rdzv_conf timeout=60 \
+    /fsx/nouamane/projects/nanotron/run_train.py \
+    --config-file benchmark/configs/config_3.57G_dp1_tp2_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml
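A quick consistency check for this sbatch-only variant, assuming the dp/tp/pp fields in the name denote data-, tensor- and pipeline-parallel degrees: dp1 x tp2 x pp8 = 16 ranks, matching the requested 2 nodes x 8 GPUs (WORLD_SIZE=16). A plain submission would then be:

    sbatch scripts/run_3.57G_dp1_tp2_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.sh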
scripts/run_3.57G_dp2_tp16_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,159 @@
+#!/bin/bash
+#SBATCH --job-name=bench_3.57G_dp2_tp16_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=01:10:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+#SBATCH --wait-all-nodes=1 # fail if any node is not ready
+
+# run using
+# sbatch --nodes=1 run_multinode.sh
+# or
+# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
+
+set -x -e
+
+# If not running under SLURM, set default SLURM environment variables
+if [ -z "${SLURM_JOB_ID}" ]; then
+    if [ -z "${SALLOC_JOBID}" ]; then
+        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    if [ -z "${NNODES}" ]; then
+        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    export SALLOC_MODE=1
+    export SLURM_JOB_ID=$SALLOC_JOBID
+    export SLURM_NNODES=$NNODES
+    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+fi
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+# Unset FI_PROVIDER to avoid potential libfabric provider issues
+# unset FI_PROVIDER
+
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+else # srun mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+fi
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=INFO # INFO, WARN
+# export NCCL_DEBUG_SUBSYS=ALL
+# export CUDA_LAUNCH_BLOCKING=1
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+export WANDB_MODE=disabled
+
+# export TORCH_NCCL_USE_COMM_NONBLOCKING=1
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+# debug
+export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+# export NCCL_P2P_LEVEL=NVL
+# export CUDA_LAUNCH_BLOCKING=1
+# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
+# export NCCL_NET_GDR_LEVEL=LOC
+# Test Script - save as test_comm.sh
+
+# Test 1 - Force TCP
+# echo "Running with TCP only..."
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_3.57G_dp2_tp16_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+        --nnodes=$NNODES \
+        --nproc_per_node=$GPUS_PER_NODE \
+        --rdzv_id=$SLURM_JOB_ID \
+        --rdzv_backend=c10d \
+        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+        --max_restarts 0 \
+        --rdzv_conf timeout=60 \
+        /fsx/nouamane/projects/nanotron/run_train.py \
+        --config-file benchmark/configs/config_3.57G_dp2_tp16_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+    # Optionally, you can add:
+    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+else # sbatch mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+        --nnodes=$NNODES \
+        --nproc_per_node=$GPUS_PER_NODE \
+        --rdzv_id=$SLURM_JOB_ID \
+        --rdzv_backend=c10d \
+        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+        --max_restarts 0 \
+        --rdzv_conf timeout=60 \
+        /fsx/nouamane/projects/nanotron/run_train.py \
+        --config-file benchmark/configs/config_3.57G_dp2_tp16_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml
+fi
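For the 32-node script above, a possible salloc workflow mirroring its own usage comment (a rough sketch; the allocation ID is a placeholder, and flags beyond those in the #SBATCH header are assumptions):

    salloc --partition=hopper-prod --qos=high --nodes=32 --gres=gpu:8 --exclusive --time=01:10:00
    SALLOC_JOBID=<allocation-id> NNODES=32 bash scripts/run_3.57G_dp2_tp16_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh
    # the run streams to the timestamped log defined by OUTPUT_FILE
    tail -f /fsx/nouamane/projects/nanotron/logs/<allocation-id>-*-bench_3.57G_dp2_tp16_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.out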
scripts/run_3.57G_dp4_tp16_pp4_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh
ADDED
@@ -0,0 +1,159 @@
+#!/bin/bash
+#SBATCH --job-name=bench_3.57G_dp4_tp16_pp4_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k # Job name
+#SBATCH --time=01:10:00
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=high
+
+#SBATCH -o /fsx/nouamane/projects/nanotron/logs/%j-%x.out
+
+#SBATCH --nodes=32 # Number of nodes (modify as needed)
+#SBATCH --ntasks-per-node=1 # Number of tasks per node
+#SBATCH --cpus-per-task=60 # CPU cores per task
+#SBATCH --gres=gpu:8 # Number of GPUs per node
+#SBATCH --exclusive # Exclusive use of nodes
+#SBATCH --wait-all-nodes=1 # fail if any node is not ready
+
+# run using
+# sbatch --nodes=1 run_multinode.sh
+# or
+# SALLOC_JOBID=13482276 NNODES=1 bash run_multinode.sh
+
+set -x -e
+
+# If not running under SLURM, set default SLURM environment variables
+if [ -z "${SLURM_JOB_ID}" ]; then
+    if [ -z "${SALLOC_JOBID}" ]; then
+        echo "Error: SALLOC_JOBID environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    if [ -z "${NNODES}" ]; then
+        echo "Error: NNODES environment variable is required but not set. Please run this script within an salloc session."
+        exit 1
+    fi
+    export SALLOC_MODE=1
+    export SLURM_JOB_ID=$SALLOC_JOBID
+    export SLURM_NNODES=$NNODES
+    export SLURM_JOB_NODELIST=$(squeue -j $SALLOC_JOBID -h -o "%N")
+fi
+
+# Load any necessary modules for your system
+source /etc/profile.d/modules.sh # for some reason module isn't loaded
+module load cuda/12.1
+# Unset FI_PROVIDER to avoid potential libfabric provider issues
+# unset FI_PROVIDER
+
+
+# Activate your conda environment if needed
+source /fsx/nouamane/miniconda/bin/activate
+conda activate 2-1-cu121
+export PATH=/fsx/nouamane/miniconda/envs/2-1-cu121/bin:$PATH
+
+# Get the node names from SLURM
+if [ -z "${SALLOC_MODE}" ]; then # sbatch mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST`
+
+else # srun mode
+    export NODELIST=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n$SLURM_NNODES`
+fi
+export MASTER_NODE=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n1`
+export MASTER_PORT=12356
+
+# Calculate total number of processes
+export NNODES=$SLURM_NNODES
+export GPUS_PER_NODE=8
+export WORLD_SIZE=$(($NNODES * $GPUS_PER_NODE))
+
+# Set some environment variables for better distributed training
+export CUDA_DEVICE_MAX_CONNECTIONS=1
+export NCCL_DEBUG=INFO # INFO, WARN
+# export NCCL_DEBUG_SUBSYS=ALL
+# export CUDA_LAUNCH_BLOCKING=1
+
+# Nanotron specific
+export NANOTRON_BENCHMARK=1
+export WANDB_MODE=disabled
+
+# export TORCH_NCCL_USE_COMM_NONBLOCKING=1
+
+# Trying to avoid hangs
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+
+# debug
+export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+# export NCCL_P2P_LEVEL=NVL
+# export CUDA_LAUNCH_BLOCKING=1
+# export NCCL_IB_CUDA_SUPPORT=0 # Disable RDMA
+# export NCCL_NET_GDR_LEVEL=LOC
+# Test Script - save as test_comm.sh
+
+# Test 1 - Force TCP
+# echo "Running with TCP only..."
+# export NCCL_P2P_LEVEL=LOC
+
+# # Match bandwidth patterns
+# export NCCL_MAX_NCHANNELS=2
+# export NCCL_MIN_NCHANNELS=2
+
+
+# export NCCL_NET_GDR_LEVEL=LOC # Disable RDMA
+# export NCCL_SHM_DISABLE=0 # disables the Shared Memory (SHM) transport
+# export NCCL_IB_DISABLE=0 # disables the InfiniBand (IB) transport
+# export NCCL_IB_TIMEOUT=60 # 20 = ~4 seconds , 21 = ~8 seconds , 22 = ~16 seconds
+# export NCCL_IB_RETRY_CNT=7 # Increase retry count as well
+
+# Force SHM
+# export NCCL_NET_PLUGIN=none # fixes hang but doesnt work multinode
+# export NCCL_SOCKET_NTHREADS=1
+# export FI_PROVIDER="tcp"
+
+# Print GPU topology information
+if [ -z "${SALLOC_MODE}" ]; then
+    echo "=== GPU Topology ==="
+    nvidia-smi topo -m
+    echo "=================="
+    export SRUN_ALLOC_ARGS=""
+else
+    export JOBNAME="bench_3.57G_dp4_tp16_pp4_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k"
+    export OUTPUT_FILE="/fsx/nouamane/projects/nanotron/logs/$SLURM_JOB_ID-$(date +%Y-%m-%d-%H-%M-%S)-$JOBNAME.out"
+    export SRUN_ALLOC_ARGS="--jobid=$SLURM_JOB_ID --nodes=$NNODES --gres=gpu:$GPUS_PER_NODE --time=01:02:00 --job-name=$JOBNAME"
+fi
+
+
+# Print some debugging information
+echo "Master node: $MASTER_NODE"
+echo "All nodes: $NODELIST"
+echo "World size: $WORLD_SIZE"
+
+# Launch the training script using srun in background
+if [ -n "${SALLOC_MODE}" ]; then # srun mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+        --nnodes=$NNODES \
+        --nproc_per_node=$GPUS_PER_NODE \
+        --rdzv_id=$SLURM_JOB_ID \
+        --rdzv_backend=c10d \
+        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+        --max_restarts 0 \
+        --rdzv_conf timeout=60 \
+        /fsx/nouamane/projects/nanotron/run_train.py \
+        --config-file benchmark/configs/config_3.57G_dp4_tp16_pp4_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml > $OUTPUT_FILE 2>&1 &
+    # Store the process ID
+    SRUN_PID=$!
+    echo "Job started in background with PID: $SRUN_PID" | tee -a $OUTPUT_FILE
+
+    # Optionally, you can add:
+    echo "To check job status: ps -p $SRUN_PID" | tee -a $OUTPUT_FILE
+    echo "To kill the job: kill $SRUN_PID" | tee -a $OUTPUT_FILE
+
+else # sbatch mode
+    srun $SRUN_ALLOC_ARGS --wait=0 --kill-on-bad-exit=1 torchrun \
+        --nnodes=$NNODES \
+        --nproc_per_node=$GPUS_PER_NODE \
+        --rdzv_id=$SLURM_JOB_ID \
+        --rdzv_backend=c10d \
+        --rdzv_endpoint=$MASTER_NODE:$MASTER_PORT \
+        --max_restarts 0 \
+        --rdzv_conf timeout=60 \
+        /fsx/nouamane/projects/nanotron/run_train.py \
+        --config-file benchmark/configs/config_3.57G_dp4_tp16_pp4_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml
+fi
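As with the previous script, a rank-count check for this configuration (again reading dp/tp/pp from the filename as the parallel degrees): dp4 x tp16 x pp4 = 256 ranks = 32 nodes x 8 GPUs, so the #SBATCH --nodes=32 request matches the parallelism layout and an sbatch submission needs no extra arguments:

    sbatch scripts/run_3.57G_dp4_tp16_pp4_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.sh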