#!/bin/bash
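#
# Submit a full-node Slurm job that runs an evaluation script on every domain
# listed in a datasplit YAML.
#
# Usage: sbatch <this_script> <script_name> <dataset_file>
#   script_name  - name of the evaluation script, forwarded to the runner below
#   dataset_file - basename of a YAML file under experiments/datasplit/
#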
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --exclusive
#SBATCH --time=3-0
#SBATCH --partition=learnfair
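# Stdout/stderr go to logs/ (the directory must exist before submission)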
#SBATCH --error=logs/std-%j.err
#SBATCH --output=logs/std-%j.out
#SBATCH --gpus-per-node=8
#SBATCH --cpus-per-task=32
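# Keep the job off of these specific learnfair nodes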
#SBATCH --exclude=learnfair[021,025,045,081,082,089,097,098,101,102,103,105]


set -x       # echo each command to the job log for easier debugging
ulimit -c 0  # disable core dumps


script_name=${1}
dataset_file=${2}
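# Log in to Weights & Biases with the hardcoded API key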
WANDB_KEY=4c1540ebf8cb9964703ac212a937c00848a79b67
wandb login ${WANDB_KEY}

# Dataset names in the datasplit YAML are comma-separated; run the evaluation once per domain.


echo "--------------------------------------------------" >> ~/history.txt
echo "Slurm job id | job id | command | model | dataset" >> ~/history.txt
echo "$SLURM_JOB_ID | $JOB_ID | evaluation | $script_name | $dataset" >> ~/history.txt
datasets=$(python -c "import yaml; print(yaml.safe_load(open('experiments/datasplit/$dataset_file.yaml'))['domains'])")

IFS=',' read -ra dataset_array <<< "$datasets"  # split the comma-separated list into an array

# Iterate over the datasets and run the evaluation script for each one
for dataset in "${dataset_array[@]}"; do
    dataset=$(echo "$dataset" | xargs)  # trim surrounding whitespace
    bash experiments/scripts/eval_action_scripts/run_evaluation_waction_valset_cluster2_raw_accel.sh "$script_name" "$dataset"
done