#!/bin/bash
set -x
ulimit -c 0
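
# Usage (inferred from the positional parameters below; names are illustrative):
#   bash evaluate.sh <script_name> <dataset_file>
#     script_name  - checkpoint directory name under data/ (the model to evaluate)
#     dataset_file - basename of a YAML file in experiments/datasplit/ listing eval domains
#   e.g. bash evaluate.sh genie_base robot_mix   (hypothetical names)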

script_name=${1}
dataset_file=${2}
export WANDB_INIT_TIMEOUT=200

# Dataset names in the datasplit YAML are comma-separated; iterate over each one below.

echo "--------------------------------------------------" >> ~/history.txt
echo "Slurm job id | job id | command | model | dataset" >> ~/history.txt
echo "$SLURM_JOB_ID | $JOB_ID | evaluation | $script_name | $dataset" >> ~/history.txt
datasets=$(shyaml get-value domains < "experiments/datasplit/${dataset_file}.yaml")
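# Expected shape of the datasplit YAML (hypothetical example; the real files live in experiments/datasplit/):
#   domains: bridge, droid, language_table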
# Do not hardcode API keys; read the key from the environment instead.
WANDB_KEY=${WANDB_API_KEY:?WANDB_API_KEY must be set}
wandb login "${WANDB_KEY}"
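# Give the W&B service extra time to start on busy cluster nodes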
export WANDB__SERVICE_WAIT=300

# Split the string into an array
IFS=',' read -ra dataset_array <<< "$datasets"
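# e.g. datasets="bridge, droid" splits into ("bridge" " droid");
# stray whitespace around each name is trimmed with xargs inside the loop.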

# Iterate over the datasets and run the evaluation script for each one
for dataset in "${dataset_array[@]}"; do
    # Trim surrounding whitespace, then create the output directory
    dataset=$(echo "$dataset" | xargs)
    mkdir -p "data/${script_name}_${dataset}/output"

    # Evaluate the checkpoint on this dataset's validation split
    CUDA_VISIBLE_DEVICES=0 python genie/evaluate.py --checkpoint_dir "data/${script_name}" \
        --val_data_dir "data/${dataset}_magvit_traj1000000_val" --save_outputs_dir "data/${script_name}_${dataset}"

    # Alternative: evaluate with autoregressive rollout over time
    # CUDA_VISIBLE_DEVICES=0 python genie/evaluate.py --checkpoint_dir "data/${script_name}" \
    #     --val_data_dir "data/${dataset}_magvit_traj1000000_val" --autoregressive_time --save_outputs_dir "data/${script_name}_${dataset}"

    # Generate token sequences from the checkpoint
    python genie/generate.py --checkpoint_dir "data/${script_name}" \
        --val_data_dir "data/${dataset}_magvit_traj1000000_val" \
        --output_dir "data/${script_name}_${dataset}/output"

    # Visualize
    python visualize.py --token_dir "data/${script_name}_${dataset}/output"

done