#!/bin/bash
set -x
ulimit -c 0  # disable core dumps
script_name=${1}   # checkpoint directory name under data/
dataset_file=${2}  # datasplit YAML name under experiments/datasplit/ (without .yaml)
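
# Both arguments are required; fail early with a usage hint
# (the example names below are placeholders, not from this repo).
if [ -z "${script_name}" ] || [ -z "${dataset_file}" ]; then
    echo "Usage: $0 <script_name> <dataset_file>" >&2
    echo "e.g.: $0 my_checkpoint my_datasplit" >&2
    exit 1
fi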
export WANDB_INIT_TIMEOUT=200
# Dataset names in the datasplit file are comma-separated; evaluate each in turn.
echo "--------------------------------------------------" >> ~/history.txt
echo "Slurm job id | job id | command | model | dataset" >> ~/history.txt
echo "$SLURM_JOB_ID | $JOB_ID | evaluation | $script_name | $dataset" >> ~/history.txt
datasets=$(shyaml get-value domains < "experiments/datasplit/${dataset_file}.yaml")
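# Assumed datasplit YAML shape (inferred from the shyaml query above), e.g.:
#   domains: dataset_a, dataset_b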
# Read the W&B API key from the environment rather than hardcoding it.
WANDB_KEY=${WANDB_API_KEY:?"WANDB_API_KEY is not set"}
wandb login "${WANDB_KEY}"
export WANDB__SERVICE_WAIT=300
# Split the string into an array
IFS=',' read -ra dataset_array <<< "$datasets"
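# Sanity check: abort if the split produced no datasets (assumes shyaml
# prints an empty string when the `domains` key is missing).
if [ "${#dataset_array[@]}" -eq 0 ]; then
    echo "No datasets found in experiments/datasplit/${dataset_file}.yaml" >&2
    exit 1
fi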
# Iterate over the datasets and run the evaluation script for each one
for dataset in "${dataset_array[@]}"; do
# Create the output directory, then evaluate the checkpoint on this dataset
dataset=$(echo "$dataset" | xargs)  # trim surrounding whitespace
mkdir -p "data/${script_name}_${dataset}/output"
CUDA_VISIBLE_DEVICES=0 python genie/evaluate.py --checkpoint_dir "data/${script_name}" \
--val_data_dir "data/${dataset}_magvit_traj1000000_val" --save_outputs_dir data/${script_name}_${dataset}
# Optional variant: the same evaluation with autoregressive rollout over time.
# CUDA_VISIBLE_DEVICES=0 python genie/evaluate.py --checkpoint_dir "data/${script_name}" \
#     --val_data_dir "data/${dataset}_magvit_traj1000000_val" --autoregressive_time --save_outputs_dir "data/${script_name}_${dataset}"
# Generate predictions for this dataset
python genie/generate.py --checkpoint_dir "data/${script_name}" \
--val_data_dir "data/${dataset}_magvit_traj1000000_val" \
--output_dir "data/${script_name}_${dataset}/output"
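# Defensive check (assumes genie/generate.py writes its outputs into the
# directory passed as --output_dir): skip visualization if nothing was written.
if [ -z "$(ls -A "data/${script_name}_${dataset}/output" 2>/dev/null)" ]; then
    echo "No generated outputs for ${dataset}; skipping visualization" >&2
    continue
fi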
# Visualize the generated tokens
python visualize.py --token_dir "data/${script_name}_${dataset}/output"
done