#!/bin/bash
# llavaguard/scripts/run_minigpt_baseline.sh
set -x
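
# Evaluate MiniGPT-4 under the baseline attack modes (blur, compress) on the
# unconstrained, constrained, and qna tasks, then compute metrics for each run.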
MODEL=minigpt4
GPU_ID=4
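
# Sweep each baseline attack mode across the three evaluation tasks.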
for BASELINE_ATTACK_MODE in blur compress; do
for TASK in unconstrained constrained qna; do
# Output paths keyed by task and attack mode so runs do not overwrite each other.
INFERENCE_FILE="outputs/${MODEL}/inference_${TASK}_${MODEL}_${BASELINE_ATTACK_MODE}"
METRIC_FILE="outputs/${MODEL}/metric_${TASK}_${MODEL}_${BASELINE_ATTACK_MODE}"
SUMMARY_FILE="outputs/${MODEL}/summary_${TASK}_${MODEL}_${BASELINE_ATTACK_MODE}"
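
# Run the task-specific inference entry point.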
if [ "${TASK}" = "constrained" ]; then
echo "Running constrained"
python minigpt_constrained_inference.py --output_file ${INFERENCE_FILE} \
--gpu-id ${GPU_ID} \
--do_baseline \
--baseline_mode 1 \
--baseline_attack_mode ${BASELINE_ATTACK_MODE}
elif [ "${TASK}" = "unconstrained" ]; then
echo "Running unconstrained"
python minigpt_unconstrained_inference.py --output_file ${INFERENCE_FILE} \
--gpu-id ${GPU_ID} \
--do_baseline \
--baseline_mode 1 \
--baseline_attack_mode ${BASELINE_ATTACK_MODE}
elif [ "${TASK}" = "qna" ]; then
echo "Running qna"
python minigpt_qna.py \
--image_path ${BASELINE_ATTACK_MODE}_attack_images/adversarial_ \
--output_file ${INFERENCE_FILE} \
--gpu-id ${GPU_ID} \
--do_baseline \
--baseline_mode 1 \
--baseline_attack_mode ${BASELINE_ATTACK_MODE}
else
echo "Wrong Implementation"
exit 1
fi
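
# Score the inference outputs, then aggregate the metrics into a summary file.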
CUDA_VISIBLE_DEVICES=${GPU_ID} python get_metric.py --input ${INFERENCE_FILE} \
--output ${METRIC_FILE} \
--perplexity ${SUMMARY_FILE} \
--device cuda
python cal_metrics.py --input ${METRIC_FILE} \
--output ${SUMMARY_FILE}
done
done