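#!/usr/bin/env bash
# Run attack evaluation for InstructBLIP (Vicuna-13B backbone) across three
# task settings -- unconstrained, constrained, and qna -- then score the
# generations with get_metric.py and aggregate results with cal_metrics.py.
# Assumes the per-task inference scripts, get_metric.py, and cal_metrics.py
# live in the current directory and that outputs/${MODEL}/ already exists.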
set -x

MODEL=instructblip
MODEL_PATH=/workingdir/models_hf/lmsys/vicuna-13b-v1.1
GPU_ID=2


for TASK in unconstrained constrained qna; do

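  # Per-task output paths: raw generations, per-example metrics, and the
  # aggregated summary.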
  INFERENCE_FILE="outputs/${MODEL}/inference_${MODEL}_attack_${TASK}"
  METRIC_FILE="outputs/${MODEL}/metric_${MODEL}_attack_${TASK}"
  SUMMARY_FILE="outputs/${MODEL}/summary_${MODEL}_attack_${TASK}"


  if [ "${TASK}" = "constrained" ]; then
    echo "Running constrained"
    python instructblip_constrained_inference.py --gpu-id ${GPU_ID} \
    --model_path ${MODEL_PATH} \
    --output_file ${INFERENCE_FILE} \
    --do_attack

  elif [ "${TASK}" = "unconstrained" ]; then
    echo "Running unconstrained"
    python instructblip_unconstrained_inference.py --gpu-id ${GPU_ID} \
    --model_path ${MODEL_PATH} \
    --output_file ${INFERENCE_FILE} \
    --do_attack

  elif [ "${TASK}" = "qna" ]; then
    echo "Running qna"
     python instructblip_qna.py --gpu-id ${GPU_ID} \
     --model_path ${MODEL_PATH} \
     --output_file ${INFERENCE_FILE} \
     --do_attack

  else
    echo "Wrong Implementation"
    exit 1

  fi

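  # Compute per-example metrics (and perplexity) over the generated outputs.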
  CUDA_VISIBLE_DEVICES=${GPU_ID} python get_metric.py --input "${INFERENCE_FILE}" \
    --output "${METRIC_FILE}" \
    --perplexity "${SUMMARY_FILE}" \
    --load_existing_generation \
    --device cuda

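  # Aggregate the per-example metrics into the summary file.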
  python cal_metrics.py --input "${METRIC_FILE}" \
    --output "${SUMMARY_FILE}"

done