Fang Yunhao committed
Commit d2dbd7f · 1 Parent(s): 246b4eb

Update evaluation script.

Files changed (1)
1. evaluation.py +16 -12
evaluation.py CHANGED
@@ -34,12 +34,12 @@ if __name__ == "__main__":
         help="Path to judge model checkpoint.",
     )
     parser.add_argument(
-        "--video_path",
+        "--video_dir",
         type=str,
         help="Path to the generated video directory.",
     )
     parser.add_argument(
-        "--save_path",
+        "--save_name",
         type=str,
         help="Path to save evaluation results.",
     )
@@ -47,25 +47,27 @@ if __name__ == "__main__":
     args = parser.parse_args()
 
     validation_set = load("./worldmodelbench.json")
-    save_name = args.judge.replace("/", "_")
     if args.cot:
-        save_name += "_cot"
+        args.save_name += "_cot"
     results = None
-    if os.path.exists(save_name):
-        results = load(save_name)
-        preds = results["preds"]
-        accs = results["accs"]
+    if os.path.exists(args.save_name):
+        results = load(args.save_name)
+        try:
+            preds = results["preds"]
+            accs = results["accs"]
+        except:
+            raise "Expected keys are not found in the results."
     else:
         model = llava.load(args.judge)
-        video_root = args.video_path
 
         preds = dict()
         accs = defaultdict(list)
         for vid, v_i in tqdm(enumerate(validation_set), total=len(validation_set)):
+            ## Load video
             video_name = v_i["first_frame"].split("/")[-1].split(".")[0]
-            video = os.path.join(video_root, video_name + ".mp4")
+            video = os.path.join(args.video_dir, video_name + ".mp4")
             video = llava.Video(video)
-            ## traverse criterions
+            ## Traverse criterions
             for k in ["instruction", "physical_laws", "commonsense"]:
                 preds_i = []
                 prompt_template = PROMPT_TEMPLATES[k]
@@ -83,6 +85,7 @@ if __name__ == "__main__":
                         ).replace("Let's analyze step-by-step and conclude with", "Answer with")
                         pred = model.generate_content([video, text_prompt])
                         preds_i.append(pred)
+                        ## Always ask for violations, so a "No" is preferred!
                         accs_i.append("no" in pred.lower())
                     accs[k].append(np.mean(accs_i))
                 else:
@@ -107,7 +110,8 @@ if __name__ == "__main__":
             print(f"{k} accuracy: {np.mean(v) * 100}%.")
         else:
             print(f"{k} accuracy: {v}%.")
+    ## Save results
     if results is None:
         results = {"preds": preds, "accs": accs}
-        dump(results, f"./{save_name}.json", indent=4)
+        dump(results, f"./{args.save_name}.json", indent=4)
 
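With the renamed arguments, the script is pointed at a directory of generated videos via --video_dir and writes its results under --save_name. A sketch of the updated invocation, assuming the --judge and --cot flags that the script already reads through args.judge and args.cot, with placeholder paths:

python evaluation.py --judge /path/to/judge_checkpoint --video_dir /path/to/generated_videos --save_name my_eval_results --cot

Because the script checks os.path.exists(args.save_name) before loading the judge model, re-running with a --save_name that already exists on disk loads the cached predictions instead of re-evaluating the videos.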