Fang Yunhao
committed on
Commit 246b4eb
1 Parent(s): a4cbf77
Add evaluation script.
evaluation.py +113 -0
evaluation.py
ADDED
@@ -0,0 +1,113 @@
import argparse
import os
from collections import defaultdict

import llava
import numpy as np
from mmengine import load, dump
from tqdm import tqdm


PROMPT_TEMPLATES = {
    "instruction": "Evaluate if this video follows the instruction: '{instruction}'. Use the following scoring criteria:\n\n- 0: The video does not follow the instruction at all.\n- 1: The video includes the correct object but performs the wrong action, or vice versa.\n- 2: The video follows the instruction and shows a tendency toward the intended action but does not fully achieve the goal.\n- 3: The video follows the instruction precisely and successfully achieves the intended goal.\n\nLet's analyze step-by-step and conclude with 'Score: [score]'.",
    "physical_laws": 'Watch the video and determine if it shows any \'{physical_laws}\' Let\'s think step-by-step and conclude with "Yes" or "No".',
    "commonsense": 'Does the video exhibit \'{commonsense}\'? Let\'s think step-by-step and conclude with "Yes" or "No".',
}

QUESTION_POOL = {
    "instruction": None,
    "physical_laws": [
        "Violation of Newton's Law: Objects move without any external force.",
        "Violation of the Law of Conservation of Mass or Solid Constitutive Law: Objects deform or distort irregularly.",
        "Violation of Fluid Constitutive Law: Liquids flow in an unnatural or irregular manner.",
        "Violation of Non-physical Penetration: Objects unnaturally pass through each other.",
        "Violation of Gravity: Objects behave inconsistently with gravity, such as floating in the air.",
    ],
    # Key must match the criterion name used in the loop below ("commonsense"),
    # otherwise the lookup raises a KeyError.
    "commonsense": [
        "Poor Aesthetics: Visually unappealing or low-quality content.",
        "Temporal Inconsistency: Noticeable flickering, choppy motion, or abrupt appearance/disappearance of irrelevant objects.",
    ],
}

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Script for evaluating the WorldModelBenchmark.")
    parser.add_argument(
        "--judge",
        type=str,
        help="Path to the judge model checkpoint.",
    )
    parser.add_argument(
        "--video_path",
        type=str,
        help="Path to the generated video directory.",
    )
    parser.add_argument(
        "--save_path",
        type=str,
        default=".",
        help="Directory in which to save evaluation results.",
    )
    parser.add_argument("--cot", action="store_true", help="Enable Chain-of-Thought prompting.")
    args = parser.parse_args()

    validation_set = load("./worldmodelbench.json")

    # Results are cached per judge model (and per prompting mode) so reruns skip generation.
    save_name = args.judge.replace("/", "_")
    if args.cot:
        save_name += "_cot"
    save_file = os.path.join(args.save_path, f"{save_name}.json")

    results = None
    if os.path.exists(save_file):
        results = load(save_file)
        preds = results["preds"]
        accs = results["accs"]
    else:
        model = llava.load(args.judge)
        video_root = args.video_path

        preds = dict()
        accs = defaultdict(list)
        for v_i in tqdm(validation_set):
            # The generated video is expected to share its basename with the first frame.
            video_name = v_i["first_frame"].split("/")[-1].split(".")[0]
            video = os.path.join(video_root, video_name + ".mp4")
            video = llava.Video(video)
            ## Traverse the three evaluation criteria.
            for k in ["instruction", "physical_laws", "commonsense"]:
                preds_i = []
                prompt_template = PROMPT_TEMPLATES[k]
                qs = QUESTION_POOL[k]
                if qs is not None:
                    # Yes/No questions: ask about each violation and count "No" (no violation) as correct.
                    accs_i = []
                    for q in qs:
                        if k == "physical_laws":
                            text_prompt = prompt_template.format(physical_laws=q.lower())
                        else:
                            text_prompt = prompt_template.format(commonsense=q.lower())
                        if not args.cot:
                            text_prompt = text_prompt.replace(
                                "Let's think step-by-step and conclude with", "Answer with"
                            ).replace("Let's analyze step-by-step and conclude with", "Answer with")
                        pred = model.generate_content([video, text_prompt])
                        preds_i.append(pred)
                        accs_i.append("no" in pred.lower())
                    accs[k].append(np.mean(accs_i))
                else:
                    # Instruction following: parse the 0-3 score from the judge's answer and normalize to [0, 1].
                    text_prompt = prompt_template.format(instruction=v_i["text_instruction"])
                    if not args.cot:
                        text_prompt = text_prompt.replace(
                            "Let's think step-by-step and conclude with", "Answer with"
                        ).replace("Let's analyze step-by-step and conclude with", "Answer with")
                    pred = model.generate_content([video, text_prompt])
                    preds_i.append(pred)
                    try:
                        score = float(pred.split(":")[-1].strip(" ."))
                    except ValueError:
                        score = 0
                    accs[k].append(score / 3)
                if video_name not in preds:
                    preds[video_name] = dict()
                preds[video_name][k] = preds_i

    ## Print results.
    for k, v in accs.items():
        if isinstance(v, list):
            print(f"{k} accuracy: {np.mean(v) * 100}%.")
        else:
            print(f"{k} accuracy: {v}%.")

    if results is None:
        results = {"preds": preds, "accs": accs}
        dump(results, save_file, indent=4)
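
For context, a minimal sketch of the kind of record this script reads from worldmodelbench.json. Only the two keys the code actually uses ("first_frame" and "text_instruction") are taken from the script above; the file paths and instruction text below are hypothetical, purely for illustration.

# Hypothetical worldmodelbench.json entry; keys follow the script above, values are illustrative.
example_entry = {
    "first_frame": "first_frames/pour_water_001.jpg",                 # hypothetical path
    "text_instruction": "Pour water from the kettle into the cup.",   # hypothetical instruction
}

# The script derives the generated video's filename from the first-frame basename,
# so a matching <video_path>/pour_water_001.mp4 is expected to exist.
video_name = example_entry["first_frame"].split("/")[-1].split(".")[0]
print(video_name)  # -> "pour_water_001"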