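"""Evaluate generated videos on WorldModelBench with a LLaVA-style judge model.

For every case in ./worldmodelbench.json, the judge scores how well the matching
video in --video_dir follows the text instruction (0-3) and answers yes/no
questions about physical-law violations and commonsense defects. Per-dimension
accuracies are printed and the raw judge outputs are dumped to
./<save_name>.json (with a "_cot" suffix when --cot is set).

Example invocation (script name and paths are placeholders):
    python evaluate.py --judge /path/to/judge_ckpt --video_dir ./videos --save_name results --cot
"""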
import argparse
import os
from collections import defaultdict

import llava
import numpy as np
from mmengine import dump, load
from tqdm import tqdm
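
# Judge prompt templates, one per evaluation dimension; the placeholders are
# filled with the per-video instruction or with a question from QUESTION_POOL.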
PROMPT_TEMPLATES = {
    "instruction": "Evaluate if this video follows the instruction: '{instruction}'. Use the following scoring criteria:\n\n- 0: The video does not follow the instruction at all.\n- 1: The video includes the correct object but performs the wrong action, or vice versa.\n- 2: The video follows the instruction and shows a tendency toward the intended action but does not fully achieve the goal.\n- 3: The video follows the instruction precisely and successfully achieves the intended goal.\n\nLet's analyze step-by-step and conclude with 'Score: [score]'.",
    "physical_laws": 'Watch the video and determine if it shows any \'{physical_laws}\' Let\'s think step-by-step and conclude with "Yes" or "No".',
    "commonsense": 'Does the video exhibit \'{commonsense}\'? Let\'s think step-by-step and conclude with "Yes" or "No".',
}
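
# Question pools for the yes/no dimensions. The instruction dimension has no
# fixed questions; it uses the text instruction attached to each validation case.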
QUESTION_POOL = {
    "instruction": None,
    "physical_laws": [
        "Violation of Newton's Law: Objects move without any external force.",
        "Violation of the Law of Conservation of Mass or Solid Constitutive Law: Objects deform or distort irregularly.",
        "Violation of Fluid Constitutive Law: Liquids flow in an unnatural or irregular manner.",
        "Violation of Non-physical Penetration: Objects unnaturally pass through each other.",
        "Violation of Gravity: Objects behave inconsistently with gravity, such as floating in the air.",
    ],
    "commonsense": [
        "Poor Aesthetics: Visually unappealing or low-quality content.",
        "Temporal Inconsistency: Noticeable flickering, choppy motion, or abrupt appearance/disappearance of irrelevant objects.",
    ],
}


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Evaluate generated videos on WorldModelBench.")
    parser.add_argument(
        "--judge",
        type=str,
        help="Path to the judge model checkpoint.",
    )
    parser.add_argument(
        "--video_dir",
        type=str,
        help="Directory containing the generated videos.",
    )
    parser.add_argument(
        "--save_name",
        type=str,
        help="Base name for the results file (saved as ./<save_name>.json).",
    )
    parser.add_argument("--cot", action="store_true", help="Enable chain-of-thought prompting for the judge.")
    args = parser.parse_args()
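
    # Each validation case provides a first-frame image path and a text instruction.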
    validation_set = load("./worldmodelbench.json")
    if args.cot:
        args.save_name += "_cot"

    # Reuse cached results if they exist; otherwise load the judge and evaluate from scratch.
    results = None
    if os.path.exists(f"./{args.save_name}.json"):
        results = load(f"./{args.save_name}.json")
        try:
            preds = results["preds"]
            accs = results["accs"]
        except KeyError as exc:
            raise KeyError("Cached results must contain the keys 'preds' and 'accs'.") from exc
    else:
        model = llava.load(args.judge)

        preds = dict()
        accs = defaultdict(list)
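        # Query the judge once per prompt for every generated video.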
        for vid, v_i in tqdm(enumerate(validation_set), total=len(validation_set)):
            # Generated videos are looked up by the name of the first-frame image.
            video_name = v_i["first_frame"].split("/")[-1].split(".")[0]
            video = os.path.join(args.video_dir, video_name + ".mp4")
            video = llava.Video(video)

            for k in ["instruction", "physical_laws", "commonsense"]:
                preds_i = []
                prompt_template = PROMPT_TEMPLATES[k]
                qs = QUESTION_POOL[k]
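                # Physical-law and commonsense checks are yes/no violation questions;
                # a "No" answer (no violation found) counts as a pass.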
                if qs is not None:
                    accs_i = []
                    for q in qs:
                        if k == "physical_laws":
                            text_prompt = prompt_template.format(physical_laws=q.lower())
                        else:
                            text_prompt = prompt_template.format(commonsense=q.lower())
                        if not args.cot:
                            text_prompt = text_prompt.replace(
                                "Let's think step-by-step and conclude with", "Answer with"
                            ).replace("Let's analyze step-by-step and conclude with", "Answer with")
                        pred = model.generate_content([video, text_prompt])
                        preds_i.append(pred)
                        accs_i.append("no" in pred.lower())
                    accs[k].append(np.mean(accs_i))
                else:
                    # Instruction following: the judge answers with "Score: [0-3]"; normalize to [0, 1].
                    text_prompt = prompt_template.format(instruction=v_i["text_instruction"])
                    if not args.cot:
                        text_prompt = text_prompt.replace(
                            "Let's think step-by-step and conclude with", "Answer with"
                        ).replace("Let's analyze step-by-step and conclude with", "Answer with")
                    pred = model.generate_content([video, text_prompt])
                    preds_i.append(pred)
                    try:
                        score = float(pred.split(":")[-1].strip(" ."))
                    except ValueError:
                        score = 0
                    accs[k].append(score / 3)
                if video_name not in preds:
                    preds[video_name] = dict()
                preds[video_name][k] = preds_i
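
    # Report the mean score per dimension as a percentage.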
    for k, v in accs.items():
        if isinstance(v, list):
            print(f"{k} accuracy: {np.mean(v) * 100}%.")
        else:
            print(f"{k} accuracy: {v}%.")

    if results is None:
        results = {"preds": preds, "accs": accs}
        dump(results, f"./{args.save_name}.json", indent=4)