import json
import multiprocessing as mp
from functools import partial
from glob import glob
from pprint import pprint

from openai import OpenAI
from pydantic import BaseModel
from tqdm import tqdm

from utils import *

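# Structured-output schema for the GPT judge: `score` should be 1 when the
# prediction matches the ground truth and 0 otherwise.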
class AnswerScore(BaseModel):
    score: int

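# Rule-based per-sample evaluators from utils, keyed by dataset name. Each one
# takes (pred_answer, answer) and returns a score that is summed into accuracy.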
data_functions = {
    # medical
    "medicalmmt": medicalmmt_eval,
    "medicalMMMU": medicalMMMU_eval,
    "medicalMMMUPro": medicalMMMU_eval,  # reuses the medicalMMMU evaluator

    # cultural
    "celebvqa": celebvqa_eval,
    "foodvqa": foodvqa_eval,
    "countriesvqa": countriesvqa_eval,

    # agriculture
    "agrovqa": agrovqa_eval,

    # charts, diagrams, and tables
    "iconqa": iconqa_eval,
    "chartqa": chartqa_eval,
    "diagramsMMMU": diagramsMMMU_eval,
    "diagramsvqa": diagramsvqa_eval,
    "tablesvqa": tablesvqa_eval,

    # video
    "culturevideovqa": culturevideovqa_eval,
    "videomme": videomme_eval,

    # OCR / document understanding
    "ocrisi": ocrisi_eval,
    "khatt": khatt_eval,
    "isidocvqa": isidocvqa_eval,
    "patddocvqa": patddocvqa_eval,
    "patsocr": patsocr_eval,
    "evarest": evarest_eval,
    "historicalbooks": historicalbooks_eval,
    "arabicocr": arabicocr_eval,

    # general VQA
    "mme": mme_eval,
    "mmbench": mmbench_eval,
    "vqammt": vqammt_eval,
    "seed": seed_eval,
    "mmmu": mmmu_eval,
    "countbench": countbench_eval,
    "hallucinationmmt": hallucinationmmt_eval,
    "pope": pope_eval,
    "scienceqa": scienceqa_eval,
    "examsv": examsv_eval,
    "gqa": gqa_eval,
    "vizwiz": vizwiz_eval,
    "infographicsvqa": infographicsvqa_eval,
    "blink": blink_eval,
    "realworldqa": realworldqa_eval,
    "mutliimagemmt": mutliimagemmt_eval,  # spelling matches the dataset files
    "muribench": muribench_eval,
    "objectcoco": objectcoco_eval,
}

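# Open-ended datasets scored by a GPT judge rather than a rule-based evaluator,
# mapped to their dataset-specific judge prompts.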
fuzz = {
    "mtvqa": mtvqa_user_prompt,
    "geochat": geochat_user_prompt,
    "mathvista": mathvista_user_prompt,
    "vqav2": vqav2_user_prompt,
}

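# Dataset groupings used to aggregate per-category averages.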
medical_data = ["medicalmmt", "medicalMMMU", "medicalMMMUPro"]
medical_results = {}

cultural_data = ["celebvqa", "foodvqa", "countriesvqa"]
cultural_results = {}

agro_data = ["agrovqa"]
agro_results = {}

charts_data = ["iconqa", "chartqa", "diagramsMMMU", "diagramsvqa", "tablesvqa"]
charts_results = {}

remotesensing_data = ["geochat"]
remotesensing_results = {}

video_data = ["culturevideovqa", "videomme"]
video_results = {}

ocr_data = ["ocrisi", "khatt", "isidocvqa", "patddocvqa", "patsocr", "mtvqa", "evarest", "historicalbooks", "arabicocr"]
ocr_results = {}

vqa_data = ["mme", "mmbench", "vqammt", "seed", "mmmu", "countbench", "hallucinationmmt", "pope", "mathvista", "scienceqa", "examsv", "gqa", "vizwiz", "vqav2", "infographicsvqa", "blink", "realworldqa", "mutliimagemmt", "muribench", "objectcoco"]
vqa_results = {}

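# Judge a single row with gpt-4o-mini. tool_choice forces the model to call
# answer_score, so its arguments always parse into an AnswerScore.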
def eval_gpt(row, user_prompt):
    client = OpenAI()
    question = row['question'].split("\n")[0]
    pred = row['pred_answer']
    pred = pred.split("assistant\n")[-1].strip()  # keep only the assistant turn
    gt = row['answer']

    messages = [
        {
            "role": "system",
            "content": fuzz_eval_system_prompt,
        },
        {
            "role": "user",
            "content": user_prompt.format(question=question, pred=pred, gt=gt)
        },
    ]

    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        max_tokens=300,
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "answer_score",
                    "description": "Provide a score of 0 or 1 for the semantic similarity between two sentences",
                    "parameters": AnswerScore.model_json_schema(),
                },
            }
        ],
        tool_choice={"type": "function", "function": {"name": "answer_score"}},
    )

    vqa_answer = AnswerScore.model_validate_json(
        completion.choices[0].message.tool_calls[0].function.arguments
    )
    return {
        'index': row['index'],
        'question': question,
        'pred_answer': pred,
        'answer': gt,
        'evaluation': vqa_answer.score,
    }

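# Worker: evaluate one chunk of rows, skipping any row that raises (API errors,
# malformed rows) so a single failure does not kill the whole pool.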
def process_chunk(user_prompt, chunk):
    d = []
    for row in chunk:
        try:
            d.append(eval_gpt(row, user_prompt))
        except Exception as e:
            print("ERROR", e)
            continue
    return d

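# Fan the rows out across all CPU cores and return the percentage of rows the
# judge scored as correct.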
def fuzz_eval(user_prompt, data):
    num_cores = mp.cpu_count()
    chunk_size = max(1, len(data) // num_cores)  # avoid a zero step when len(data) < num_cores
    chunks = [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]
    pool = mp.Pool(num_cores)
    results = []
    process_chunk_f = partial(process_chunk, user_prompt)
    with tqdm(total=len(data)) as pbar:
        for chunk_result in pool.imap_unordered(process_chunk_f, chunks):
            results.extend(chunk_result)
            pbar.update(len(chunk_result))

    pool.close()
    pool.join()

    correct_count = sum(1 for item in results if item['evaluation'] == 1)
    total_count = len(results)
    if total_count == 0:  # every row failed inside the workers
        return 0.0
    return round(correct_count * 100 / total_count, 2)

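# Score every results file for one model. Files are expected to be named
# results/{MODEL}_{dataset}.json, e.g. results/peacock_chartqa.json.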
MODEL = "peacock"
files = glob(f"results/{MODEL}_*.json")
for file in files:
    name = file.split("_")[-1].replace(".json", "")
    print(name)
    with open(file, "r") as f:
        data = json.load(f)
    if len(data) == 0: continue

    accuracy = 0
    if name in fuzz:
        accuracy = fuzz_eval(fuzz[name], data)
    else:
        tot = 0
        for r in data:
            tot += data_functions[name](r["pred_answer"], r["answer"])
        accuracy = round(tot * 100 / len(data), 2)
        print(f"{name}: {tot} / {len(data)} -> {accuracy:.2f}")

    if name in medical_data:
        medical_results[name] = accuracy
    elif name in cultural_data:
        cultural_results[name] = accuracy
    elif name in agro_data:
        agro_results[name] = accuracy
    elif name in charts_data:
        charts_results[name] = accuracy
    elif name in remotesensing_data:
        remotesensing_results[name] = accuracy
    elif name in video_data:
        video_results[name] = accuracy
    elif name in ocr_data:
        ocr_results[name] = accuracy
    elif name in vqa_data:
        vqa_results[name] = accuracy

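# Report per-dataset scores and the unweighted mean for each category.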
categories = [
    ("Medical", medical_results),
    ("Cultural", cultural_results),
    ("Agro", agro_results),
    ("Charts", charts_results),
    ("Remote sensing", remotesensing_results),
    ("Video", video_results),
    ("OCR", ocr_results),
    ("VQA", vqa_results),
]
for label, results in categories:
    print(f"\n{label} Results")
    pprint(results)
    if len(results) > 0:
        print(f"{label} average:", round(sum(results.values()) / len(results), 2))