Create get_results.py
get_results.py
ADDED (+259 -0)
@@ -0,0 +1,259 @@
import json
import os
import sys
import numpy as np
from utils import *
from tqdm import tqdm
from pydantic import BaseModel
from openai import OpenAI
from typing import List
import multiprocessing as mp
from functools import partial
from glob import glob


class AnswerScore(BaseModel):
    # structured-output schema for the GPT judge: a single integer score
    score: int

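# For reference (assuming pydantic v2, which model_json_schema() implies),
# AnswerScore.model_json_schema() yields the JSON schema passed as the judge
# tool's parameters below, approximately:
#   {"properties": {"score": {"title": "Score", "type": "integer"}},
#    "required": ["score"], "title": "AnswerScore", "type": "object"}
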
# rule-based scorers (imported from utils), keyed by dataset name
data_functions = {
    # medical
    "medicalmmt": medicalmmt_eval,
    "medicalMMMU": medicalMMMU_eval,
    "medicalMMMUPro": medicalMMMU_eval,

    # cultural
    "celebvqa": celebvqa_eval,
    "foodvqa": foodvqa_eval,
    "countriesvqa": countriesvqa_eval,

    # agro
    "agrovqa": agrovqa_eval,

    # chart and diagrams
    "iconqa": iconqa_eval,
    "chartqa": chartqa_eval,
    "diagramsMMMU": diagramsMMMU_eval,
    "diagramsvqa": diagramsvqa_eval,
    "tablesvqa": tablesvqa_eval,

    # video
    "culturevideovqa": culturevideovqa_eval,
    "videomme": videomme_eval,

    # ocr
    "ocrisi": ocrisi_eval,
    "khatt": khatt_eval,
    "isidocvqa": isidocvqa_eval,
    "patddocvqa": patddocvqa_eval,
    "patsocr": patsocr_eval,
    "evarest": evarest_eval,
    "historicalbooks": historicalbooks_eval,
    "arabicocr": arabicocr_eval,

    # vqa
    "mme": mme_eval,
    "mmbench": mmbench_eval,
    "vqammt": vqammt_eval,
    "seed": seed_eval,
    "mmmu": mmmu_eval,
    "countbench": countbench_eval,
    "hallucinationmmt": hallucinationmmt_eval,
    "pope": pope_eval,
    "scienceqa": scienceqa_eval,
    "examsv": examsv_eval,
    "gqa": gqa_eval,
    "vizwiz": vizwiz_eval,
    "infographicsvqa": infographicsvqa_eval,
    "blink": blink_eval,
    "realworldqa": realworldqa_eval,
    "mutliimagemmt": mutliimagemmt_eval,
    "muribench": muribench_eval,
    "objectcoco": objectcoco_eval,
}

# datasets scored with a GPT judge instead; values are dataset-specific user prompts
fuzz = {
    "mtvqa": mtvqa_user_prompt,
    "geochat": geochat_user_prompt,
    "mathvista": mathvista_user_prompt,
    "vqav2": vqav2_user_prompt,
}

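# Assumed contract, inferred from the scoring loop further down: each *_eval
# function takes (pred_answer, answer) and returns a per-row score (typically
# 0 or 1) that is summed into an accuracy percentage. utils itself is not part
# of this commit.
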
medical_data = ["medicalmmt", "medicalMMMU", "medicalMMMUPro"]
medical_results = {}

cultural_data = ["celebvqa", "foodvqa", "countriesvqa"]
cultural_results = {}

agro_data = ["agrovqa"]
agro_results = {}

charts_data = ["iconqa", "chartqa", "diagramsMMMU", "diagramsvqa", "tablesvqa"]
charts_results = {}

remotesensing_data = ["geochat"]
remotesensing_results = {}

video_data = ["culturevideovqa", "videomme"]
video_results = {}

ocr_data = ["ocrisi", "khatt", "isidocvqa", "patddocvqa", "patsocr", "mtvqa", "evarest", "historicalbooks", "arabicocr"]
ocr_results = {}

vqa_data = ["mme", "mmbench", "vqammt", "seed", "mmmu", "countbench", "hallucinationmmt", "pope", "mathvista", "scienceqa", "examsv", "gqa", "vizwiz", "vqav2", "infographicsvqa", "blink", "realworldqa", "mutliimagemmt", "muribench", "objectcoco"]
vqa_results = {}

def eval_gpt(row, user_prompt):
    # LLM-as-judge: ask gpt-4o-mini to score a predicted answer against the
    # ground truth. The API key is read from the environment instead of being
    # hard-coded in the source.
    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
    question = row['question'].split("\n")[0]
    pred = row['pred_answer']
    pred = pred.split("assistant\n")[-1].strip()  # drop any chat-template prefix
    gt = row['answer']

    messages = [
        {
            "role": "system",
            "content": fuzz_eval_system_prompt,
        },
        {
            "role": "user",
            "content": user_prompt.format(question=question, pred=pred, gt=gt)
        },
    ]

    # force a tool call so the score comes back as schema-validated JSON
    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        max_tokens=300,
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "answer_score",
                    "description": "Provide a 0/1 score for the semantic similarity between two sentences",
                    "parameters": AnswerScore.model_json_schema(),
                },
            }
        ],
        tool_choice={"type": "function", "function": {"name": "answer_score"}},
    )

    vqa_answer = AnswerScore.model_validate_json(
        completion.choices[0].message.tool_calls[0].function.arguments
    )
    return {
        'index': row['index'],
        'question': question,
        'pred_answer': pred,
        'answer': gt,
        'evaluation': vqa_answer.score
    }

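# Illustrative shape of the record eval_gpt returns (values are hypothetical):
#   {"index": 0, "question": "What is on the table?",
#    "pred_answer": "a cup", "answer": "a cup", "evaluation": 1}
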
def process_chunk(user_prompt, chunk):
    # score one chunk of rows; skip rows that fail (e.g. API errors) rather
    # than aborting the whole chunk
    d = []
    for row in chunk:
        try:
            d.append(eval_gpt(row, user_prompt))
        except Exception as e:
            print("ERROR", e)
            continue
    return d

def fuzz_eval(user_prompt, data):
    # run the GPT judge over `data` in parallel, one chunk per CPU core
    num_cores = mp.cpu_count()
    chunk_size = max(1, len(data) // num_cores)  # avoid a zero slice step on small datasets
    chunks = [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]
    pool = mp.Pool(num_cores)
    results = []
    process_chunk_f = partial(process_chunk, user_prompt)
    with tqdm(total=len(data)) as pbar:
        for chunk_result in pool.imap_unordered(process_chunk_f, chunks):
            results.extend(chunk_result)
            pbar.update(len(chunk_result))

    pool.close()
    pool.join()

    # accuracy = percentage of rows the judge scored as a match
    correct_count = sum(1 for item in results if item['evaluation'] == 1)
    total_count = len(results)
    return round(correct_count * 100 / total_count, 2)


MODEL = "peacock"
files = glob(f"results/{MODEL}_*.json")
for file in files:
    name = file.split("_")[-1].replace(".json", "")
    print(name)
    with open(file, "r") as f:
        data = json.load(f)
    if len(data) == 0: continue

    accuracy = 0
    if name in fuzz:
        # judge-scored datasets
        accuracy = fuzz_eval(fuzz[name], data)
    else:
        # rule-based datasets: sum per-row scores from the matching eval function
        tot = 0
        for r in data:
            tot += data_functions[name](r["pred_answer"], r["answer"])
        accuracy = round(tot * 100 / len(data), 2)
        print(f"{name}: {tot} / {len(data)} -> {accuracy:.2f}")

    # file the score under its benchmark category
    if name in medical_data:
        medical_results[name] = accuracy
    elif name in cultural_data:
        cultural_results[name] = accuracy
    elif name in agro_data:
        agro_results[name] = accuracy
    elif name in charts_data:
        charts_results[name] = accuracy
    elif name in remotesensing_data:
        remotesensing_results[name] = accuracy
    elif name in video_data:
        video_results[name] = accuracy
    elif name in ocr_data:
        ocr_results[name] = accuracy
    elif name in vqa_data:
        vqa_results[name] = accuracy


from pprint import pprint

# per-category breakdown plus the unweighted average within each category
for label, results in [
    ("Medical", medical_results),
    ("cultural", cultural_results),
    ("agro", agro_results),
    ("charts", charts_results),
    ("remotesensing", remotesensing_results),
    ("video", video_results),
    ("ocr", ocr_results),
    ("vqa", vqa_results),
]:
    print(f"\n{label} Results")
    pprint(results)
    if len(results) > 0:
        print(f"{label} average:", round(sum(results.values()) / len(results), 2))
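
For context: the script expects one JSON file per dataset under results/, named {MODEL}_{dataset}.json and holding a list of records with index, question, pred_answer, and answer fields (inferred from the fields the script reads; the inference code that produces these files is not part of this commit). A minimal sketch of preparing such a file and running the evaluation, with a hypothetical record and dataset choice:

import json, os

# one hypothetical record, normally produced by the model-inference step
rows = [{
    "index": 0,
    "question": "What objects are on the table?",
    "pred_answer": "assistant\nA cup and a plate.",
    "answer": "A cup and a plate.",
}]

os.makedirs("results", exist_ok=True)
with open("results/peacock_gqa.json", "w") as f:
    json.dump(rows, f)

# judge-scored datasets (mtvqa, geochat, mathvista, vqav2) also need:
#   export OPENAI_API_KEY=...
# then run: python get_results.py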