ahmedheakl committed
Commit cb2eba1 · verified · 1 Parent(s): eb170d1

Create eval_peacock.py

Files changed (1)
  1. eval_peacock.py +205 -0
eval_peacock.py ADDED
@@ -0,0 +1,205 @@
import math
import torch
from typing import Callable, List
import pandas as pd
import json
import os
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModel
from utils import *
from PIL import Image
import shutil
from glob import glob
import numpy as np
from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration, AddedToken
from datasets import load_dataset

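# Load the Peacock checkpoint and its InstructBLIP processor, and move the model to the GPU when available.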
model = InstructBlipForConditionalGeneration.from_pretrained("UBC-NLP/Peacock")
processor = InstructBlipProcessor.from_pretrained("UBC-NLP/Peacock")
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

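# Image handlers: single-image datasets expose one PIL image under "image";
# multi-image datasets expose up to nine images under "image_0" ... "image_8".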
def handle_images1(row: pd.Series) -> List[Image.Image]:
    return [row["image"].convert("RGB")]


def handle_images2(row: pd.Series) -> List[Image.Image]:
    return [
        row.get(f"image_{i}", None).convert("RGB")
        for i in range(9)
        if row.get(f"image_{i}", None) is not None
    ]


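# Write the selected images to temp/image<i>.png so they can be reloaded for inference.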
def save_images(images: List[Image.Image], with_resize: bool = True):
    for i, image in enumerate(images):
        if image is None:
            continue

        img = image
        if with_resize:
            width, height = img.size
            req_dim = 420
            # Aspect-preserving dimensions are computed but not used; the image is resized to a fixed 420x420.
            new_width = req_dim if width > height else int((req_dim / height) * width)
            new_height = int((req_dim / width) * height) if width > height else req_dim
            img = img.resize((420, 420))
        img = img.convert("RGB")
        img.save(f"temp/image{i}.png")


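# Generate an answer for a single prompt/image pair with greedy decoding; only the first image is used.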
def generate_qwen(prompt: str, images: List[Image.Image]) -> str:
    images = images[:1]
    save_images(images)
    inputs = processor(images=Image.open("temp/image0.png").convert("RGB"), text=prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        do_sample=False,
        num_beams=1,
        max_length=256,
        min_length=2,
        top_p=0.9,
        temperature=1,
        length_penalty=1.0,
        repetition_penalty=1.5,
    )
    generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
    return generated_text


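# Dataset column that holds the ground-truth answer.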
answer_field = "answer"


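# Evaluate one (index, row) pair: build the prompt, run generation, and record the question,
# prediction, and gold answer. Returns None if anything fails for this row.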
def process_row(row: tuple, fn: Callable, fn_images: Callable) -> dict:
    i, row = row
    d = {}
    try:
        d["index"] = i
        images = fn_images(row)
        d["pred_answer"] = generate_qwen(fn(row), images)
        d["answer"] = str(row[answer_field])
        d["question"] = fn(row)
        print(f"Question: {fn(row)}\nPredicted: {d['pred_answer']}")
        return d
    except Exception as e:
        print(f"Error processing row: {e}")
        return None


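# Map each benchmark name to the prompt-building function imported from utils.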
name_to_processor = {
    "mmmu": mmmu_doc_to_text,
    "mme": mme_doc_to_text,
    "gqa": gqa_doc_to_text,
    "realworldqa": realworldqa_doc_to_text,
    "vqav2": vqav2_doc_to_text,
    "vizwiz": vizwiz_doc_to_text,
    "pope": pope_doc_to_text,
    "countbench": countbench_doc_to_text,
    "medicalMMMU": medicalMMMU_doc_to_text,
    "medicalMMMUPro": medicalMMMUPro_doc_to_text,
    "diagramsMMMU": diagramsMMMU_doc_to_text,
    "mmbench": mmbench_doc_to_text,
    "seed": seed_doc_to_text,
    "medicalmmt": medicalmmt_doc_to_text,
    "hallucinationmmt": hallucinationmmt_doc_to_text,
    "vqammt": vqammt_doc_to_text,
    "mutliimagemmt": mutliimagemmt_doc_to_text,
    "isidocvqa": isidocvqa_doc_to_text,
    "patddocvqa": patddocvqa_doc_to_text,
    "celebvqa": celebvqa_doc_to_text,
    "countriesvqa": countriesvqa_doc_to_text,
    "foodvqa": foodvqa_doc_to_text,
    "objectcoco": objectcoco_doc_to_text,
    "blink": blink_doc_to_text,
    "examsv": examsv_doc_to_text,
    "chartqa": chartqa_doc_to_text,
    "mtvqa": mtvqa_doc_to_text,
    "mathvista": mathvista_doc_to_text,
    "infographicsvqa": infographicsvqa_doc_to_text,
    "agrovqa": agrovqa_doc_to_text,
    "diagramsvqa": diagramsvqa_doc_to_text,
    "tablesvqa": tablesvqa_doc_to_text,
    "iconqa": iconqa_doc_to_text,
    "scienceqa": scienceqa_doc_to_text,
    "ocrisi": ocrisi_doc_to_text,
    "evarest": evarest_doc_to_text,
    "historicalbooks": historicalbooks_doc_to_text,
    "khatt": khatt_doc_to_text,
    "patsocr": patsocr_doc_to_text,
    "arabicocr": arabicocr_doc_to_text,
    "culturevideovqa": culturevideovqa_doc_to_text,
    "videomme": videomme_doc_to_text,
    "geochat": geochat_doc_to_text,
    "muribench": muribench_doc_to_text,
}
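
# Map each benchmark to its image handler; commented-out entries are excluded from this run.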
name_to_handle_type = {
    # "mmmu": handle_images2,
    # "mme": handle_images1,
    # "gqa": handle_images1,
    # "realworldqa": handle_images1,
    # "vqav2": handle_images1,
    # "vizwiz": handle_images1,
    # "pope": handle_images1,
    # "countbench": handle_images1,
    # "medicalMMMU": handle_images2,
    # "medicalMMMUPro": handle_images2,
    # "diagramsMMMU": handle_images2,
    # "mmbench": handle_images1,
    # "seed": handle_images2,
    "vqammt": handle_images1,
    "isidocvqa": handle_images1,
    "patddocvqa": handle_images1,
    "celebvqa": handle_images1,
    "countriesvqa": handle_images1,
    "foodvqa": handle_images1,
    "objectcoco": handle_images1,
    "blink": handle_images2,
    "examsv": handle_images1,
    "chartqa": handle_images1,
    "mtvqa": handle_images1,
    "mathvista": handle_images1,
    "infographicsvqa": handle_images1,
    "agrovqa": handle_images1,
    "diagramsvqa": handle_images1,
    "tablesvqa": handle_images1,
    "scienceqa": handle_images1,
    "geochat": handle_images1,
    "ocrisi": handle_images1,
    "evarest": handle_images1,
    "historicalbooks": handle_images1,
    "khatt": handle_images1,
    "patsocr": handle_images1,
    "hallucinationmmt": handle_images1,
    "medicalmmt": handle_images1,
    "arabicocr": handle_images1,
    # "iconqa": handle_images2,
    # "culturevideovqa": handle_images2,
    # "muribench": handle_images2,
    # "videomme": handle_images2,
    # "mutliimagemmt": handle_images2,
}
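
# For every enabled benchmark: load its Hugging Face split, run each row through the model,
# and dump the per-row predictions to results/peacock_<name>.json.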
names = list(name_to_handle_type.keys())
os.makedirs("results", exist_ok=True)
os.makedirs("temp", exist_ok=True)

for name in tqdm(names):
    try:
        ds = load_dataset(f"ahmedheakl/arabicp_{name}", split="train", num_proc=4)
    except Exception:
        continue
    # if os.path.exists(f"results/peacock_{name}.json"):
    #     with open(f"results/peacock_{name}.json", "r", encoding="utf-8") as f:
    #         dd = json.load(f)
    #     if len(dd) >= (len(ds) // 2): continue

    df = pd.DataFrame(ds)
    print(f"Evaluating {name} dataset")
    fn = name_to_processor[name]
    fn_images = name_to_handle_type[name]
    results = []
    for i in tqdm(range(len(df))):
        results.append(process_row((i, df.iloc[i]), fn, fn_images))
    report = [r for r in results if r is not None]
    with open(f"results/peacock_{name}.json", "w", encoding="utf-8") as f:
        json.dump(report, f, ensure_ascii=False, indent=2)

shutil.rmtree("temp")