|
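"""Evaluate the Peacock (InstructBLIP-based) model on a suite of Arabic
vision-language benchmarks and write per-dataset predictions to
results/peacock_<name>.json."""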
import json
import os
import shutil
from typing import Callable, List, Optional

import pandas as pd
import torch
from datasets import load_dataset
from PIL import Image
from tqdm import tqdm
from transformers import InstructBlipForConditionalGeneration, InstructBlipProcessor

from utils import *  # provides the per-dataset *_doc_to_text prompt builders
|
# Load the Peacock (InstructBLIP-based) checkpoint and its processor.
model = InstructBlipForConditionalGeneration.from_pretrained("UBC-NLP/Peacock")
processor = InstructBlipProcessor.from_pretrained("UBC-NLP/Peacock")
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
|
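# Image extraction helpers: datasets store images either in a single
# `image` column or in numbered `image_0` ... `image_8` columns; both
# handlers below normalize to a list of RGB PIL images.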
def handle_images1(row: pd.Series) -> List[Image.Image]:
    return [row["image"].convert("RGB")]


def handle_images2(row: pd.Series) -> List[Image.Image]:
    return [
        img.convert("RGB")
        for i in range(9)
        if (img := row.get(f"image_{i}")) is not None
    ]
|
def save_images(images: List[Image.Image], with_resize: bool = True):
    """Write images to temp/image{i}.png, scaling the longer side to 420px."""
    for i, image in enumerate(images):
        if image is None:
            continue
        img = image
        if with_resize:
            width, height = img.size
            req_dim = 420
            # Preserve aspect ratio: the longer side becomes req_dim.
            new_width = req_dim if width > height else int((req_dim / height) * width)
            new_height = int((req_dim / width) * height) if width > height else req_dim
            img = img.resize((new_width, new_height))
        img = img.convert("RGB")
        img.save(f"temp/image{i}.png")
|
def generate_peacock(prompt: str, images: List[Image.Image]) -> str:
    """Greedy-decode one answer; Peacock consumes a single image per prompt."""
    images = images[:1]  # the model takes one image, so keep only the first
    save_images(images)
    image = Image.open("temp/image0.png").convert("RGB")
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        do_sample=False,  # greedy decoding, so sampling parameters are unused
        num_beams=1,
        max_length=256,
        min_length=2,
        length_penalty=1.0,
        repetition_penalty=1.5,
    )
    return processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
|
answer_field = "answer"  # dataset column holding the ground-truth answer
|
def process_row(item: tuple, fn: Callable, fn_images: Callable) -> Optional[dict]:
    i, row = item
    try:
        question = fn(row)
        images = fn_images(row)
        d = {
            "index": i,
            "pred_answer": generate_peacock(question, images),
            "answer": str(row[answer_field]),
            "question": question,
        }
        print(f"Question: {question}\nPredicted: {d['pred_answer']}")
        return d
    except Exception as e:
        print(f"Error processing row {i}: {e}")
        return None
|
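# Prompt builders, one per benchmark, all defined in utils.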
|
name_to_processor = {
    "mmmu": mmmu_doc_to_text,
    "mme": mme_doc_to_text,
    "gqa": gqa_doc_to_text,
    "realworldqa": realworldqa_doc_to_text,
    "vqav2": vqav2_doc_to_text,
    "vizwiz": vizwiz_doc_to_text,
    "pope": pope_doc_to_text,
    "countbench": countbench_doc_to_text,
    "medicalMMMU": medicalMMMU_doc_to_text,
    "medicalMMMUPro": medicalMMMUPro_doc_to_text,
    "diagramsMMMU": diagramsMMMU_doc_to_text,
    "mmbench": mmbench_doc_to_text,
    "seed": seed_doc_to_text,
    "medicalmmt": medicalmmt_doc_to_text,
    "hallucinationmmt": hallucinationmmt_doc_to_text,
    "vqammt": vqammt_doc_to_text,
    "mutliimagemmt": mutliimagemmt_doc_to_text,
    "isidocvqa": isidocvqa_doc_to_text,
    "patddocvqa": patddocvqa_doc_to_text,
    "celebvqa": celebvqa_doc_to_text,
    "countriesvqa": countriesvqa_doc_to_text,
    "foodvqa": foodvqa_doc_to_text,
    "objectcoco": objectcoco_doc_to_text,
    "blink": blink_doc_to_text,
    "examsv": examsv_doc_to_text,
    "chartqa": chartqa_doc_to_text,
    "mtvqa": mtvqa_doc_to_text,
    "mathvista": mathvista_doc_to_text,
    "infographicsvqa": infographicsvqa_doc_to_text,
    "agrovqa": agrovqa_doc_to_text,
    "diagramsvqa": diagramsvqa_doc_to_text,
    "tablesvqa": tablesvqa_doc_to_text,
    "iconqa": iconqa_doc_to_text,
    "scienceqa": scienceqa_doc_to_text,
    "ocrisi": ocrisi_doc_to_text,
    "evarest": evarest_doc_to_text,
    "historicalbooks": historicalbooks_doc_to_text,
    "khatt": khatt_doc_to_text,
    "patsocr": patsocr_doc_to_text,
    "arabicocr": arabicocr_doc_to_text,
    "culturevideovqa": culturevideovqa_doc_to_text,
    "videomme": videomme_doc_to_text,
    "geochat": geochat_doc_to_text,
    "muribench": muribench_doc_to_text,
}
|
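# Image handlers per benchmark. Only benchmarks listed here are evaluated
# below; the remaining entries in name_to_processor are skipped.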
name_to_handle_type = {
    "vqammt": handle_images1,
    "isidocvqa": handle_images1,
    "patddocvqa": handle_images1,
    "celebvqa": handle_images1,
    "countriesvqa": handle_images1,
    "foodvqa": handle_images1,
    "objectcoco": handle_images1,
    "blink": handle_images2,
    "examsv": handle_images1,
    "chartqa": handle_images1,
    "mtvqa": handle_images1,
    "mathvista": handle_images1,
    "infographicsvqa": handle_images1,
    "agrovqa": handle_images1,
    "diagramsvqa": handle_images1,
    "tablesvqa": handle_images1,
    "scienceqa": handle_images1,
    "geochat": handle_images1,
    "ocrisi": handle_images1,
    "evarest": handle_images1,
    "historicalbooks": handle_images1,
    "khatt": handle_images1,
    "patsocr": handle_images1,
    "hallucinationmmt": handle_images1,
    "medicalmmt": handle_images1,
    "arabicocr": handle_images1,
}
|
names = list(name_to_handle_type.keys())
os.makedirs("results", exist_ok=True)
os.makedirs("temp", exist_ok=True)  # scratch directory for resized images
|
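# Evaluate each benchmark: build prompts, run generation row by row, and
# dump the predictions to results/peacock_<name>.json.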
for name in tqdm(names):
    try:
        ds = load_dataset(f"ahmedheakl/arabicp_{name}", split="train", num_proc=4)
    except Exception as e:
        print(f"Skipping {name}: could not load dataset ({e})")
        continue

    df = pd.DataFrame(ds)
    print(f"Evaluating {name} dataset")
    fn = name_to_processor[name]
    fn_images = name_to_handle_type[name]
    results = []
    for i in tqdm(range(len(df))):
        results.append(process_row((i, df.iloc[i]), fn, fn_images))
    report = [r for r in results if r is not None]  # drop failed rows
    with open(f"results/peacock_{name}.json", "w", encoding="utf-8") as f:
        json.dump(report, f, ensure_ascii=False, indent=2)
|
shutil.rmtree("temp")  # clean up the scratch image directory
|
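
# A minimal scoring sketch, not part of the original pipeline: it assumes
# exact string match is an acceptable metric, which may not hold for every
# benchmark above; the helper name is illustrative.
def exact_match_accuracy(report_path: str) -> float:
    """Fraction of rows whose predicted answer equals the gold answer."""
    with open(report_path, encoding="utf-8") as f:
        rows = json.load(f)
    if not rows:
        return 0.0
    hits = sum(r["pred_answer"].strip() == r["answer"].strip() for r in rows)
    return hits / len(rows)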