from PIL import Image
import os
import torch
import json
from tqdm import tqdm
from transformers import MllamaForConditionalGeneration, AutoProcessor
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--num_beams", type=int, default=1)
args = parser.parse_args()

model_id = "/proj/berzelius-2023-191/CoT/llama-recipes/finetuned_model_llama_pixmogeo_mt/Llama-3.2-11B-Vision-Instruct_epoch_2"

model = MllamaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
).eval()
processor = AutoProcessor.from_pretrained(model_id)

num_beams = args.num_beams
max_new_tokens = 1024
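
# Stage prompts for the chain-of-thought pipeline: plan -> caption -> step-by-step reasoning -> final answer.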
summary_prompt = "\nSummarize how you will approach the problem and explain the steps you will take to reach the answer."
caption_prompt = "Provide a detailed description of the image, particularly emphasizing the aspects related to the question."
reasoning_prompt = "Provide a chain-of-thought, logical explanation of the problem. This should outline step-by-step reasoning."
conclusion_prompt = "State the final answer in a clear and direct format. It must match the correct answer exactly."
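

# Run a single example through the four-stage conversation: after each stage the
# model's reply is appended as an assistant turn before the next stage's prompt is asked.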
def generate_inner(question, image):
    kwargs = {
        "max_new_tokens": max_new_tokens,
        # top_p and temperature are ignored while do_sample=False; they are kept
        # so that sampling can be re-enabled without retuning the config.
        "top_p": 0.9,
        "pad_token_id": 128004,
        "bos_token_id": 128000,
        "do_sample": False,
        "eos_token_id": [128001, 128008, 128009],
        "temperature": 0.6,
        "num_beams": num_beams,
        "use_cache": True,
    }
    # The conversation opens with the image and the question plus the planning prompt.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": question + summary_prompt},
            ],
        }
    ]

    def infer(messages: list) -> str:
        # Render the chat template, generate, and decode only the newly generated
        # tokens, stripping the end-of-turn / end-of-text markers.
        input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
        inputs = processor(image, input_text, return_tensors="pt").to(model.device)
        output = model.generate(**inputs, **kwargs)
        generated = output[0][inputs["input_ids"].shape[1]:]
        return processor.decode(generated).replace("<|eot_id|>", "").replace("<|end_of_text|>", "")

    def tmp(assistant_text, user_prompt):
        # Turn the model's last reply and the next stage's instruction into two new chat turns.
        return [
            {
                "role": "assistant",
                "content": [
                    {"type": "text", "text": assistant_text}
                ],
            },
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": user_prompt}
                ],
            },
        ]

    summary = infer(messages)
    messages.extend(tmp(summary, caption_prompt))
    caption = infer(messages)
    messages.extend(tmp(caption, reasoning_prompt))
    reasoning = infer(messages)
    messages.extend(tmp(reasoning, conclusion_prompt))
    out = infer(messages)
    print(f"Question: {question}\nAnswer: {out}")
    return out, reasoning


def reasoning_steps_answer(img, question, choices):
    predicted_answer, reasoning = generate_inner(question, img)
    return predicted_answer, reasoning


print(f"Evaluating with {num_beams=}")
print("=" * 50)
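
# Evaluate every JSON example: load its question and image, generate the
# multi-turn response, and collect the predictions for scoring.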
all_data = []
json_paths = "/proj/berzelius-2023-191/CoT/cot_eval/jsonv2"
image_path = "/proj/berzelius-2023-191/CoT/cot_eval/images"
for file in tqdm(os.listdir(json_paths)):
    if not file.endswith(".json"):
        continue
    with open(f"{json_paths}/{file}", "r") as json_file:
        data = json.load(json_file)
    try:
        image = Image.open(f"{image_path}/{data['image']}")
        question = data["question"]
        final_answer = data["final_answer"]
        idx = data["idx"]
        reasoning_answer = data["answer"]
        if "Choices" in question:
            question += "\nPlease select the correct option by its letter."
        model_answer, reasoning = generate_inner(question, image)
        all_data.append({
            "idx": idx,
            "question": question,
            "final_answer": final_answer,
            "answer": reasoning_answer,
            "llm_response": reasoning + "\n\n\n" + model_answer,
        })
    except Exception as e:
        print("Skipping file", file, "for", e)
        continue
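
# Persist all predictions to a single JSON file for downstream scoring.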
model_pref = model_id.replace("/", "_")
with open(f"results_llavao1_pixmogeo_mt_beams{num_beams}_nosample.json", "w") as json_file:
    json.dump(all_data, json_file, indent=4)