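"""Multi-turn chain-of-thought (CoT) evaluation of a fine-tuned
Llama-3.2-11B-Vision-Instruct model.

Each (image, question) pair is prompted through four stages -- summary,
caption, reasoning, conclusion -- and the collected responses are written
to a results JSON.

Example invocation (the script name here is illustrative):
    python eval_cot.py --num_beams 4
"""
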
from PIL import Image
import os
import torch
import json
from tqdm import tqdm
from transformers import MllamaForConditionalGeneration, AutoProcessor
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--num_beams", type=int, default=1)
args = parser.parse_args()

model_id = "/proj/berzelius-2023-191/CoT/llama-recipes/finetuned_model_llama_pixmogeo_mt/Llama-3.2-11B-Vision-Instruct_epoch_2"


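# Load the fine-tuned checkpoint in bfloat16 and let device_map="auto"
# shard it across the available GPUs; eval() disables dropout.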
model = MllamaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
).eval()
processor = AutoProcessor.from_pretrained(model_id)
num_beams = args.num_beams
max_new_tokens = 1024
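# Stage prompts: each example is walked through four chat turns
# (summary -> caption -> reasoning -> conclusion), with every model
# reply fed back into the conversation before the next instruction.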
summary_prompt = "\nSummarize how you will approach the problem and explain the steps you will take to reach the answer."
caption_prompt = "Provide a detailed description of the image, particularly emphasizing the aspects related to the question."
reasoning_prompt = "Provide a chain-of-thought, logical explanation of the problem. This should outline step-by-step reasoning."
conclusion_prompt = "State the final answer in a clear and direct format. It must match the correct answer exactly."

def generate_inner(question, image):
    kwargs = {
        "max_new_tokens": max_new_tokens,
        "top_p": 0.9,
        "pad_token_id": 128004,
        "bos_token_id": 128000,
        # Greedy/beam decoding: with do_sample=False, top_p and
        # temperature are ignored by generate().
        "do_sample": False,
        "eos_token_id": [128001, 128008, 128009],
        "temperature": 0.6,
        "num_beams": num_beams,
        "use_cache": True,
    }
    # The conversation opens with the image, the question, and the
    # summary instruction; later turns append text only.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": question + summary_prompt},
            ],
        }
    ]

    def infer(messages: list) -> str:
        """Generate one assistant turn for the current conversation."""
        input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
        inputs = processor(image, input_text, return_tensors="pt").to(model.device)
        output = model.generate(**inputs, **kwargs)
        # Decode only the newly generated tokens and strip end-of-turn markers.
        decoded = processor.decode(output[0][inputs["input_ids"].shape[1]:])
        return decoded.replace("<|eot_id|>", "").replace("<|end_of_text|>", "")

    def next_turn(assistant_text, user_prompt):
        """Record the model's reply and queue the next stage instruction."""
        return [
            {"role": "assistant", "content": [{"type": "text", "text": assistant_text}]},
            {"role": "user", "content": [{"type": "text", "text": user_prompt}]},
        ]

    # Four-stage pipeline: summary -> caption -> reasoning -> conclusion.
    summary = infer(messages)
    messages.extend(next_turn(summary, caption_prompt))
    caption = infer(messages)
    messages.extend(next_turn(caption, reasoning_prompt))
    reasoning = infer(messages)
    messages.extend(next_turn(reasoning, conclusion_prompt))
    answer = infer(messages)
    print(f"Question: {question}\nAnswer: {answer}")
    return answer, reasoning


def reasoning_steps_answer(img, question, choices):
    # Thin wrapper around generate_inner; `choices` is accepted for
    # interface compatibility but is not used.
    predicted_answer, reasoning = generate_inner(question, img)
    return predicted_answer, reasoning

print(f"Evaluating with {num_beams=}")
print("="*50)

all_data = []
json_paths = "/proj/berzelius-2023-191/CoT/cot_eval/jsonv2"
image_path = "/proj/berzelius-2023-191/CoT/cot_eval/images"
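# Each JSON file holds one example; the fields used below are "image",
# "question", "final_answer", "idx", and "answer" (the reference
# reasoning chain).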
for file in tqdm(os.listdir(json_paths)):
    if not file.endswith(".json"):
        continue
    with open(f"{json_paths}/{file}", "r") as json_file:
        data = json.load(json_file)
    try:
        image = Image.open(f"{image_path}/{data['image']}")
        question = data["question"]
        final_answer = data["final_answer"]
        idx = data["idx"]
        reasoning_answer = data["answer"]
        if "Choices" in question:
            question += "\nPlease select the correct option by its letter."
        model_answer, reasoning = generate_inner(question, image)

        all_data.append({
            "idx": idx,
            "question": question,
            "final_answer": final_answer,
            "answer": reasoning_answer,
            "llm_response": reasoning + "\n\n\n" + model_answer,
        })
    except Exception as e:
        print("Skipping file", file, "for", e)
        continue

with open(f"results_llavao1_pixmogeo_mt_beams{num_beams}_nosample.json", "w") as json_file:
    json.dump(all_data, json_file, indent=4)