ahmedheakl committed
Commit 23ababd · verified · 1 Parent(s): 78f46a2

Create llamav-o1-inference.py

Files changed (1)
llamav-o1-inference.py +129 -0
llamav-o1-inference.py ADDED
from PIL import Image
import os
import torch
import json
from tqdm import tqdm
from transformers import MllamaForConditionalGeneration, AutoProcessor
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--num_beams", type=int, default=1)
args = parser.parse_args()

# Cluster-local path to the fine-tuned Llama-3.2-11B-Vision-Instruct checkpoint.
model_id = "/proj/berzelius-2023-191/CoT/llama-recipes/finetuned_model_llama_pixmogeo_mt/Llama-3.2-11B-Vision-Instruct_epoch_2"

model = MllamaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
).eval()
processor = AutoProcessor.from_pretrained(model_id)
num_beams = args.num_beams
max_new_tokens = 1024

# Stage prompts for the four-turn pipeline: plan -> caption -> reasoning -> final answer.
summary_prompt = "\nSummarize how you will approach the problem and explain the steps you will take to reach the answer."
caption_prompt = "Provide a detailed description of the image, particularly emphasizing the aspects related to the question."
reasoning_prompt = "Provide a chain-of-thought, logical explanation of the problem. This should outline step-by-step reasoning."
conclusion_prompt = "State the final answer in a clear and direct format. It must match the correct answer exactly."
def generate_inner(question, image):
    kwargs = {
        "max_new_tokens": max_new_tokens,
        "top_p": 0.9,
        "pad_token_id": 128004,
        "bos_token_id": 128000,
        "do_sample": False,  # greedy/beam-search decoding; top_p and temperature have no effect
        "eos_token_id": [128001, 128008, 128009],
        "temperature": 0.6,
        "num_beams": num_beams,
        "use_cache": True,
    }
    # The conversation opens with the image plus the question and the planning prompt.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": question + summary_prompt},
            ],
        }
    ]

    def infer(messages: list) -> str:
        # Run one generation turn over the conversation so far, decoding only
        # the newly generated tokens and stripping Llama 3 end-of-turn markers.
        input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
        inputs = processor(image, input_text, return_tensors="pt").to(model.device)
        output = model.generate(**inputs, **kwargs)
        generated = output[0][inputs["input_ids"].shape[1]:]
        return processor.decode(generated).replace("<|eot_id|>", "").replace("<|end_of_text|>", "")

    def follow_up(assistant_text, next_prompt):
        # Record the model's last turn, then pose the next-stage prompt.
        return [
            {"role": "assistant", "content": [{"type": "text", "text": assistant_text}]},
            {"role": "user", "content": [{"type": "text", "text": next_prompt}]},
        ]

    # Chain the four stages, feeding each answer back into the conversation.
    summary = infer(messages)
    messages.extend(follow_up(summary, caption_prompt))
    caption = infer(messages)
    messages.extend(follow_up(caption, reasoning_prompt))
    reasoning = infer(messages)
    messages.extend(follow_up(reasoning, conclusion_prompt))
    out = infer(messages)
    print(f"Question: {question}\nAnswer: {out}")
    return out, reasoning


def reasoning_steps_answer(img, question, choices):
    # Thin wrapper kept for interface compatibility; `choices` is currently unused.
    predicted_answer, reasoning = generate_inner(question, img)
    return predicted_answer, reasoning

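# Optional smoke test (hypothetical addition; the image path and question below
# are placeholders, not from this repo): flip the flag to push one example
# through the staged pipeline before launching the full evaluation loop below.
RUN_SMOKE_TEST = False
if RUN_SMOKE_TEST:
    demo_image = Image.open("demo.png")  # placeholder path
    demo_answer, demo_reasoning = generate_inner("What is shown in the image?", demo_image)
    print(demo_reasoning, demo_answer, sep="\n" + "-" * 40 + "\n")
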
print(f"Evaluating with {num_beams=}")
print("=" * 50)

all_data = []
json_paths = "/proj/berzelius-2023-191/CoT/cot_eval/jsonv2"
image_path = "/proj/berzelius-2023-191/CoT/cot_eval/images"
for file in tqdm(os.listdir(json_paths)):
    if not file.endswith(".json"):
        continue
    with open(f"{json_paths}/{file}", "r") as json_file:
        data = json.load(json_file)
    try:
        image = Image.open(f"{image_path}/{data['image']}")
        question = data["question"]
        final_answer = data["final_answer"]
        idx = data["idx"]
        reasoning_answer = data["answer"]
        # Multiple-choice questions get an explicit answer-format instruction.
        if "Choices" in question:
            question += "\nPlease select the correct option by its letter."
        model_answer, reasoning = generate_inner(question, image)

        all_data.append({
            "idx": idx,
            "question": question,
            "final_answer": final_answer,
            "answer": reasoning_answer,
            "llm_response": reasoning + "\n\n\n" + model_answer,
        })
    except Exception as e:
        print("Skipping file", file, "for", e)
        continue

with open(f"results_llavao1_pixmogeo_mt_beams{num_beams}_nosample.json", "w") as json_file:
    json.dump(all_data, json_file, indent=4)
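
# A minimal scoring sketch (an assumption, not part of this script's original
# logic): counts a prediction as correct when the stored final_answer appears
# verbatim, case-insensitively, in the saved llm_response. Swap in a stricter
# parser if the answers need exact option-letter matching.
def exact_match_accuracy(results_path: str) -> float:
    with open(results_path) as f:
        results = json.load(f)
    hits = sum(
        str(r["final_answer"]).strip().lower() in r["llm_response"].lower()
        for r in results
    )
    return hits / max(len(results), 1)

# Example usage (hypothetical):
# print(exact_match_accuracy(f"results_llavao1_pixmogeo_mt_beams{num_beams}_nosample.json"))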