"""Convert lmms-lab/multimodal-open-r1-8k-verified into LLaMA-Factory's
ShareGPT format: save each image to disk, write the conversations to a JSON
file, and register the new dataset in dataset_info.json."""
import json
import os

from datasets import load_dataset
from tqdm import tqdm

ds_id = "lmms-lab/multimodal-open-r1-8k-verified"  # source dataset on the Hugging Face Hub
out_root = "LLaMA-Factory/data"  # LLaMA-Factory data directory
dataset_name = "open_r1_v2"  # used for the image folder, the JSON file, and the dataset_info key
ds = load_dataset(ds_id, split="train")
SYSTEM_PROMPT = (
    "A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant "
    "first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning "
    "process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., "
    "<think> reasoning process here </think><answer> answer here </answer>"
)
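# With this prompt, a well-formed assistant reply looks like, e.g.:
#   <think> legs 3 and 4 give hypotenuse sqrt(9 + 16) = 5 </think><answer> 5 </answer>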
data = []
os.makedirs(f"{out_root}/{dataset_name}", exist_ok=True)
for idx, d in tqdm(enumerate(ds), total=len(ds)):
    # Save each PIL image as <out_root>/<dataset_name>/<idx>.png;
    # the JSON records the path relative to out_root.
    base_image_path = f"{dataset_name}/{idx}.png"
    image_path = f"{out_root}/{base_image_path}"
    image = d['image']
    image.save(image_path)
    # ShareGPT-style turns; "<image>\n" marks where the image is spliced into the prompt.
    conversations = [
        {
            "role": "user",
            "content": "<image>\n" + d['problem']
        },
        {
            "role": "assistant",
            "content": d['solution']
        }
    ]
    data.append({
        "conversations": conversations,
        "images": [base_image_path],
        "system": SYSTEM_PROMPT
    })
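
# Each record is shaped like (illustrative values):
# {
#     "conversations": [
#         {"role": "user", "content": "<image>\n<problem text>"},
#         {"role": "assistant", "content": "<solution text>"}
#     ],
#     "images": ["open_r1_v2/0.png"],
#     "system": SYSTEM_PROMPT
# }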

output_path = f"{out_root}/{dataset_name}.json"
with open(output_path, "w", encoding="utf-8") as f:
    # ensure_ascii=False writes raw non-ASCII characters, so pin the encoding.
    json.dump(data, f, indent=4, ensure_ascii=False)

# Register the new dataset in LLaMA-Factory's dataset_info.json (shipped with the repo).
with open(f"{out_root}/dataset_info.json", "r", encoding="utf-8") as f:
    dataset_info = json.load(f)

dataset_info[dataset_name] = {
    "file_name": f"{dataset_name}.json",
    "formatting": "sharegpt",
    "columns": {
        "messages": "conversations",
        "images": "images",
        "system": "system"
    },
    "tags": {
        "role_tag": "role",
        "content_tag": "content",
        "user_tag": "user",
        "assistant_tag": "assistant"
    }
}

with open(f"{out_root}/dataset_info.json", "w", encoding="utf-8") as f:
    json.dump(dataset_info, f, indent=4, ensure_ascii=False)
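
# Once registered, the dataset can be referenced by name in a LLaMA-Factory
# training config, e.g. (a sketch; key names follow common LLaMA-Factory
# examples, check the docs for your version):
#
#   dataset: open_r1_v2
#   template: qwen2_vl   # assumption: choose the chat template for your model
#   cutoff_len: 4096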