import json
import os

from datasets import load_dataset
from tqdm import tqdm

ds_id = "lmms-lab/multimodal-open-r1-8k-verified"
out_root = "LLaMA-Factory/data"
dataset_name = "open_r1_v2"

ds = load_dataset(ds_id, split="train")

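# R1-style system prompt: the model is asked to put its reasoning inside
# <think> ... </think> and its final answer inside <answer> ... </answer>.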
SYSTEM_PROMPT = (
    "A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant "
    "first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning "
    "process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., "
    "<think> reasoning process here </think><answer> answer here </answer>"
)

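# Collected samples and the directory that will hold the exported images.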
data = []
os.makedirs(f"{out_root}/{dataset_name}", exist_ok=True)

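# Convert each sample: save its image to disk and build a ShareGPT-style
# conversation that references the image through the <image> placeholder.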
for idx, d in tqdm(enumerate(ds), total=len(ds)):
    # Image path relative to out_root (the LLaMA-Factory data directory);
    # the relative path is what gets stored in the exported JSON.
    base_image_path = f"{dataset_name}/{idx}.png"
    image_path = f"{out_root}/{base_image_path}"
    image = d['image']
    image.save(image_path)

    conversations = [
        {
            "role": "user",
            "content": "<image>\n" + d['problem']
        },
        {
            "role": "assistant",
            "content": d['solution']
        }
    ]

    data.append({
        "conversations": conversations,
        "images": [base_image_path],
        "system": SYSTEM_PROMPT
    })

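# Dump all converted samples into a single JSON file in the data directory.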
output_path = f"{out_root}/{dataset_name}.json"
with open(output_path, "w") as f:
    json.dump(data, f, indent=4, ensure_ascii=False)

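# Register the new dataset in LLaMA-Factory's dataset_info.json so training
# configs can reference it by name.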
with open(f"{out_root}/dataset_info.json", "r") as f:
    dataset_info = json.load(f)

dataset_info[dataset_name] = {
    "file_name": f"{dataset_name}.json",
    "formatting": "sharegpt",
    # Map LLaMA-Factory's ShareGPT column/tag names to the keys used above.
    "columns": {
        "messages": "conversations",
        "images": "images",
        "system": "system"
    },
    "tags": {
        "role_tag": "role",
        "content_tag": "content",
        "user_tag": "user",
        "assistant_tag": "assistant"
    }
}

with open(f"{out_root}/dataset_info.json", "w") as f:
    json.dump(dataset_info, f, indent=4, ensure_ascii=False)
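# The dataset should now be selectable in a LLaMA-Factory run, e.g. with
# `dataset: open_r1_v2` in the training config (assuming the default data dir).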