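"""Convert ahmedheakl/r1_90k_instruct into a ShareGPT-style JSON dataset.

Downloads the Hugging Face dataset, saves each sample's image under
data/r1_onevision_90k/, normalizes the <image> token placement in each
conversation, and registers the output in data/dataset_info.json (the
registry format used by LLaMA-Factory-style training setups).
"""
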
import datasets
from tqdm import tqdm
import os
import json
import multiprocessing as mp
from functools import partial

def process_item(idx_data, dataset_name):
    """Save one sample's image to disk and return a ShareGPT-style record."""
    idx, d = idx_data
    conversations = d['conversations']
    image = d['image']
    # Images live under <out_root>/<dataset_name>/<idx>.png; the record keeps
    # the path relative to out_root.
    image_path = f"{dataset_name}/{idx}.png"
    os.makedirs(os.path.dirname(f"{out_root}/{image_path}"), exist_ok=True)
    image.save(f"{out_root}/{image_path}")
    # Strip any stray <image> tokens, then prepend exactly one to the second
    # turn (index 1, as in the original layout of this dataset's conversations).
    for i, c in enumerate(conversations):
        conversations[i]['content'] = c['content'].replace("<image>", "")
    if len(conversations) > 1:
        conversations[1]['content'] = "<image>" + conversations[1]['content']
    return {
        "images": [image_path],
        "system": SYSTEM_PROMPT,
        "conversations": conversations
    }
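
# Illustrative shape of one returned record (field contents are hypothetical;
# roles depend on the source dataset):
# {
#     "images": ["r1_onevision_90k/0.png"],
#     "system": "<SYSTEM_PROMPT text>",
#     "conversations": [
#         {"role": "...", "content": "..."},
#         {"role": "...", "content": "<image>..."}
#     ]
# }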

ds_id = "ahmedheakl/r1_90k_instruct"
dataset_name = "r1_onevision_90k"
out_root = "data"  
SYSTEM_PROMPT = (
    "A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant "
    "first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning "
    "process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., "
    "<think> reasoning process here </think><answer> answer here </answer>"
)
os.makedirs(f"{out_root}/{dataset_name}", exist_ok=True)
print("Loading dataset...")
ds = datasets.load_dataset(ds_id, split="train", trust_remote_code=True)
num_processes = mp.cpu_count() - 6
print(f"Using {num_processes} processes")
process_func = partial(process_item, dataset_name=dataset_name)
with mp.Pool(processes=num_processes) as pool:
    data = list(tqdm(pool.imap(process_func, enumerate(ds)), total=len(ds), desc="Processing items"))
output_path = f"{out_root}/{dataset_name}.json"
print(f"Saving dataset to {output_path}")
with open(output_path, "w") as f:
    json.dump(data, f, indent=4, ensure_ascii=False)
with open(f"{out_root}/dataset_info.json", "r") as f:
    dataset_info = json.load(f)
dataset_info[dataset_name] = {
    "file_name": f"{dataset_name}.json",
    "formatting": "sharegpt",
    "columns": {
        "messages": "conversations",
        "images": "images",
        "system": "system"
    },
    "tags": {
        "role_tag": "role",
        "content_tag": "content",
        "user_tag": "user",
        "assistant_tag": "assistant"
    }
}
with open(f"{out_root}/dataset_info.json", "w") as f:
    json.dump(dataset_info, f, indent=4, ensure_ascii=False)
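
# Expected layout after a successful run:
#   data/r1_onevision_90k/<idx>.png   one PNG per sample
#   data/r1_onevision_90k.json        ShareGPT-style records referencing the PNGs
#   data/dataset_info.json            registry entry under the key "r1_onevision_90k"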