ahmedheakl commited on
Commit
73b6bc8
·
verified ·
1 Parent(s): a27029b

Upload convert_dataset_v2.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. convert_dataset_v2.py +59 -0
convert_dataset_v2.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
from tqdm import tqdm
import os

# GAIA
# Path to the raw training set (relative to the working directory).
data_path = "data/mat_train.json"

# Load the whole dataset into memory. Explicit encoding avoids depending on
# the platform default when the JSON contains non-ASCII text.
with open(data_path, "r", encoding="utf-8") as f:
    dataset = json.load(f)
10
+
11
+ def _convert(image_path_map, conversations):
12
+ output = []
13
+ for turn in conversations:
14
+ role = turn["role"]
15
+ content = turn["content"]
16
+ turn_new = dict()
17
+ turn_new["from"] = role
18
+ pid = 1
19
+ keys = sorted(list(image_path_map.keys()))
20
+ for k in keys:
21
+ v = image_path_map[k]
22
+ if k in content:
23
+ content = content.replace(k, f"Picture {pid}: <img>{v}</img>\n")
24
+ content = content.replace(f"</img>\n\n", "</img>\n")
25
+ pid += 1
26
+ turn_new["value"] = content
27
+ output.append(turn_new)
28
+ return output
29
+
30
+
31
# Normalize every record in place: rewrite image paths to absolute paths
# under ./data and convert the conversation turns to the target schema.
for item in tqdm(dataset):
    conversations = item["conversations"]
    image_path_map = {}
    if "image" not in item:
        # Text-only sample: nothing to remap.
        pass
    elif isinstance(item["image"], str):
        # Single image: upstream text uses the generic "<image>" placeholder.
        image_path_map["<image>"] = item["image"]
        item["image"] = f"{os.getcwd()}/data/{item['image']}"
    else:
        # Multiple images: a mapping of placeholder token -> relative path.
        for placeholder, rel_path in item["image"].items():
            image_path_map[placeholder] = rel_path
            item["image"][placeholder] = f"{os.getcwd()}/data/{rel_path}"
    item["conversations"] = _convert(image_path_map, conversations)
47
+
48
import random

# Timestamp is pinned rather than generated with datetime.now(), so reruns
# overwrite the same output files.
# NOTE(review): presumably intentional for reproducibility — confirm.
now = "20241209_1731"

# Write the full converted dataset. Explicit encoding is required because
# ensure_ascii=False emits raw non-ASCII characters.
print("write to", f"data/train_{now}.json")
with open(f"data/train_{now}.json", "w", encoding="utf-8") as f:
    json.dump(dataset, f, indent=4, ensure_ascii=False)

# Write a random 1000-sample subset for quick experiments.
# Note: shuffle mutates `dataset` in place, so ordering is randomized
# from this point on.
with open(f"data/train_{now}_subset.json", "w", encoding="utf-8") as f:
    random.shuffle(dataset)
    json.dump(dataset[:1000], f, indent=4, ensure_ascii=False)