Soptq committed
Commit 95ca6e6 · verified · 1 Parent(s): ed06cb8

Delete build_dataset.py

Files changed (1)
  1. build_dataset.py +0 -431
build_dataset.py DELETED
@@ -1,431 +0,0 @@
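# Build the SFE evaluation dataset: collect each domain's question jsonl
# files, normalize them into a single Hugging Face "test" split, and export
# it to parquet.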
import json
import re
import os
import hashlib
import shutil

from datasets import Dataset, DatasetDict
from PIL import Image

import openai


VERSION = "20250515"
LANG = "zh"
OUTPUT_PATH = "/fs-computility/ai4sData/earth-shared/SFE/sfe_dataset/v0"

chemistry_dir = "/fs-computility/ai4sData/earth-shared/SFE/v0/chem"
life_dir = "/fs-computility/ai4sData/earth-shared/SFE/v0/life"
astro_dir = "/fs-computility/ai4sData/earth-shared/SFE/v0/astro"
earth_dir = "/fs-computility/ai4sData/earth-shared/SFE/v0/earth"
mat_dir = "/fs-computility/ai4sData/earth-shared/SFE/v0/mat"

VERSION = f"{VERSION}-{LANG}"

ENABLE_FILTER = False
FILTERS = ["e014"]

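# Map the free-form "question type" strings in the source jsonl files onto
# the canonical labels used downstream.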
def enumerate_question_types(question_type):
    if question_type.lower() in ["exact match"]:
        return "exact_match"
    elif question_type.lower() in ["multiple choice"]:
        return "mcq"
    elif question_type.lower() in ["open question"]:
        return "open_ended"

    raise ValueError(f"Unknown question type: {question_type}")


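# Parse a "[x1,y1,x2,y2;x1,y1,x2,y2;...]" string into a list of 4-float
# bounding boxes (e.g. "[0.1,0.2,0.3,0.4]" -> [[0.1, 0.2, 0.3, 0.4]]);
# returns None when no bracketed group of four floats is present.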
def parse_float_sequence_within(input_str):
    pattern_in_bracket = r"\[(.*)\]"
    match = re.search(pattern_in_bracket, input_str)

    if not match:
        return None

    inside_str = match.group(1)
    groups = inside_str.split(";")

    bboxs = []
    for group in groups:
        floats = group.split(",")
        if len(floats) != 4:
            continue
        try:
            bboxs.append([float(f) for f in floats])
        except ValueError:
            continue

    if len(bboxs) == 0:
        return None

    return bboxs


if __name__ == "__main__":
    question_ids = []
    question_texts = []
    question_options = []
    question_images = []
    question_answers = []
    question_types = []
    question_fields = []

    # the shared image destination must exist before the loaders copy into it
    os.makedirs(os.path.join(OUTPUT_PATH, "images"), exist_ok=True)

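    # Every domain loader below follows the same pattern: walk the domain
    # directory, read each task's {LANG}-prefixed .jsonl file, copy every
    # referenced image into OUTPUT_PATH/images under an md5-derived name, and
    # pad the question text with "<image>" placeholders until the placeholder
    # count matches the number of images.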
    # load astro data
    for folder_name in os.listdir(astro_dir):
        if not folder_name.startswith("A"):
            continue

        if ENABLE_FILTER and folder_name.lower() not in FILTERS:
            continue

        folder_path = os.path.join(astro_dir, folder_name)
        if not os.path.isdir(folder_path):
            continue

        file_processed = False
        for file_name in os.listdir(folder_path):
            if not file_name.endswith(".jsonl"):
                continue
            if not file_name.startswith(LANG):
                continue
            file_processed = True
            file_path = os.path.join(folder_path, file_name)
            print(file_path)
            with open(file_path, "r") as f:
                for line in f:
                    data = json.loads(line)
                    data = {k.lower(): v for k, v in data.items()}

                    images = data["images"]
                    assert len(images) != 0

                    abs_images = []
                    for image in images:
                        image_path = os.path.join(astro_dir, image)
                        assert os.path.exists(image_path), f"Image not found: {image_path}"
                        md5hash = hashlib.md5(Image.open(image_path).tobytes()).hexdigest()
                        new_image_path = os.path.join(OUTPUT_PATH, "images", f"{image.split('.')[0].replace('/', '_')}_{md5hash}.{image.split('.')[1]}")
                        if not os.path.exists(new_image_path):
                            shutil.copy(image_path, new_image_path)
                        # abs_images.append(new_image_path)
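                        # unlike the loaders below, which store the absolute
                        # destination path, astro records only the renamed
                        # file's basename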
                        abs_images.append(f"{image.split('.')[0].replace('/', '_')}_{md5hash}.{image.split('.')[1]}")

                    question_text = data["text"]
                    missing_images = len(images) - question_text.count("<image>") - sum([option.count("<image>") for option in data["answer choices"]])
                    question_text = " ".join(["<image>" for _ in range(missing_images)]) + " " + question_text
                    question_text = question_text.strip()
                    assert len(images) == question_text.count("<image>") + sum([option.count("<image>") for option in data["answer choices"]])

                    question_options_ = data["answer choices"]
                    question_type = enumerate_question_types(data["question type"])
                    question_answer = str(data["ground truth"])

                    question_ids.append(data["question_id"])
                    question_texts.append(question_text)
                    question_options.append(question_options_)
                    question_images.append(abs_images)
                    question_answers.append(question_answer)
                    question_types.append(question_type)
                    question_fields.append("astronomy")
        if not file_processed:
            raise RuntimeError(f"No {LANG} .jsonl file found in {folder_path}")
    print("astro data loaded")

    # load chem data
    for folder_name in os.listdir(chemistry_dir):
        if not folder_name.startswith("C"):
            continue

        if ENABLE_FILTER and folder_name.lower() not in FILTERS:
            continue

        folder_path = os.path.join(chemistry_dir, folder_name)
        if not os.path.isdir(folder_path):
            continue

        file_processed = False
        for file_name in os.listdir(folder_path):
            if not file_name.endswith(".jsonl"):
                continue
            if not file_name.startswith(LANG):
                continue
            file_processed = True
            file_path = os.path.join(folder_path, file_name)
            print(file_path)
            with open(file_path, "r") as f:
                for line in f:
                    data = json.loads(line)
                    data = {k.lower(): v for k, v in data.items()}

                    images = data["images"]
                    assert len(images) != 0

                    abs_images = []
                    for image in images:
                        image_path = os.path.join(chemistry_dir, image)
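                        # image paths may be given relative to the domain root
                        # or to the task folder itself, so try both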
                        if not os.path.exists(image_path):
                            image_path = os.path.join(folder_path, image)
                        assert os.path.exists(image_path), f"Image not found: {image_path}"
                        md5hash = hashlib.md5(Image.open(image_path).tobytes()).hexdigest()
                        new_image_path = os.path.join(OUTPUT_PATH, "images", f"{image.split('.')[0].replace('/', '_')}_{md5hash}.{image.split('.')[1]}")
                        if not os.path.exists(new_image_path):
                            shutil.copy(image_path, new_image_path)
                        abs_images.append(new_image_path)

                    question_text = data["text"]
                    missing_images = len(images) - question_text.count("<image>") - sum([option.count("<image>") for option in data["answer choices"]])
                    question_text = " ".join(["<image>" for _ in range(missing_images)]) + " " + question_text
                    question_text = question_text.strip()
                    assert len(images) == question_text.count("<image>") + sum([option.count("<image>") for option in data["answer choices"]])

                    question_options_ = data["answer choices"]
                    question_type = enumerate_question_types(data["question type"])
                    question_answer = str(data["ground truth"])

                    question_ids.append(data["question_id"])
                    question_texts.append(question_text)
                    question_options.append(question_options_)
                    question_images.append(abs_images)
                    question_answers.append(question_answer)
                    question_types.append(question_type)
                    question_fields.append("chemistry")
        if not file_processed:
            raise RuntimeError(f"No {LANG} .jsonl file found in {folder_path}")
    print("chem data loaded")

    # load earth data
    for folder_name in os.listdir(earth_dir):
        if not folder_name.startswith("E"):
            continue

        if ENABLE_FILTER and folder_name.lower() not in FILTERS:
            continue

        folder_path = os.path.join(earth_dir, folder_name)
        if not os.path.isdir(folder_path):
            continue

        file_processed = False
        for file_name in os.listdir(folder_path):
            if not file_name.endswith(".jsonl"):
                continue
            if not file_name.startswith(LANG):
                continue
            file_processed = True
            file_path = os.path.join(folder_path, file_name)
            print(file_path)
            with open(file_path, "r") as f:
                for line in f:
                    if line.strip() == "":
                        continue
                    data = json.loads(line)
                    data = {k.lower(): v for k, v in data.items()}

                    images = data["images"] if "images" in data else data["image"]
                    assert len(images) != 0

                    abs_images = []
                    for image in images:
                        image_path = os.path.join(earth_dir, image)
                        if not os.path.exists(image_path):
                            image_path = os.path.join(folder_path, image)
                        assert os.path.exists(image_path), f"Image not found: {image_path}"
                        md5hash = hashlib.md5(Image.open(image_path).tobytes()).hexdigest()
                        new_image_path = os.path.join(OUTPUT_PATH, "images", f"{image.split('.')[0].replace('/', '_')}_{md5hash}.{image.split('.')[1]}")
                        if not os.path.exists(new_image_path):
                            shutil.copy(image_path, new_image_path)
                        abs_images.append(new_image_path)

                    question_text = data["text"]
                    missing_images = len(images) - question_text.count("<image>") - sum([option.count("<image>") for option in data["answer choices"]])
                    question_text = " ".join(["<image>" for _ in range(missing_images)]) + " " + question_text
                    question_text = question_text.strip()
                    assert len(images) == question_text.count("<image>") + sum([option.count("<image>") for option in data["answer choices"]])

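                    # E011/E012 carry an extra per-task prompt file; their
                    # ground truth must parse as a bounding-box list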
                    if folder_name in ["E011", "E012"]:
                        print("adding task-specific prompts")
                        task_prompt = open(os.path.join(folder_path, f"{LANG}.prompt.txt")).read()
                        question_text = f"{question_text}\n\n{task_prompt}"

                        assert parse_float_sequence_within(str(data["ground truth"])) is not None

                    question_options_ = data["answer choices"]
                    question_type = enumerate_question_types(data["question type"])
                    question_answer = str(data["ground truth"])

                    question_ids.append(data["question_id"])
                    question_texts.append(question_text)
                    question_options.append(question_options_)
                    question_images.append(abs_images)
                    question_answers.append(question_answer)
                    question_types.append(question_type)
                    question_fields.append("earth")
        if not file_processed:
            raise RuntimeError(f"No {LANG} .jsonl file found in {folder_path}")
    print("earth data loaded")

    # load life data
    for folder_name in os.listdir(life_dir):
        if not folder_name.startswith("L"):
            continue

        if ENABLE_FILTER and folder_name.lower() not in FILTERS:
            continue

        folder_path = os.path.join(life_dir, folder_name)
        if not os.path.isdir(folder_path):
            continue

        file_processed = False
        for file_name in os.listdir(folder_path):
            if not file_name.endswith(".jsonl"):
                continue
            if not file_name.startswith(LANG):
                continue
            file_processed = True
            file_path = os.path.join(folder_path, file_name)
            print(file_path)
            with open(file_path, "r") as f:
                for line in f:
                    if line.strip() == "":
                        continue
                    data = json.loads(line)
                    data = {k.lower(): v for k, v in data.items()}

                    images = data["images"] if "images" in data else data["image"]
                    assert len(images) != 0

                    abs_images = []
                    for image in images:
                        image_path = os.path.join(life_dir, image)
                        if not os.path.exists(image_path):
                            image_path = os.path.join(folder_path, image)
                        assert os.path.exists(image_path), f"Image not found: {image_path}"
                        md5hash = hashlib.md5(Image.open(image_path).tobytes()).hexdigest()
                        new_image_path = os.path.join(OUTPUT_PATH, "images", f"{image.split('.')[0].replace('/', '_')}_{md5hash}.{image.split('.')[1]}")
                        if not os.path.exists(new_image_path):
                            shutil.copy(image_path, new_image_path)
                        abs_images.append(new_image_path)

                    question_text = data["text"]
                    missing_images = len(images) - question_text.count("<image>") - sum([option.count("<image>") for option in data["answer choices"]])
                    question_text = " ".join(["<image>" for _ in range(missing_images)]) + " " + question_text
                    question_text = question_text.strip()
                    assert len(images) == question_text.count("<image>") + sum([option.count("<image>") for option in data["answer choices"]])

                    question_options_ = data["answer choices"]
                    question_type = enumerate_question_types(data["question type"])
                    question_answer = str(data["ground truth"])

                    question_ids.append(data["question_id"])
                    question_texts.append(question_text)
                    question_options.append(question_options_)
                    question_images.append(abs_images)
                    question_answers.append(question_answer)
                    question_types.append(question_type)
                    question_fields.append("life")
        if not file_processed:
            raise RuntimeError(f"No {LANG} .jsonl file found in {folder_path}")
    print("life data loaded")

    # load mat data
    for folder_name in os.listdir(mat_dir):
        if not folder_name.startswith("M"):
            continue

        if ENABLE_FILTER and folder_name.lower() not in FILTERS:
            continue

        folder_path = os.path.join(mat_dir, folder_name)
        if not os.path.isdir(folder_path):
            continue

        file_processed = False
        for file_name in os.listdir(folder_path):
            if not file_name.endswith(".jsonl"):
                continue
            if not file_name.startswith(LANG):
                continue
            file_processed = True
            file_path = os.path.join(folder_path, file_name)
            print(file_path)
            with open(file_path, "r") as f:
                for line in f:
                    if line.strip() == "":
                        continue
                    data = json.loads(line)
                    data = {k.lower(): v for k, v in data.items()}

                    images = data["images"] if "images" in data else data["image"]
                    assert len(images) != 0

                    abs_images = []
                    for image in images:
                        image_path = os.path.join(mat_dir, image)
                        if not os.path.exists(image_path):
                            image_path = os.path.join(folder_path, image)
                        assert os.path.exists(image_path), f"Image not found: {image_path}"
                        md5hash = hashlib.md5(Image.open(image_path).tobytes()).hexdigest()
                        new_image_path = os.path.join(OUTPUT_PATH, "images", f"{image.split('.')[0].replace('/', '_')}_{md5hash}.{image.split('.')[1]}")
                        if not os.path.exists(new_image_path):
                            shutil.copy(image_path, new_image_path)
                        abs_images.append(new_image_path)

                    question_text = data["text"]
                    missing_images = len(images) - question_text.count("<image>") - sum([option.count("<image>") for option in data["answer choices"]])
                    question_text = " ".join(["<image>" for _ in range(missing_images)]) + " " + question_text
                    question_text = question_text.strip()
                    assert len(images) == question_text.count("<image>") + sum([option.count("<image>") for option in data["answer choices"]])

                    question_options_ = data["answer choices"]
                    question_type = enumerate_question_types(data["question type"])
                    question_answer = str(data["ground truth"])

                    question_ids.append(data["question_id"])
                    question_texts.append(question_text)
                    question_options.append(question_options_)
                    question_images.append(abs_images)
                    question_answers.append(question_answer)
                    question_types.append(question_type)
                    question_fields.append("material")
        if not file_processed:
            raise RuntimeError(f"No {LANG} .jsonl file found in {folder_path}")
    print("mat data loaded")

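    # assemble every domain into a single flat "test" split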
    dataset_dict = {
        "id": question_ids,
        "question": question_texts,
        "options": question_options,
        "answer": question_answers,
        "images": question_images,
        "question_type": question_types,
        "field": question_fields,
        "lang": [LANG for _ in range(len(question_ids))],
    }

    dataset = Dataset.from_dict(dataset_dict)

    dataset_split = DatasetDict({
        "test": dataset
    })
    print(dataset_split)
    # get the first row
    print(dataset_split["test"][0])

    # save dataset
    os.makedirs(os.path.join(OUTPUT_PATH, VERSION), exist_ok=True)
    test_split_path = os.path.join(OUTPUT_PATH, VERSION, "test.parquet")
    dataset_split["test"].to_parquet(test_split_path)

    metadata_path = os.path.join(OUTPUT_PATH, VERSION, "dataset_info.json")
    with open(metadata_path, "w") as f:
        json.dump({"splits": ["test"]}, f)
    # dataset.to_parquet(os.path.join(OUTPUT_PATH, VERSION, "data.parquet"))