"""Load a directory of JSON Lines annotation files into a Hugging Face DatasetDict."""

import json
import os

from datasets import Dataset, DatasetDict, Features, Value, Sequence

def load_json_lines(file_path):
    """Yield one parsed record per line of a JSON Lines file."""
    with open(file_path, 'r') as f:
        for line in f:
            yield json.loads(line)

def _generate_examples(chunk_files):
    # Dataset.from_generator expects plain dicts that match the declared
    # features, so yield each record directly rather than a (key, record) pair.
    for file in chunk_files:
        for example in load_json_lines(file):
            yield example

def load_dataset(*args, **kwargs):
    # `data_dir` must be passed as a keyword argument; every *.json file in it
    # is read as JSON Lines. Sorting gives a deterministic file order.
    dataset_dir = kwargs['data_dir']
    chunk_files = [
        os.path.join(dataset_dir, f)
        for f in sorted(os.listdir(dataset_dir))
        if f.endswith('.json')
    ]
    features = Features({
        'index': Value(dtype='string'),
        'image': Value(dtype='string'),
        'question': Value(dtype='string'),
        'multi-choice options': Sequence(Value(dtype='string')),
        'answer': Value(dtype='string'),
        'category': Value(dtype='string'),
        'l2-category': Value(dtype='string'),
    })
    dataset = DatasetDict({
        'train': Dataset.from_generator(
            lambda: _generate_examples(chunk_files), features=features
        )
    })
    return dataset
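

# Usage sketch (assumptions: a hypothetical "./data" directory containing the
# *.json annotation files, where each line of every file is a JSON object
# whose keys match the features declared above).
if __name__ == '__main__':
    dataset = load_dataset(data_dir='./data')
    print(dataset)
    print(dataset['train'][0])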