Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: Spanish
Size: 10K - 100K
ArXiv:
Tags: question-generation
License:
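
The script below flattens the splits of lmqg/qg_esquad into paragraph-level JSONL files, one record per unique paragraph, bundling every question and answer that belongs to it. Each output line is a JSON object shaped roughly like this (illustrative values, not actual dataset content):

{"paragraph": "...", "questions": ["...", "..."], "answers": ["...", "..."], "questions_answers": "question: ..., answer: ... | question: ..., answer: ..."}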
import json
import os
from tqdm import tqdm
from datasets import load_dataset

# Token used to join multiple question-answer pairs into one string; it is
# stripped from paragraphs, questions, and answers so it stays unambiguous.
SEP_TOKEN = " | "


def create_data(hf_data):
    """Group a split by paragraph and collect all of its questions and answers."""
    df = hf_data.to_pandas()
    output = []
    for paragraph, g in df.groupby("paragraph"):
        example = {
            'paragraph': paragraph.replace(SEP_TOKEN, " "),
            'questions': [_g.replace(SEP_TOKEN, " ") for _g in g['question']],
            'answers': [_g.replace(SEP_TOKEN, " ") for _g in g['answer']],
        }
        # Serialize every question-answer pair into a single SEP_TOKEN-joined string.
        example["questions_answers"] = SEP_TOKEN.join(
            f"question: {q}, answer: {a}" for q, a in zip(example["questions"], example["answers"])
        )
        output.append(example)
    return output
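
# A quick illustration of what create_data produces (hypothetical inline demo,
# not part of the original pipeline): any object with a .to_pandas() method
# works, so a small in-memory datasets.Dataset is enough to check the grouping.
#
#   from datasets import Dataset
#   demo = Dataset.from_dict({
#       "paragraph": ["p1", "p1"],
#       "question": ["q1", "q2"],
#       "answer": ["a1", "a2"],
#   })
#   create_data(demo)[0]["questions_answers"]
#   # -> 'question: q1, answer: a1 | question: q2, answer: a2'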
if __name__ == '__main__':
    # Download the Spanish SQuAD-based question-generation dataset from the Hub.
    qg_squad = load_dataset("lmqg/qg_esquad")
    data_valid = create_data(qg_squad['validation'])
    data_train = create_data(qg_squad['train'])
    data_test = create_data(qg_squad['test'])
    data_all = {'train': data_train, 'validation': data_valid, 'test': data_test}
    output = './data/processed'
    os.makedirs(output, exist_ok=True)
    # Write each split as JSON Lines: one grouped example per line.
    for k, _data in data_all.items():
        with open('{}/{}.jsonl'.format(output, k), 'w') as f:
            for single_data in tqdm(_data):
                f.write(json.dumps(single_data) + '\n')
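
Once the script has run, one way to load the emitted splits back is via the datasets JSON loader (a minimal sketch; the file paths simply mirror the output directory written above):

from datasets import load_dataset

processed = load_dataset(
    'json',
    data_files={
        'train': './data/processed/train.jsonl',
        'validation': './data/processed/validation.jsonl',
        'test': './data/processed/test.jsonl',
    },
)
print(processed['train'][0]['questions_answers'])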