Dataset card metadata:
- Tasks: Text Generation
- Sub-tasks: language-modeling
- Languages: Chinese
- Size: 10K < n < 100K
- Tags: question-generation
- File size: 1,297 bytes
- Commit: eee0edd
import json
import os
from random import shuffle, seed
def get_dict(filepath):
    """Flatten a SQuAD-format JSON file into a list of QA records.

    Args:
        filepath: Path to a SQuAD-style JSON file (top-level ``"data"`` key,
            articles containing ``"paragraphs"``, each with a ``"context"``
            and a list of ``"qas"``).

    Returns:
        A list of ``{"answer", "context", "question"}`` dicts. Unanswerable
        questions, questions with no answers, and answers that do not occur
        verbatim in their context are skipped.
    """
    output = []
    # The corpus is Chinese; force UTF-8 rather than the platform default
    # encoding (which would break on e.g. Windows).
    with open(filepath, encoding="utf-8") as f:
        data = json.load(f)["data"]
    for article in data:
        for paragraph in article["paragraphs"]:
            context = paragraph["context"]
            for qa in paragraph["qas"]:
                # SQuAD v1-style files omit "is_impossible"; treat a missing
                # key as answerable instead of raising KeyError.
                if qa.get("is_impossible", False):
                    continue
                answers = qa["answers"]
                if not answers:
                    continue
                answer = answers[0]["text"]
                # Downstream QG training assumes the answer is a span of the
                # context, so drop pairs where it is not a substring.
                if answer not in context:
                    continue
                output.append({
                    "answer": answer,
                    "context": context,
                    "question": qa["question"],
                })
    return output
# Build train/valid/test splits from the SQuAD-format source files.
train = get_dict("dataset/train-zen-v1.0.json")
dev = get_dict("dataset/dev-zen-v1.0.json")

# Carve a held-out test set out of the shuffled training data, sized to
# match dev. Fixed seed keeps the split reproducible across runs.
seed(42)
shuffle(train)
test = train[:len(dev)]
train = train[len(dev):]


def _dump_jsonl(records, path):
    """Write *records* to *path* as JSON Lines (one object per line)."""
    # ensure_ascii=False keeps the Chinese text human-readable instead of
    # \uXXXX escapes; that requires writing the file as UTF-8 explicitly.
    with open(path, "w", encoding="utf-8") as f:
        f.write("\n".join(json.dumps(x, ensure_ascii=False) for x in records))


# Create the output directory so a fresh checkout doesn't crash on open().
os.makedirs("data", exist_ok=True)
_dump_jsonl(train, "data/raw.train.jsonl")
_dump_jsonl(dev, "data/raw.valid.jsonl")
_dump_jsonl(test, "data/raw.test.jsonl")