from datasets import Dataset, load_dataset

ds: Dataset = load_dataset("DDSC/partial-danish-gigaword-no-twitter", split="train")  # type: ignore
# filter to only include spontaneous speech
ds = ds.filter(lambda x: x["source"] == "spont", num_proc=6)

texts = ds["text"]


def remove_taler(text):
    """Strip a leading speaker tag ("Taler ...:") from a transcript line."""
    if text.startswith("Taler"):
        text = ":".join(text.split(":")[1:])
    return text.strip()
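

# Illustrative usage (hypothetical input in the corpus's "Taler N: ..." format):
assert remove_taler("Taler 2: Hvordan har du det?") == "Hvordan har du det?"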


qa_pairs = []
for text in texts:
    lines = [remove_taler(line) for line in text.split("\n")]
    # a line counts as a question if it is reasonably long (> 7 tokens) and ends with "?";
    # the immediately following line is taken as its answer
    question_idxs = [
        i
        for i, line in enumerate(lines)
        if len(line.split(" ")) > 7 and line.endswith("?")
    ]
    qa_pairs += [
        {"question": lines[i], "answer": lines[i + 1]}
        for i in question_idxs
        if i + 1 < len(lines)  # a question on the final line has no answer to pair with
    ]


# filter qa pairs: keep pairs whose combined length is below 20 tokens
# and where both the question and the answer are longer than 4 tokens
def get_length_of_pair(qa: dict):
    return len(qa["question"].split(" ")) + len(qa["answer"].split(" "))


def get_min_length_of_pair(qa: dict):
    return min(len(qa["question"].split(" ")), len(qa["answer"].split(" ")))


qa_pairs = [
    qa
    for qa in qa_pairs
    if get_length_of_pair(qa) < 20 and get_min_length_of_pair(qa) > 4
]
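
# quick sanity check: report how many pairs survived the filtering
print(f"Extracted {len(qa_pairs)} QA pairs")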


# create dataset
qa_ds = Dataset.from_list(qa_pairs)

# add readme
qa_ds.info.description = """# Spontaneous speech QA

This dataset contains QA pairs from the spontaneous speech subsection of the Danish Gigaword.
The dataset is created from the [DDSC dataset](https://huggingface.co/datasets/DDSC/partial-danish-gigaword-no-twitter)
and filtered to only include QA pairs where the combined length of the question and answer is
below 20 tokens and both are longer than 4 tokens.

To find out more about how the dataset was created, see the accompanying script.
"""

# carry over the license from the source dataset
qa_ds.info.license = ds[0]["LICENSE"]
qa_ds.info.dataset_name = "Spontaneous Speech QA"


# split dataset
qa_ds = qa_ds.train_test_split(test_size=0.2, seed=42)  # fixed seed so the split is reproducible

# upload dataset
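# NB: push_to_hub requires prior authentication with the Hugging Face Hub,
# e.g. via `huggingface-cli login`.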
qa_ds.push_to_hub("spontanous-speech-qa")