Dataset: tau/fs
Modalities: Text
Libraries: Datasets
File size: 3,954 Bytes
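This file is the dataset loading script for tau/fs, a Hugging Face datasets builder that bundles several long-document tasks (SummScreenFD, GovReport, arXiv summarization, and BoolQ). A minimal usage sketch, assuming the config names defined in the script below; depending on your datasets version, loading a script-based dataset may also require trust_remote_code=True:

from datasets import load_dataset

# Illustrative only: "gov_report" is one of the configs defined in the script below.
ds = load_dataset("tau/fs", "gov_report")
print(ds)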
import json
import os

import datasets
from citations_and_descriptions import (
    _SUMM_SCREEN_DESCRIPTION, _SUMM_SCREEN_CITATION,
    _GOV_REPORT_CITATION, _GOV_REPORT_DESCRIPTION,
    _ARXIV_CITATION, _ARXIV_DESCRIPTION,
    _FS_DESCRIPTION, _FS_CITATION,
)
from configs.arxiv import ArxivConfig
from configs.scrolls import ScrollsConfig
from configs.super_glue import BoolQConfig


class FS(datasets.GeneratorBasedBuilder):
    """The SCROLLS benchmark."""
    DEFAULT_WRITER_BATCH_SIZE = 1000  # because Narrative QA is a rather large dataset
    BUILDER_CONFIGS = [
        ScrollsConfig(
            additional_features=["id"],
            name="summ_screen_fd_debug",
            description=_SUMM_SCREEN_DESCRIPTION,
            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd_debug.zip",
            citation=_SUMM_SCREEN_CITATION,
            url="https://github.com/mingdachen/SummScreen"
        ),
        ScrollsConfig(
            additional_features=["id"],
            name="gov_report",
            description=_GOV_REPORT_DESCRIPTION,
            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip",
            citation=_GOV_REPORT_CITATION,
            url="https://gov-report-data.github.io/"
        ),
        ArxivConfig(
            additional_features=['section_names', 'sections'],
            name="arxiv_debug",
            description=_ARXIV_DESCRIPTION,
            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/arxiv_debug.zip",
            citation=_ARXIV_CITATION,
            url="https://github.com/armancohan/long-summarization"
        ),
        BoolQConfig(
            additional_features=[],
            name="boolq",
            description="",  # TODO
            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
            citation="",  # TODO
            url=""  # TODO
        )
    ]

    def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}

        return datasets.DatasetInfo(
            description=_FS_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _FS_CITATION,
        )

    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        task_name = _get_task_name_from_data_url(self.config.data_url)
        dl_dir = os.path.join(dl_dir, task_name)

        # Map each split to its first data file when explicit data files are configured.
        data_files = None
        if self.config.data_files is not None:
            data_files = {split: paths[0] for split, paths in self.config.data_files.items()}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, self.config.train_file),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, self.config.validation_file),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, self.config.test_file),
                },
            ),
        ]

    def _generate_examples(self, data_file):
        # Each data file is in JSON Lines format: one example per line.
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                # Let the config apply any task-specific post-processing to the raw row.
                self.config.process(row)
                # Some splits (e.g. hidden test sets) may ship without the target field.
                if self.config.target_key not in row:
                    row[self.config.target_key] = None
                yield row[self.config.id_key], row


def _get_task_name_from_data_url(data_url):
    return data_url.split("/")[-1].split(".")[0]
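
A quick sanity check of the helper above, using the gov_report data_url from BUILDER_CONFIGS (illustrative, run in the same module):

url = "https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip"
assert _get_task_name_from_data_url(url) == "gov_report"  # the archive is expected to extract into a "gov_report" subdirectory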