Datasets:
tau
/

Modalities:
Text
Libraries:
Datasets
yuvalkirstain committed on
Commit
a6a875e
1 Parent(s): 4156bd0

a bit of cleaning

Browse files
Files changed (4) hide show
  1. configs/arxiv.py +35 -0
  2. configs/fs.py +59 -0
  3. configs/scrolls.py +30 -0
  4. fs.py +4 -130
configs/arxiv.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from configs.fs import FSConfig
2
+
3
+
4
+ class ArxivConfig(FSConfig):
5
+ def __init__(self, **kwargs):
6
+ super().__init__(**kwargs)
7
+
8
+ @property
9
+ def id_key(self):
10
+ return "article_id"
11
+
12
+ @property
13
+ def source_key(self):
14
+ return "article_text"
15
+
16
+ @property
17
+ def target_key(self):
18
+ return "abstract_text"
19
+
20
+ @property
21
+ def train_file(self):
22
+ return "train.txt"
23
+
24
+ @property
25
+ def validation_file(self):
26
+ return "val.txt"
27
+
28
+ @property
29
+ def test_file(self):
30
+ return "test.txt"
31
+
32
+ def process(self, example):
33
+ example[self.source_key] = " ".join(example[self.source_key])
34
+ example[self.target_key] = " ".join(example[self.target_key]).replace("<S>", "").replace("</S>", "")
35
+ del example["labels"]
configs/fs.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+
3
+ import datasets
4
+
5
+
6
class FSConfig(datasets.BuilderConfig):
    """Abstract BuilderConfig for FS datasets.

    Concrete subclasses supply the example field names (id/source/target) and
    the per-split file names via the abstract properties below, and may
    override :meth:`process` to normalize examples in place.

    NOTE(review): ``@abstractmethod`` is not actually enforced here because
    the class does not use ``abc.ABCMeta`` as its metaclass; an incomplete
    subclass fails only when an unimplemented property is accessed.
    """

    def __init__(self, additional_features, data_url, citation, url, **kwargs):
        """BuilderConfig for FS.

        Args:
            additional_features: `list[string]`, list of the features that will
                appear in the feature dict additionally to the self.id_key,
                self.source_key and self.target_key. Should not include "label".
            data_url: `string`, url to download the zip file from.
            citation: `string`, citation for the data set.
            url: `string`, url for information about the data set.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        # The abstract key properties are resolved on the concrete subclass
        # by the time its __init__ delegates here.
        self.features = [self.id_key, self.source_key, self.target_key] + additional_features
        self.data_url = data_url
        self.citation = citation
        self.url = url

    @property
    @abstractmethod
    def id_key(self):
        """Name of the example field holding the unique example id."""

    @property
    @abstractmethod
    def train_file(self):
        """File name of the train split in the downloaded data."""

    @property
    @abstractmethod
    def validation_file(self):
        """File name of the validation split in the downloaded data."""

    @property
    @abstractmethod
    def test_file(self):
        """File name of the test split in the downloaded data."""

    @property
    @abstractmethod
    def source_key(self):
        """Name of the example field holding the model input text."""

    @property
    @abstractmethod
    def target_key(self):
        """Name of the example field holding the reference output text."""

    def process(self, example):
        """Hook for subclasses to normalize an example in place; no-op here."""
configs/scrolls.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from configs.fs import FSConfig
2
+
3
+
4
class ScrollsConfig(FSConfig):
    """FSConfig for SCROLLS-formatted data.

    Splits are jsonl files and each example carries its id, input and output
    under the ``pid``/``input``/``output`` fields.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @property
    def id_key(self):
        """Field holding the unique example id."""
        return "pid"

    @property
    def source_key(self):
        """Field holding the model input text."""
        return "input"

    @property
    def target_key(self):
        """Field holding the reference output text."""
        return "output"

    @property
    def train_file(self):
        return "train.jsonl"

    @property
    def validation_file(self):
        return "validation.jsonl"

    @property
    def test_file(self):
        return "test.jsonl"
fs.py CHANGED
@@ -1,10 +1,5 @@
1
- # coding=utf-8
2
- # Lint as: python3
3
- """The SCROLLS benchmark."""
4
-
5
  import json
6
  import os
7
- from abc import abstractmethod
8
 
9
  import datasets
10
  from citations_and_descriptions import (
@@ -13,131 +8,11 @@ from citations_and_descriptions import (
13
  _ARXIV_CITATION, _ARXIV_DESCRIPTION,
14
  _FS_DESCRIPTION, _FS_CITATION,
15
  )
 
 
16
 
17
 
18
- class FSConfig(datasets.BuilderConfig):
19
- """BuilderConfig for FS."""
20
-
21
- def __init__(self, additional_features, data_url, citation, url, **kwargs):
22
- """BuilderConfig for FS.
23
- Args:
24
- additional_features: `list[string]`, list of the features that will appear in the feature dict
25
- additionally to the self.id_key, self.source_key and self.target_key. Should not include "label".
26
- data_url: `string`, url to download the zip file from.
27
- citation: `string`, citation for the data set.
28
- url: `string`, url for information about the data set.
29
- label_classes: `list[string]`, the list of classes for the label if the
30
- label is present as a string. Non-string labels will be cast to either
31
- 'False' or 'True'.
32
- **kwargs: keyword arguments forwarded to super.
33
- """
34
- super(FSConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
35
- self.features = [self.id_key, self.source_key, self.target_key] + additional_features
36
- self.data_url = data_url
37
- self.citation = citation
38
- self.url = url
39
-
40
- @property
41
- @abstractmethod
42
- def id_key(self):
43
- pass
44
-
45
- @property
46
- @abstractmethod
47
- def train_file(self):
48
- pass
49
-
50
- @property
51
- @abstractmethod
52
- def validation_file(self):
53
- pass
54
-
55
- @property
56
- @abstractmethod
57
- def test_file(self):
58
- pass
59
-
60
- @property
61
- @abstractmethod
62
- def source_key(self):
63
- pass
64
-
65
- @property
66
- @abstractmethod
67
- def target_key(self):
68
- pass
69
-
70
- def process(self, example):
71
- example[self.source_key] = example[self.source_key].strip()
72
- example[self.target_key] = example[self.target_key].strip() if example[
73
- self.target_key] else None
74
-
75
-
76
- class ScrollsConfig(FSConfig):
77
- def __init__(self, **kwargs):
78
- super().__init__(**kwargs)
79
-
80
- @property
81
- def source_key(self):
82
- return "input"
83
-
84
- @property
85
- def target_key(self):
86
- return "output"
87
-
88
- @property
89
- def train_file(self):
90
- return "train.jsonl"
91
-
92
- @property
93
- def validation_file(self):
94
- return "validation.jsonl"
95
-
96
- @property
97
- def test_file(self):
98
- return "test.jsonl"
99
-
100
- @property
101
- def id_key(self):
102
- return "pid"
103
-
104
-
105
- class ArxivConfig(FSConfig):
106
- def __init__(self, **kwargs):
107
- super().__init__(**kwargs)
108
-
109
- @property
110
- def id_key(self):
111
- return "article_id"
112
-
113
- @property
114
- def source_key(self):
115
- return "article_text"
116
-
117
- @property
118
- def target_key(self):
119
- return "abstract_text"
120
-
121
- @property
122
- def train_file(self):
123
- return "train.txt"
124
-
125
- @property
126
- def validation_file(self):
127
- return "val.txt"
128
-
129
- @property
130
- def test_file(self):
131
- return "test.txt"
132
-
133
- def process(self, example):
134
- example[self.source_key] = " ".join(example[self.source_key])
135
- example[self.target_key] = " ".join(example[self.target_key]).replace("<S>", "").replace("</S>", "")
136
- del example["labels"]
137
- super().process(example)
138
-
139
-
140
- class Fs(datasets.GeneratorBasedBuilder):
141
  """The SCROLLS benchmark."""
142
  DEFAULT_WRITER_BATCH_SIZE = 1000 # because Narrative QA is a rather large dataset
143
  BUILDER_CONFIGS = [
@@ -201,8 +76,7 @@ class Fs(datasets.GeneratorBasedBuilder):
201
  datasets.SplitGenerator(
202
  name=datasets.Split.TEST,
203
  gen_kwargs={
204
- "data_file": os.path.join(dl_dir, self.config.test_file) if data_files is None else data_files[
205
- "test"],
206
  },
207
  ),
208
  ]
 
 
 
 
 
1
  import json
2
  import os
 
3
 
4
  import datasets
5
  from citations_and_descriptions import (
 
8
  _ARXIV_CITATION, _ARXIV_DESCRIPTION,
9
  _FS_DESCRIPTION, _FS_CITATION,
10
  )
11
+ from configs.arxiv import ArxivConfig
12
+ from configs.scrolls import ScrollsConfig
13
 
14
 
15
+ class FS(datasets.GeneratorBasedBuilder):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  """The SCROLLS benchmark."""
17
  DEFAULT_WRITER_BATCH_SIZE = 1000 # because Narrative QA is a rather large dataset
18
  BUILDER_CONFIGS = [
 
76
  datasets.SplitGenerator(
77
  name=datasets.Split.TEST,
78
  gen_kwargs={
79
+ "data_file": os.path.join(dl_dir, self.config.test_file),
 
80
  },
81
  ),
82
  ]