Balajb committed on
Commit 359cea8 · 1 Parent(s): 7726005

Upload test-bala.py

Files changed (1)
  1. test-bala.py +170 -0
test-bala.py ADDED
@@ -0,0 +1,170 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""XSum dataset."""


import json
import os

import datasets


_CITATION = """
@article{Narayan2018DontGM,
  title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},
  author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},
  journal={ArXiv},
  year={2018},
  volume={abs/1808.08745}
}
"""

_DESCRIPTION = """
Extreme Summarization (XSum) Dataset.

There are three features:
  - document: Input news article.
  - summary: One-sentence summary of the article.
  - id: BBC ID of the article.

"""

# The archive here is a local file shipped alongside this script; the original
# XSum download is described at https://github.com/EdinburghNLP/XSum/issues/12.
_URL_DATA = "data/Xhuggies-summary-public-test.tar.gz"
_URL_SPLITS = (
    "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json"
)

_DOCUMENT = "document"
_SUMMARY = "summary"
_ID = "id"

# Boilerplate lines left over from the BBC page scrape; stripped from documents.
_REMOVE_LINES = set(
    [
        "Share this with\n",
        "Email\n",
        "Facebook\n",
        "Messenger\n",
        "Twitter\n",
        "Pinterest\n",
        "WhatsApp\n",
        "Linkedin\n",
        "LinkedIn\n",
        "Copy this link\n",
        "These are external links and will open in a new window\n",
    ]
)


class Xsum(datasets.GeneratorBasedBuilder):
    """Extreme Summarization (XSum) Dataset."""

    # Version 1.2.0 expands coverage, includes ids, and removes web contents.
    VERSION = datasets.Version("1.2.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _DOCUMENT: datasets.Value("string"),
                    _SUMMARY: datasets.Value("string"),
                    _ID: datasets.Value("string"),
                }
            ),
            supervised_keys=(_DOCUMENT, _SUMMARY),
            homepage="https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        files_to_download = {"data": _URL_DATA, "splits": _URL_SPLITS}
        downloaded_files = dl_manager.download(files_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split_path": downloaded_files["splits"],
                    "split_name": "train",
                    "data_dir": "kc-summary-data",
                    "files": dl_manager.iter_archive(downloaded_files["data"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split_path": downloaded_files["splits"],
                    "split_name": "validation",
                    "data_dir": "kc-summary-data",
                    "files": dl_manager.iter_archive(downloaded_files["data"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split_path": downloaded_files["splits"],
                    "split_name": "test",
                    "data_dir": "kc-summary-data",
                    "files": dl_manager.iter_archive(downloaded_files["data"]),
                },
            ),
        ]

    def _generate_examples(self, split_path, split_name, data_dir, files):
        """Yields examples."""

        with open(split_path, "r", encoding="utf-8") as f:
            split_ids = json.load(f)
        # Use sets so membership checks are O(1) and found ids can be removed.
        split_ids = {k: set(v) for k, v in split_ids.items()}

        for path, f in files:
            if not split_ids[split_name]:
                # Every id for this split has been found; stop scanning the archive.
                break
            elif path.startswith(data_dir) and path.endswith(".summary"):
                i = os.path.basename(path).split(".")[0]
                if i in split_ids[split_name]:
                    split_ids[split_name].remove(i)
                    text = "".join(
                        [
                            line.decode("utf-8")
                            for line in f.readlines()
                            if line.decode("utf-8") not in _REMOVE_LINES and line.strip()
                        ]
                    )
                    # Each .summary file follows the format below:
                    # [SN]URL[SN]
                    # http://somelink
                    #
                    # [SN]TITLE[SN]
                    # some intro
                    #
                    # [SN]FIRST-SENTENCE[SN]
                    # some intro
                    #
                    # [SN]RESTBODY[SN]
                    # text line.
                    # another text line.
                    # "another text line."

                    # According to the following issue, FIRST-SENTENCE
                    # is the reference summary and TITLE is unused:
                    # https://github.com/EdinburghNLP/XSum/issues/22
                    segs = text.split("[SN]")
                    yield i, {_DOCUMENT: segs[8].strip(), _SUMMARY: segs[6].strip(), _ID: i}
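
For context, here is a minimal sketch (not part of the commit) of why _generate_examples reads segs[6] and segs[8]: splitting a .summary file on the "[SN]" sentinel always places the FIRST-SENTENCE text at index 6 and the RESTBODY text at index 8, provided all four sections are present.

# Toy .summary content, mirroring the layout documented in the script.
sample = (
    "[SN]URL[SN]\n"
    "http://somelink\n"
    "[SN]TITLE[SN]\n"
    "Some title\n"
    "[SN]FIRST-SENTENCE[SN]\n"
    "A one-sentence summary.\n"
    "[SN]RESTBODY[SN]\n"
    "The full article text.\n"
)

# split("[SN]") yields ["", "URL", url, "TITLE", title,
# "FIRST-SENTENCE", summary, "RESTBODY", document],
# which is why the loader hard-codes indices 6 and 8.
segs = sample.split("[SN]")
assert segs[6].strip() == "A one-sentence summary."
assert segs[8].strip() == "The full article text."

# Hypothetical end-to-end invocation (the script path is an assumption, and it
# requires the archive referenced by _URL_DATA to exist next to the script):
# import datasets
# ds = datasets.load_dataset("./test-bala.py", split="test")

Note that a file missing any of the four sections would produce fewer than nine segments, so the indexing in the loader would raise an IndexError for malformed inputs.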