change download url
fix split gen filepath
basic gen examples
- citesum.py +16 -24
citesum.py CHANGED
@@ -16,8 +16,8 @@
 # Lint as: python3
 """CiteSum dataset"""
 
-import hashlib
 import os
+import json
 
 import datasets
 
@@ -49,7 +49,7 @@ _CITATION = """\
 
 """
 
-_DOWNLOAD_URL = "https://drive.google.com/
+_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=1ndHCREXGSPnDUNllladh9qCtayqbXAfJ"
 
 
 class CiteSumConfig(datasets.BuilderConfig):
@@ -92,16 +92,19 @@ class CiteSum(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-
+        dl_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
+
+        file_mapping = {
+            datasets.Split.TRAIN: "train.json",
+            datasets.Split.VALIDATION: "val.json",
+            datasets.Split.TEST: "test.json",
+        }
+
         return [
             datasets.SplitGenerator(
                 name=split,
                 gen_kwargs={
-                    "
-                    "files_per_archive": [
-                        dl_manager.iter_archive(dl_paths["cnn_stories"]),
-                        dl_manager.iter_archive(dl_paths["dm_stories"]),
-                    ],
+                    "filepath": os.path.join(dl_path, file_mapping[split]),
                 },
             )
             for split in [
@@ -111,19 +114,8 @@ class CiteSum(datasets.GeneratorBasedBuilder):
             ]
         ]
 
-    def _generate_examples(self,
-
-
-
-
-                hash_from_path = _get_hash_from_path(path)
-                if hash_from_path in urls:
-                    article, highlights = _get_art_abs(file, self.config.version)
-                    if not article or not highlights:
-                        continue
-                    yield idx, {
-                        _ARTICLE: article,
-                        _HIGHLIGHTS: highlights,
-                        "id": hash_from_path,
-                    }
-                    idx += 1
+    def _generate_examples(self, filepath):
+
+        with open(filepath, "r") as fp:
+            for idx, line in enumerate(fp.readlines()):
+                yield idx, json.loads(line)
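
Since the new _generate_examples yields json.loads(line) directly, each split file in the extracted archive has to be JSON lines: one object per line, with keys matching the features declared in _info(). A minimal sketch for eyeballing that assumption; the extracted path is hypothetical, substitute whatever directory dl_manager.download_and_extract(_DOWNLOAD_URL) returns:

import json

# Hypothetical path to one of the extracted split files (train.json / val.json / test.json).
with open("path/to/extracted/val.json", "r", encoding="utf-8") as fp:
    record = json.loads(fp.readline())  # first JSON-lines record

# These keys must line up with the feature names declared in CiteSum._info().
print(sorted(record.keys()))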
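
A quick end-to-end check of the new download URL, split-to-file mapping, and generator is to load the script with the datasets library. The local path below is a placeholder, and recent datasets releases may additionally require trust_remote_code=True for script-based datasets:

from datasets import load_dataset

# Placeholder path to this loading script; substitute the Hub dataset id if it is hosted there.
ds = load_dataset("./citesum.py")

print(ds)              # expect train, validation, and test splits
print(ds["train"][0])  # one example parsed by json.loads in _generate_examples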