holylovenia committed
Commit 31d9bc6 (verified)
1 Parent(s): 285715c

Upload wikilingua.py with huggingface_hub
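For context, uploads like this one are typically done through the huggingface_hub client API. A minimal sketch, assuming the script sits in the working directory; the repo id below is an illustrative assumption, not taken from this page:

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="wikilingua.py",  # local dataset script
    path_in_repo="wikilingua.py",     # destination path inside the repo
    repo_id="SEACrowd/wikilingua",    # assumed repo id, for illustration only
    repo_type="dataset",
)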

Files changed (1)
  1. wikilingua.py +142 -0
wikilingua.py ADDED
@@ -0,0 +1,142 @@
+ import pickle
+ from pathlib import Path
+ from typing import Dict, Iterator, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{ladhak-wiki-2020,
+ title={WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},
+ author={Faisal Ladhak and Esin Durmus and Claire Cardie and Kathleen McKeown},
+ booktitle={Findings of EMNLP, 2020},
+ year={2020}
+ }
+ """
+
+ _DATASETNAME = "wikilingua"
+
+ _DESCRIPTION = """\
+ We introduce WikiLingua, a large-scale, multilingual dataset for the evaluation of cross-lingual abstractive
+ summarization systems. We extract article and summary pairs in 18 languages from WikiHow, a high-quality,
+ collaborative resource of how-to guides on a diverse set of topics written by human authors. We create gold-standard
+ article-summary alignments across languages by aligning the images that are used to describe each how-to step in an
+ article.
+ """
+
+ _HOMEPAGE = "https://github.com/esdurmus/Wikilingua"
+
+ _LANGUAGES = ["ind"]
+
+ _LICENSE = "CC-BY-NC-SA 3.0"
+
+ _LOCAL = False
+
+ _URLS = {
+     _DATASETNAME: "https://drive.google.com/u/0/uc?id=1PGa8j1_IqxiGTc3SU6NMB38sAzxCPS34&export=download"
+ }
+
+ _SUPPORTED_TASKS = [Tasks.SUMMARIZATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class Wikilingua(datasets.GeneratorBasedBuilder):
+     """
+     The dataset includes 47,511 articles from WikiHow. Gold-standard article-summary alignments across
+     languages were extracted by aligning the images that are used to describe each how-to step in an article.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name="wikilingua_source",
+             version=SOURCE_VERSION,
+             description="wikilingua source schema",
+             schema="source",
+             subset_id="wikilingua",
+         ),
+         SEACrowdConfig(
+             name="wikilingua_seacrowd_t2t",
+             version=SEACROWD_VERSION,
+             description="wikilingua SEACrowd schema",
+             schema="seacrowd_t2t",
+             subset_id="wikilingua",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "wikilingua_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         features = []
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int64"),
+                     "link": datasets.Value("string"),
+                     "main_point": datasets.Value("string"),
+                     "summary": datasets.Value("string"),
+                     "document": datasets.Value("string"),
+                     "english_section_name": datasets.Value("string"),
+                     "english_url": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "seacrowd_t2t":
+             features = schemas.text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         urls = _URLS[_DATASETNAME]
+         # The URL points to a single pickled file; download_and_extract returns its local path.
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir,
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Iterator[Tuple[int, Dict]]:
+         """Yields examples as (key, example) tuples."""
+
+         # The pickle holds a nested mapping: article link -> main point (section) ->
+         # {"summary", "document", "english_section_name", "english_url"}.
+         with open(filepath, "rb") as file:
+             indonesian_docs = pickle.load(file)
+
+         if self.config.schema == "source":
+             _id = 1
+             for key_link, articles in indonesian_docs.items():
+                 for main_point, items in articles.items():
+                     example = {
+                         "id": _id,
+                         "link": key_link,
+                         "main_point": main_point,
+                         "summary": items["summary"],
+                         "document": items["document"],
+                         "english_section_name": items["english_section_name"],
+                         "english_url": items["english_url"],
+                     }
+                     yield _id, example
+                     _id += 1
+         elif self.config.schema == "seacrowd_t2t":
+             _id = 1
+             for key_link, articles in indonesian_docs.items():
+                 for main_point, items in articles.items():
+                     example = {
+                         "id": _id,
+                         "text_1": items["document"],
+                         "text_2": items["summary"],
+                         "text_1_name": "document",
+                         "text_2_name": "summary",
+                     }
+                     yield _id, example
+                     _id += 1
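
Once uploaded, the builder can be exercised directly from the script. A minimal usage sketch, assuming the seacrowd package (providing schemas and SEACrowdConfig) is installed and wikilingua.py is available locally; the config name comes from BUILDER_CONFIGS above:

import datasets

# Load the source-schema config from the local script; script-based datasets
# require trust_remote_code in recent versions of the datasets library.
dataset = datasets.load_dataset("wikilingua.py", name="wikilingua_source", trust_remote_code=True)
print(dataset["train"][0]["summary"])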