holylovenia committed
Commit 378eb49 (verified) · Parent: d9f7616

Upload sap_wat.py with huggingface_hub

Files changed (1): sap_wat.py (+177, -0)
sap_wat.py ADDED
@@ -0,0 +1,177 @@
+ from pathlib import Path
+ from typing import List
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _DATASETNAME = "sap_wat"
+
+ # ISO 639-3 codes: English plus the four covered target languages (Indonesian, Malay, Thai, Vietnamese).
+ _LANGUAGES = ["eng", "ind", "zlm", "tha", "vie"]
+
+ _CITATION = """\
+ @inproceedings{buschbeck-exel-2020-parallel,
+     title = "A Parallel Evaluation Data Set of Software Documentation with Document Structure Annotation",
+     author = "Buschbeck, Bianka and
+       Exel, Miriam",
+     editor = "Nakazawa, Toshiaki and
+       Nakayama, Hideki and
+       Ding, Chenchen and
+       Dabre, Raj and
+       Kunchukuttan, Anoop and
+       Pa, Win Pa and
+       Bojar, Ond{\v{r}}ej and
+       Parida, Shantipriya and
+       Goto, Isao and
+       Mino, Hideya and
+       Manabe, Hiroshi and
+       Sudoh, Katsuhito and
+       Kurohashi, Sadao and
+       Bhattacharyya, Pushpak",
+     booktitle = "Proceedings of the 7th Workshop on Asian Translation",
+     month = dec,
+     year = "2020",
+     address = "Suzhou, China",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2020.wat-1.20",
+     pages = "160--169",
+     abstract = "This paper accompanies the software documentation data set for machine translation, a parallel
+     evaluation data set of data originating from the SAP Help Portal, that we released to the machine translation
+     community for research purposes. It offers the possibility to tune and evaluate machine translation systems
+     in the domain of corporate software documentation and contributes to the availability of a wider range of
+     evaluation scenarios. The data set comprises of the language pairs English to Hindi, Indonesian, Malay and
+     Thai, and thus also increases the test coverage for the many low-resource language pairs. Unlike most evaluation
+     data sets that consist of plain parallel text, the segments in this data set come with additional metadata that
+     describes structural information of the document context. We provide insights into the origin and creation, the
+     particularities and characteristics of the data set as well as machine translation results.",
+ }
+
+ """
+
+ _DESCRIPTION = """The data set originates from the SAP Help Portal, which contains documentation for SAP products and user
+ assistance for product-related questions. The data has been processed to make it suitable as development and
+ test data for machine translation purposes. The current language scope is English to Hindi, Indonesian, Japanese, Korean,
+ Malay, Thai, Vietnamese, Simplified Chinese and Traditional Chinese. For each language pair about 4k segments are available,
+ split into development and test data. The segments are provided in their document context and are annotated with additional
+ metadata from the document."""
+
+ _HOMEPAGE = "https://github.com/SAP/software-documentation-data-set-for-machine-translation"
+
+ _LICENSE = Licenses.CC_BY_NC_4_0.value
+
+ _URLs = {
+     _DATASETNAME: "https://raw.githubusercontent.com/SAP/software-documentation-data-set-for-machine-translation/master/{split}_data/en{lang}/software_documentation.{split}.en{lang}.{appx}"
+ }
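+ # For example, with split="dev", lang="id", appx="id" the template expands to:
+ # https://raw.githubusercontent.com/SAP/software-documentation-data-set-for-machine-translation/master/dev_data/enid/software_documentation.dev.enid.id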
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+ # Target-language codes as they appear in the repository's file paths and in config names.
+ _SUBSET = ["id", "ms", "th", "vi"]
+
+ _LOCAL = False
+
+ class SapWatDataset(datasets.GeneratorBasedBuilder):
+     """SAP WAT is a software documentation dataset for machine translation. The full language scope is English to Hindi,
+     Indonesian, Japanese, Korean, Malay, Thai, Vietnamese, Simplified Chinese and Traditional Chinese; here we only
+     consider EN-ID, EN-MS, EN-TH and EN-VI."""
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_en_{lang}_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description=f"SAP WAT source schema for EN-{lang.upper()}",
+             schema="source",
+             subset_id=f"{_DATASETNAME}_en_{lang}",
+         )
+         for lang in _SUBSET
+     ] + [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_en_{lang}_seacrowd_t2t",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"SAP WAT SEACrowd schema for EN-{lang.upper()}",
+             schema="seacrowd_t2t",
+             subset_id=f"{_DATASETNAME}_en_{lang}",
+         )
+         for lang in _SUBSET
+     ]
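+     # Each target language thus gets two configs, e.g. "sap_wat_en_id_source" and "sap_wat_en_id_seacrowd_t2t".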
+
+     DEFAULT_CONFIG_NAME = "sap_wat_en_id_source"
+
+     def _info(self):
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "label": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "seacrowd_t2t":
+             features = schemas.text2text_features
+         else:
+             # Guard against unknown schemas so `features` is never unbound below.
+             raise ValueError(f"Invalid config schema: {self.config.schema}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         # Config names look like "sap_wat_en_id_source", so index 3 is the target-language code.
+         lang = self.config.name.split("_")[3]
+
+         splits = {datasets.Split.VALIDATION: "dev", datasets.Split.TEST: "test"}
+
+         # Target-language side of the parallel corpus.
+         data_urls = {split: _URLs[_DATASETNAME].format(split=splits[split], lang=lang, appx=lang) for split in splits}
+         dl_paths = dl_manager.download(data_urls)
+
+         # English side; these files are line-aligned with the target-language files.
+         en_data_urls = {split: _URLs[_DATASETNAME].format(split=splits[split], lang=lang, appx="en") for split in splits}
+         en_dl_paths = dl_manager.download(en_data_urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=split,
+                 gen_kwargs={"filepath": dl_paths[split], "en_filepath": en_dl_paths[split]},
+             )
+             for split in splits
+         ]
+
+     def _generate_examples(self, filepath: Path, en_filepath: Path):
+         with open(en_filepath, "r", encoding="utf-8") as f:
+             lines_1 = f.readlines()
+         with open(filepath, "r", encoding="utf-8") as f:
+             lines_2 = f.readlines()
+
+         if self.config.schema == "source":
+             for _id, (line_1, line_2) in enumerate(zip(lines_1, lines_2)):
+                 ex = {
+                     "id": str(_id),
+                     "text": line_1.strip(),
+                     "label": line_2.strip(),
+                 }
+                 yield _id, ex
+
+         elif self.config.schema == "seacrowd_t2t":
+             lang = self.config.name.split("_")[3]
+             # _LANGUAGES[0] is "eng"; the target codes in _SUBSET map to _LANGUAGES[1:]
+             # (id -> ind, ms -> zlm, th -> tha, vi -> vie).
+             lang_name = _LANGUAGES[_SUBSET.index(lang) + 1]
+
+             for _id, (line_1, line_2) in enumerate(zip(lines_1, lines_2)):
+                 ex = {
+                     "id": str(_id),
+                     "text_1": line_1.strip(),
+                     "text_2": line_2.strip(),
+                     "text_1_name": "eng",
+                     "text_2_name": lang_name,
+                 }
+                 yield _id, ex
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
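
Once the script is in the repo it can be exercised directly. A minimal usage sketch, assuming the file is available locally as sap_wat.py; depending on the installed datasets version, loading script-based datasets may require trust_remote_code=True (and the newest 3.x releases drop script support entirely):

import datasets

# Load the English-Indonesian test split through the SEACrowd text-to-text schema.
dataset = datasets.load_dataset(
    "sap_wat.py",
    name="sap_wat_en_id_seacrowd_t2t",
    split="test",
    trust_remote_code=True,
)
print(dataset[0])  # {"id": "0", "text_1": <English segment>, "text_2": <Indonesian segment>, ...}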