Datasets:

Languages:
Burmese
ArXiv:
License:
holylovenia committed on
Commit
eb48830
·
verified ·
1 Parent(s): 84995f7

Upload my_paraphrase.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. my_paraphrase.py +200 -0
my_paraphrase.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from pathlib import Path
17
+ from typing import Dict, List, Tuple
18
+
19
+ import datasets
20
+ import pandas as pd
21
+
22
+ from seacrowd.utils import schemas
23
+ from seacrowd.utils.configs import SEACrowdConfig
24
+ from seacrowd.utils.constants import Licenses, Tasks
25
+
26
+ _CITATION = """\
27
+ @article{htay2022deep,
28
+ title={Deep Siamese Neural Network Vs Random Forest for Myanmar Language Paraphrase Classification},
29
+ author={Htay, Myint Myint and Thu, Ye Kyaw and Thant, Hnin Aye and Supnithi, Thepchai},
30
+ journal={Journal of Intelligent Informatics and Smart Technology},
31
+ year={2022}
32
+ }
33
+ """
34
+
35
+ _DATASETNAME = "my_paraphrase"
36
+
37
+ _DESCRIPTION = """\
38
+ The myParaphrase corpus is intended for the task of assessing whether pairs of Burmese sentences exhibit similar meanings \
39
+ or are paraphrases. It encompasses 40461 pairs for training, along with 1000 pairs for an open test and an additional 1000 pairs \
40
+ for a closed test. If a pair of sentences in Burmese is considered a paraphrase, it is labeled with "1"; if not, they receive a label of "0."
41
+ """
42
+
43
+ _HOMEPAGE = "https://github.com/ye-kyaw-thu/myParaphrase"
44
+
45
+ _LANGUAGES = ["mya"]
46
+
47
+ _LICENSE = Licenses.CC_BY_NC_SA_4_0.value
48
+ _LOCAL = False
49
+
50
+ _URLS = {
51
+ _DATASETNAME: [
52
+ "https://github.com/ye-kyaw-thu/myParaphrase/raw/main/corpus/ver1.0/csv-qqp/train.csv",
53
+ "https://github.com/ye-kyaw-thu/myParaphrase/raw/main/corpus/ver1.0/csv-qqp/open-test.final.manual.csv",
54
+ "https://github.com/ye-kyaw-thu/myParaphrase/raw/main/corpus/ver1.0/csv-qqp/closed-test.csv",
55
+ ],
56
+ }
57
+
58
+ _SUPPORTED_TASKS = [Tasks.PARAPHRASING]
59
+ _SOURCE_VERSION = "1.0.0"
60
+ _SEACROWD_VERSION = "2024.06.20"
61
+ _TAGS = [0, 1]
62
+
63
+
64
+ class MyParaphraseDataset(datasets.GeneratorBasedBuilder):
65
+ """The "myParaphrase" corpus is a Burmese dataset used for paraphrase identification. \
66
+ It includes 40,461 training pairs and 2,000 test pairs. Pairs are labeled "1" for paraphrases and "0" otherwise."""
67
+
68
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
69
+ SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
70
+ SEACROWD_SCHEMA_NAME = "t2t"
71
+
72
+ BUILDER_CONFIGS = [
73
+ SEACrowdConfig(
74
+ name=f"{_DATASETNAME}_source", # source
75
+ version=SOURCE_VERSION,
76
+ description=f"{_DATASETNAME} source schema",
77
+ schema="source",
78
+ subset_id=f"{_DATASETNAME}_paraphrase",
79
+ ),
80
+ SEACrowdConfig(
81
+ name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}", # schema
82
+ version=SEACROWD_VERSION,
83
+ description=f"{_DATASETNAME} SEACrowd schema",
84
+ schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
85
+ subset_id=f"{_DATASETNAME}_paraphrase",
86
+ ),
87
+ SEACrowdConfig(
88
+ name=f"{_DATASETNAME}_non_paraphrase_source", # source
89
+ version=SEACROWD_VERSION,
90
+ description=f"{_DATASETNAME} SEACrowd schema",
91
+ schema="source",
92
+ subset_id=f"{_DATASETNAME}_non_paraphrase",
93
+ ),
94
+ SEACrowdConfig(
95
+ name=f"{_DATASETNAME}_non_paraphrase_seacrowd_{SEACROWD_SCHEMA_NAME}", # schema
96
+ version=SEACROWD_VERSION,
97
+ description=f"{_DATASETNAME} SEACrowd schema",
98
+ schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
99
+ subset_id=f"{_DATASETNAME}_non_paraphrase",
100
+ ),
101
+ SEACrowdConfig(
102
+ name=f"{_DATASETNAME}_all_source", # source
103
+ version=SOURCE_VERSION,
104
+ description=f"{_DATASETNAME} source schema",
105
+ schema="source",
106
+ subset_id=f"{_DATASETNAME}_all",
107
+ ),
108
+ SEACrowdConfig(
109
+ name=f"{_DATASETNAME}_all_seacrowd_{SEACROWD_SCHEMA_NAME}", # schema
110
+ version=SEACROWD_VERSION,
111
+ description=f"{_DATASETNAME} SEACrowd schema",
112
+ schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
113
+ subset_id=f"{_DATASETNAME}_all",
114
+ ),
115
+ ]
116
+
117
+ DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}"
118
+
119
+ def _info(self) -> datasets.DatasetInfo:
120
+ if self.config.schema.endswith("_source"):
121
+ features = datasets.Features({"id": datasets.Value("int32"), "paraphrase1": datasets.Value("string"), "paraphrase2": datasets.Value("string"), "is_paraphrase": datasets.Value("int32")})
122
+
123
+ elif self.config.schema.endswith(self.SEACROWD_SCHEMA_NAME):
124
+ features = schemas.text2text_features
125
+
126
+ else:
127
+ raise ValueError
128
+
129
+ return datasets.DatasetInfo(
130
+ description=_DESCRIPTION,
131
+ features=features,
132
+ homepage=_HOMEPAGE,
133
+ license=_LICENSE,
134
+ citation=_CITATION,
135
+ )
136
+
137
+ def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
138
+ urls = _URLS[_DATASETNAME]
139
+ train = dl_manager.download(urls[0])
140
+ open_test = dl_manager.download(urls[1])
141
+ closed_test = dl_manager.download(urls[2])
142
+
143
+ return [
144
+ datasets.SplitGenerator(
145
+ name=datasets.Split.TRAIN,
146
+ # Whatever you put in gen_kwargs will be passed to _generate_examples
147
+ gen_kwargs={
148
+ "filepath": train,
149
+ "split": "train",
150
+ },
151
+ ),
152
+ datasets.SplitGenerator(
153
+ name=datasets.Split.TEST,
154
+ gen_kwargs={
155
+ "filepath": closed_test,
156
+ "split": "test",
157
+ },
158
+ ),
159
+ datasets.SplitGenerator(
160
+ name=datasets.Split.VALIDATION,
161
+ gen_kwargs={
162
+ "filepath": open_test,
163
+ "split": "dev",
164
+ },
165
+ ),
166
+ ]
167
+
168
+ def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
169
+ """Yields examples as (key, example) tuples."""
170
+
171
+ columns = ["id", "paraphrase1", "paraphrase2", "is_paraphrase"]
172
+ dataset = pd.read_csv(filepath, header=None)
173
+ dataset.columns = columns
174
+ dataset = dataset.dropna()
175
+
176
+ dataset["is_paraphrase"] = dataset["is_paraphrase"].astype(int)
177
+
178
+ if self.config.schema in [
179
+ "paraphrase_source",
180
+ "non_paraphrase_source",
181
+ "all_source",
182
+ # "source"
183
+ ]:
184
+ for i, row in dataset.iterrows():
185
+ yield i, {"id": i, "paraphrase1": row["paraphrase1"], "paraphrase2": row["paraphrase2"], "is_paraphrase": row["is_paraphrase"]}
186
+
187
+ elif self.config.schema == f"seacrowd_paraphrase_{self.SEACROWD_SCHEMA_NAME}":
188
+ for i, row in dataset[dataset["is_paraphrase"] == 1].iterrows():
189
+ yield i, {"id": i, "text_1": row["paraphrase1"], "text_2": row["paraphrase2"], "text_1_name": "anchor_text", "text_2_name": "paraphrased_text"}
190
+
191
+ elif self.config.schema == f"seacrowd_non_paraphrase_{self.SEACROWD_SCHEMA_NAME}":
192
+ for i, row in dataset[dataset["is_paraphrase"] == 0].iterrows():
193
+ yield i, {"id": i, "text_1": row["paraphrase1"], "text_2": row["paraphrase2"], "text_1_name": "anchor_text", "text_2_name": "non_paraphrased_text"}
194
+
195
+ elif self.config.schema == f"seacrowd_all_{self.SEACROWD_SCHEMA_NAME}":
196
+ for i, row in dataset.iterrows():
197
+ yield i, {"id": i, "text_1": row["paraphrase1"], "text_2": row["paraphrase2"], "text_1_name": "anchor_text", "text_2_name": "paraphrased_text" if row["is_paraphrase"] else "non_paraphrased_text"}
198
+
199
+ else:
200
+ raise ValueError