Yaxin committed on
Commit 839637f · 1 Parent(s): 65165d2

Create new file

Files changed (1):
  1. SemEval2016Task5NLTK.py +294 -0
SemEval2016Task5NLTK.py ADDED
@@ -0,0 +1,294 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """The Multilingual SemEval2016 Task5 Reviews Corpus"""
+
+ from xml.dom.minidom import parse
+
+ import datasets
+ import nltk
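+ # NOTE: nltk.word_tokenize relies on the NLTK "punkt" tokenizer models;
+ # run nltk.download("punkt") once if they are not installed locally.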
+
+ _CITATION = """\
+ @inproceedings{pontiki2016semeval,
+   title={Semeval-2016 task 5: Aspect based sentiment analysis},
+   author={Pontiki, Maria and Galanis, Dimitrios and Papageorgiou, Haris and Androutsopoulos, Ion and Manandhar, Suresh and Al-Smadi, Mohammad and Al-Ayyoub, Mahmoud and Zhao, Yanyan and Qin, Bing and De Clercq, Orph{\'e}e and others},
+   booktitle={International workshop on semantic evaluation},
+   pages={19--30},
+   year={2016}
+ }
+ """
+
+ _LICENSE = """\
+ Please click on the homepage URL for license details.
+ """
+
+ _DESCRIPTION = """\
+ A collection of SemEval2016 Task 5 datasets, specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.
+ """
+
+ _CONFIG = [
+     # Restaurants domain
+     "restaurants_english",
+     "restaurants_french",
+     "restaurants_spanish",
+     "restaurants_russian",
+     "restaurants_dutch",
+     "restaurants_turkish",
+
+     # Hotels domain
+     "hotels_arabic",
+
+     # Consumer electronics domain
+     "mobilephones_dutch",
+     "mobilephones_chinese",
+     "laptops_english",
+     "digitalcameras_chinese",
+ ]
+
+ _VERSION = "0.1.0"
+
+ _HOMEPAGE_URL = "https://alt.qcri.org/semeval2016/task5/index.php?id=data-and-tools/"
+ _DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2016Task5Corrected/{split}/{domain}_{split}_{lang}.xml"
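+ # The template expands to, e.g., .../SemEval2016Task5Corrected/train/restaurants_train_english.xml;
+ # the validation split is built from the task's "trial" files (see _split_generators).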
+
+
+ class SemEval2016Task5NLTKConfig(datasets.BuilderConfig):
+     """BuilderConfig for SemEval2016Task5NLTK."""
+
+     def __init__(self, _CONFIG, **kwargs):
+         super(SemEval2016Task5NLTKConfig, self).__init__(version=datasets.Version(_VERSION, ""), **kwargs)
+         self.configs = _CONFIG
+
+
+ class SemEval2016Task5NLTK(datasets.GeneratorBasedBuilder):
+     """The Multilingual SemEval2016 ABSA Corpus"""
+
+     BUILDER_CONFIGS = [
+         SemEval2016Task5NLTKConfig(
+             name="All",
+             _CONFIG=_CONFIG,
+             description="A collection of SemEval2016 Task 5 datasets, specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.",
+         )
+     ] + [
+         SemEval2016Task5NLTKConfig(
+             name=config,
+             _CONFIG=[config],
+             description=f"The {config} subset of SemEval2016 Task 5, specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.",
+         )
+         for config in _CONFIG
+     ]
+
+     BUILDER_CONFIG_CLASS = SemEval2016Task5NLTKConfig
+     DEFAULT_CONFIG_NAME = "restaurants_english"
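+     # The "All" config bundles every domain/language pair; each pair is also
+     # exposed as its own config, with restaurants_english as the default.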
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     'text': datasets.Value(dtype='string'),
+                     'opinions': [
+                         {
+                             'category': datasets.Value(dtype='string'),
+                             'from': datasets.Value(dtype='string'),
+                             'polarity': datasets.Value(dtype='string'),
+                             'target': datasets.Value(dtype='string'),
+                             'to': datasets.Value(dtype='string'),
+                         }
+                     ],
+                     'tokens': [datasets.Value(dtype='string')],
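+                     # Tag columns: ATESP_* tags carry a polarity suffix
+                     # (e.g. "B-POS"); ATE_* tags keep only the span letter.
+                     # BIEOS = Begin/Inside/End/Outside/Single; the BIO variant
+                     # folds E- into I- and S- into B-.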
+                     'ATESP_BIEOS_tags': [datasets.Value(dtype='string')],
+                     'ATESP_BIO_tags': [datasets.Value(dtype='string')],
+                     'ATE_BIEOS_tags': [datasets.Value(dtype='string')],
+                     'ATE_BIO_tags': [datasets.Value(dtype='string')],
+
+                     'domain': datasets.Value(dtype='string'),
+                     'reviewId': datasets.Value(dtype='string'),
+                     'sentenceId': datasets.Value(dtype='string'),
+                 }
+             ),
+             supervised_keys=None,
+             license=_LICENSE,
+             homepage=_HOMEPAGE_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         domain_list = []
+         lang_list = []
+
+         for config in self.config.configs:
+             domain, lang = config.split('_')
+             domain_list.append(domain)
+             lang_list.append(lang)
+
+         train_urls = [_DOWNLOAD_URL.format(split="train", domain=d, lang=l) for d, l in zip(domain_list, lang_list)]
+         dev_urls = [_DOWNLOAD_URL.format(split="trial", domain=d, lang=l) for d, l in zip(domain_list, lang_list)]
+         test_urls = [_DOWNLOAD_URL.format(split="test", domain=d, lang=l) for d, l in zip(domain_list, lang_list)]
+
+         train_paths = dl_manager.download_and_extract(train_urls)
+         dev_paths = dl_manager.download_and_extract(dev_urls)
+         test_paths = dl_manager.download_and_extract(test_urls)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths, "lang_list": lang_list, "domain_list": domain_list}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths, "lang_list": lang_list, "domain_list": domain_list}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths, "lang_list": lang_list, "domain_list": domain_list}),
+         ]
+
+     def _generate_examples(self, file_paths, lang_list, domain_list):
+         assert len(file_paths) == len(lang_list) == len(domain_list)
+         row_count = 0
+
+         for file_path, domain, language in zip(file_paths, domain_list, lang_list):
+             semEvalDataset = SemEvalXMLDataset(file_path, language, domain)
+
+             for example in semEvalDataset.SentenceWithOpinions:
+                 yield row_count, example
+                 row_count += 1
+
+
+ # Input: the path to a SemEval XML file.
+ # Output: a dataset whose examples each contain [reviewId, sentenceId, text, UniOpinions].
+ # The opinions of each example form a list holding the details of every single opinion.
+
+
+ class SemEvalXMLDataset():
+     def __init__(self, file_name, language, domain):
+         # Build SentenceWithOpinions, a list of examples containing
+         # (reviewId, sentenceId, text, Opinions)
+         self.SentenceWithOpinions = []
+         self.xml_path = file_name
+
+         self.sentenceXmlList = parse(self.xml_path).getElementsByTagName('sentence')
+
+         for sentenceXml in self.sentenceXmlList:
+             reviewId = sentenceXml.getAttribute("id").split(':')[0]
+             sentenceId = sentenceXml.getAttribute("id")
+             if len(sentenceXml.getElementsByTagName("text")[0].childNodes) < 1:
+                 # skip sentences with empty review text
+                 continue
+             text = sentenceXml.getElementsByTagName("text")[0].childNodes[0].nodeValue
+             OpinionXmlList = sentenceXml.getElementsByTagName("Opinion")
+             Opinions = []
+             for opinionXml in OpinionXmlList:
+                 # some sentences may have no opinions at all
+                 target = opinionXml.getAttribute("target")
+                 category = opinionXml.getAttribute("category")
+                 polarity = opinionXml.getAttribute("polarity")
+                 from_ = opinionXml.getAttribute("from")
+                 to = opinionXml.getAttribute("to")
+
+                 opinionDict = {
+                     "target": target,
+                     "category": category,
+                     "polarity": polarity,
+                     "from": from_,
+                     "to": to,
+                 }
+                 Opinions.append(opinionDict)
+
+             # sort ascending by numeric start offset (missing offsets sort first)
+             Opinions.sort(key=lambda x: int(x["from"]) if x["from"] else 0)
+             example = {
+                 "text": text,
+                 "opinions": Opinions,
+                 "domain": domain,
+                 "reviewId": reviewId,
+                 "sentenceId": sentenceId,
+             }
+             example = addTokenAndLabel(example)
+             self.SentenceWithOpinions.append(example)
+
+
+ def clearOpinion(example):
+     opinions = example['opinions']
+     skipNullOpinions = []
+     # drop NULL opinions
+     for opinion in opinions:
+         target = opinion['target']
+         from_ = opinion['from']
+         to = opinion['to']
+         # skip NULL or empty targets and empty spans
+         if target.lower() == 'null' or target == '' or from_ == to:
+             continue
+         skipNullOpinions.append(opinion)
+
+     # delete repeated opinions
+     skipNullOpinions.sort(key=lambda x: int(x['from']))  # sort ascending by start offset
+     UniOpinions = []
+     for opinion in skipNullOpinions:
+         if len(UniOpinions) < 1:
+             UniOpinions.append(opinion)
+         elif int(opinion['from']) >= int(UniOpinions[-1]['to']):
+             # keep only spans that do not overlap the previously kept span;
+             # this also drops exact duplicates, which the downstream
+             # BIEOS tagging cannot represent
+             UniOpinions.append(opinion)
+     return UniOpinions
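+ # Worked example (constructed): for the text "The fish was great" with a single
+ # opinion {"target": "fish", "from": "4", "to": "8", "polarity": "positive"},
+ # addTokenAndLabel below yields tokens ['The', 'fish', 'was', 'great'] and
+ # ATESP_BIEOS_tags ['O', 'S-POS', 'O', 'O'].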
+
+
+ def addTokenAndLabel(example):
+     tokens = []
+     labels = []
+
+     text = example['text']
+     UniOpinions = clearOpinion(example)
+     text_begin = 0
+
+     for aspect in UniOpinions:
+         polarity = aspect['polarity'][:3].upper()
+         # tokens before the aspect term are tagged O
+         pre_O_tokens = nltk.word_tokenize(text[text_begin: int(aspect['from'])])
+         tokens.extend(pre_O_tokens)
+         labels.extend(['O'] * len(pre_O_tokens))
+
+         BIES_tokens = nltk.word_tokenize(text[int(aspect['from']): int(aspect['to'])])
+         tokens.extend(BIES_tokens)
+
+         assert len(BIES_tokens) > 0, 'error in BIES_tokens length'
+
+         # S = single-token aspect; otherwise B marks the first token,
+         # E the last, and I everything in between
+         if len(BIES_tokens) == 1:
+             labels.append('S-' + polarity)
+         elif len(BIES_tokens) == 2:
+             labels.append('B-' + polarity)
+             labels.append('E-' + polarity)
+         else:
+             labels.append('B-' + polarity)
+             labels.extend(['I-' + polarity] * (len(BIES_tokens) - 2))
+             labels.append('E-' + polarity)
+
+         text_begin = int(aspect['to'])
+
+     # everything after the last aspect term is tagged O
+     pre_O_tokens = nltk.word_tokenize(text[text_begin:])
+     labels.extend(['O'] * len(pre_O_tokens))
+     tokens.extend(pre_O_tokens)
+
+     example['tokens'] = tokens
+     example['ATESP_BIEOS_tags'] = labels
+
+     # BIO variant: fold E- into I- and S- into B-
+     ATESP_BIO_labels = []
+     for label in labels:
+         ATESP_BIO_labels.append(label.replace('E-', 'I-').replace('S-', 'B-'))
+     example['ATESP_BIO_tags'] = ATESP_BIO_labels
+
+     # ATE variants keep only the leading span letter and drop the polarity suffix
+     ATE_BIEOS_labels = []
+     for label in labels:
+         ATE_BIEOS_labels.append(label[0])
+     example['ATE_BIEOS_tags'] = ATE_BIEOS_labels
+
+     ATE_BIO_labels = []
+     for label in ATESP_BIO_labels:
+         ATE_BIO_labels.append(label[0])
+     example['ATE_BIO_tags'] = ATE_BIO_labels
+
+     return example
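
For reference, a minimal usage sketch (the local path is illustrative; any name
from _CONFIG, or "All", can be passed as the config name):

    from datasets import load_dataset

    ds = load_dataset("./SemEval2016Task5NLTK.py", "restaurants_english")
    print(ds["train"][0]["tokens"])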