BahAdoR0101 committed on
Commit
62eaed1
1 Parent(s): ecd9cf2

Upload conll2003job.py

Browse files
Files changed (1) hide show
  1. conll2003job.py +245 -0
conll2003job.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
18
+
19
+ import os
20
+
21
+ import datasets
22
+
23
+
24
+ logger = datasets.logging.get_logger(__name__)
25
+
26
+
27
+ _CITATION = """\
28
+ @inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
29
+ title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
30
+ author = "Tjong Kim Sang, Erik F. and
31
+ De Meulder, Fien",
32
+ booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
33
+ year = "2003",
34
+ url = "https://www.aclweb.org/anthology/W03-0419",
35
+ pages = "142--147",
36
+ }
37
+ """
38
+
39
+ _DESCRIPTION = """\
40
+ The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
41
+ four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
42
+ not belong to the previous three groups.
43
+
44
+ The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
45
+ a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
46
+ a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
47
+ and the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only
48
+ if two phrases of the same type immediately follow each other, the first word of the second phrase will have tag
49
+ B-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2
50
+ tagging scheme, whereas the original dataset uses IOB1.
51
+
52
+ For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
53
+ """
54
+ _URL = "https://raw.githubusercontent.com/bahador14/dataset/main/conll2003job.zip"
55
+ _TRAINING_FILE = "train.txt"
56
+ _DEV_FILE = "valid.txt"
57
+ _TEST_FILE = "test.txt"
58
+
59
+
60
+ class Conll2003JobConfig(datasets.BuilderConfig):
61
+ """BuilderConfig for Conll2003Job"""
62
+
63
+ def __init__(self, **kwargs):
64
+ """BuilderConfig forConll2003Job.
65
+
66
+ Args:
67
+ **kwargs: keyword arguments forwarded to super.
68
+ """
69
+ super(Conll2003JobConfig, self).__init__(**kwargs)
70
+
71
+
72
+ class Conll2003Job(datasets.GeneratorBasedBuilder):
73
+ """Conll2003Job dataset."""
74
+
75
+ BUILDER_CONFIGS = [
76
+ Conll2003JobConfig(name="conll2003job", version=datasets.Version("1.0.0"), description="Conll2003Job dataset"),
77
+ ]
78
+
79
+ def _info(self):
80
+ return datasets.DatasetInfo(
81
+ description=_DESCRIPTION,
82
+ features=datasets.Features(
83
+ {
84
+ "id": datasets.Value("string"),
85
+ "tokens": datasets.Sequence(datasets.Value("string")),
86
+ "pos_tags": datasets.Sequence(
87
+ datasets.features.ClassLabel(
88
+ names=[
89
+ '"',
90
+ "''",
91
+ "#",
92
+ "$",
93
+ "(",
94
+ ")",
95
+ ",",
96
+ ".",
97
+ ":",
98
+ "``",
99
+ "CC",
100
+ "CD",
101
+ "DT",
102
+ "EX",
103
+ "FW",
104
+ "IN",
105
+ "JJ",
106
+ "JJR",
107
+ "JJS",
108
+ "LS",
109
+ "MD",
110
+ "NN",
111
+ "NNP",
112
+ "NNPS",
113
+ "NNS",
114
+ "NN|SYM",
115
+ "PDT",
116
+ "POS",
117
+ "PRP",
118
+ "PRP$",
119
+ "RB",
120
+ "RBR",
121
+ "RBS",
122
+ "RP",
123
+ "SYM",
124
+ "TO",
125
+ "UH",
126
+ "VB",
127
+ "VBD",
128
+ "VBG",
129
+ "VBN",
130
+ "VBP",
131
+ "VBZ",
132
+ "WDT",
133
+ "WP",
134
+ "WP$",
135
+ "WRB",
136
+ ]
137
+ )
138
+ ),
139
+ "chunk_tags": datasets.Sequence(
140
+ datasets.features.ClassLabel(
141
+ names=[
142
+ "O",
143
+ "B-ADJP",
144
+ "I-ADJP",
145
+ "B-ADVP",
146
+ "I-ADVP",
147
+ "B-CONJP",
148
+ "I-CONJP",
149
+ "B-INTJ",
150
+ "I-INTJ",
151
+ "B-LST",
152
+ "I-LST",
153
+ "B-NP",
154
+ "I-NP",
155
+ "B-PP",
156
+ "I-PP",
157
+ "B-PRT",
158
+ "I-PRT",
159
+ "B-SBAR",
160
+ "I-SBAR",
161
+ "B-UCP",
162
+ "I-UCP",
163
+ "B-VP",
164
+ "I-VP",
165
+ ]
166
+ )
167
+ ),
168
+ "ner_tags": datasets.Sequence(
169
+ datasets.features.ClassLabel(
170
+ names=[
171
+ "O",
172
+ "B-PER",
173
+ "I-PER",
174
+ "B-ORG",
175
+ "I-ORG",
176
+ "B-LOC",
177
+ "I-LOC",
178
+ "B-MISC",
179
+ "I-MISC",
180
+ "B-JOB_TITLE",
181
+ "I-JOB_TITLE",
182
+ ]
183
+ )
184
+ ),
185
+ }
186
+ ),
187
+ supervised_keys=None,
188
+ homepage="https://www.aclweb.org/anthology/W03-0419/",
189
+ citation=_CITATION,
190
+ )
191
+
192
+ def _split_generators(self, dl_manager):
193
+ """Returns SplitGenerators."""
194
+ downloaded_file = dl_manager.download_and_extract(_URL)
195
+ data_files = {
196
+ "train": os.path.join(downloaded_file, _TRAINING_FILE),
197
+ "dev": os.path.join(downloaded_file, _DEV_FILE),
198
+ "test": os.path.join(downloaded_file, _TEST_FILE),
199
+ }
200
+
201
+ return [
202
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
203
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
204
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
205
+ ]
206
+
207
+ def _generate_examples(self, filepath):
208
+ logger.info("⏳ Generating examples from = %s", filepath)
209
+ with open(filepath, encoding="utf-8") as f:
210
+ guid = 0
211
+ tokens = []
212
+ pos_tags = []
213
+ chunk_tags = []
214
+ ner_tags = []
215
+ for line in f:
216
+ if line.startswith("-DOCSTART-") or line == "" or line == "\n":
217
+ if tokens:
218
+ yield guid, {
219
+ "id": str(guid),
220
+ "tokens": tokens,
221
+ "pos_tags": pos_tags,
222
+ "chunk_tags": chunk_tags,
223
+ "ner_tags": ner_tags,
224
+ }
225
+ guid += 1
226
+ tokens = []
227
+ pos_tags = []
228
+ chunk_tags = []
229
+ ner_tags = []
230
+ else:
231
+ # conll2003job tokens are space separated
232
+ splits = line.split(" ")
233
+ tokens.append(splits[0])
234
+ pos_tags.append(splits[1])
235
+ chunk_tags.append(splits[2])
236
+ ner_tags.append(splits[3].rstrip())
237
+ # last example
238
+ if tokens:
239
+ yield guid, {
240
+ "id": str(guid),
241
+ "tokens": tokens,
242
+ "pos_tags": pos_tags,
243
+ "chunk_tags": chunk_tags,
244
+ "ner_tags": ner_tags,
245
+ }