Update scripts to use data.zip instead of local data.
- ESSAI.py +38 -27
- data.zip +2 -2
- data/ESSAI_neg.txt +0 -0
- data/ESSAI_spec.txt +0 -0
ESSAI.py
CHANGED
@@ -11,7 +11,7 @@ _CITATION = """\
     url={http://clementdalloux.fr/?page_id=28},
     journal={Clément Dalloux},
     author={Dalloux, Clément}
-}
+}
 """
 
 _DESCRIPTION = """\
@@ -39,17 +39,24 @@ _HOMEPAGE = "https://clementdalloux.fr/?page_id=28"
 
 _LICENSE = 'Data User Agreement'
 
+_URL = "data.zip"
+
+
 class ESSAI(datasets.GeneratorBasedBuilder):
 
-    DEFAULT_CONFIG_NAME = "
+    DEFAULT_CONFIG_NAME = "pos"
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="pos", version="1.0.0",
+        datasets.BuilderConfig(name="pos", version="1.0.0",
+            description="The ESSAI corpora - POS Speculation task"),
 
-        datasets.BuilderConfig(name="cls", version="1.0.0",
+        datasets.BuilderConfig(name="cls", version="1.0.0",
+            description="The ESSAI corpora - CLS Negation / Speculation task"),
 
-        datasets.BuilderConfig(name="ner_spec", version="1.0.0",
-
+        datasets.BuilderConfig(name="ner_spec", version="1.0.0",
+            description="The ESSAI corpora - NER Speculation task"),
+        datasets.BuilderConfig(name="ner_neg", version="1.0.0",
+            description="The ESSAI corpora - NER Negation task"),
     ]
 
     def _info(self):
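The four `BuilderConfig`s above become the selectable configuration names at load time. A minimal usage sketch, assuming the script is loaded from a local checkout ("path/to/ESSAI.py" is a placeholder, not part of this commit):

from datasets import load_dataset

# "pos", "cls", "ner_spec", and "ner_neg" are the config names declared above;
# the script path is a placeholder for wherever this repo is checked out.
dataset = load_dataset("path/to/ESSAI.py", name="ner_neg")
print(dataset["train"][0])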
@@ -63,11 +70,19 @@ class ESSAI(datasets.GeneratorBasedBuilder):
                     "tokens": [datasets.Value("string")],
                     "lemmas": [datasets.Value("string")],
                     "pos_tags": [datasets.features.ClassLabel(
-                        names
+                        names=[
+                            'B-ABR', 'B-ADJ', 'B-ADV', 'B-DET:ART', 'B-DET:POS', 'B-INT',
+                            'B-KON', 'B-NAM', 'B-NN', 'B-NOM', 'B-NUM', 'B-PREF', 'B-PRO',
+                            'B-PRO:DEM', 'B-PRO:IND', 'B-PRO:PER', 'B-PRO:POS',
+                            'B-PRO:REL', 'B-PRP', 'B-PRP:det', 'B-PUN', 'B-PUN:cit',
+                            'B-SENT', 'B-SYM', 'B-VER:', 'B-VER:cond', 'B-VER:futu',
+                            'B-VER:impf', 'B-VER:infi', 'B-VER:pper', 'B-VER:ppre',
+                            'B-VER:pres', 'B-VER:simp', 'B-VER:subi', 'B-VER:subp'
+                        ],
                     )],
                 }
             )
-
+
         elif self.config.name.find("cls") != -1:
 
             features = datasets.Features(
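For reference, `datasets.features.ClassLabel` maps each tag string above to a stable integer id, which is what the generated examples store. A small sketch with a shortened name list (the real config uses the full list above):

import datasets

# Three tags only, for illustration; list position determines the integer id.
label = datasets.features.ClassLabel(names=['B-ABR', 'B-ADJ', 'B-ADV'])
print(label.str2int('B-ADJ'))  # 1
print(label.int2str(2))        # 'B-ADV'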
@@ -76,11 +91,11 @@ class ESSAI(datasets.GeneratorBasedBuilder):
                     "document_id": datasets.Value("string"),
                     "tokens": [datasets.Value("string")],
                     "label": datasets.features.ClassLabel(
-                        names
+                        names=['negation_speculation', 'negation', 'neutral', 'speculation'],
                     ),
                 }
             )
-
+
         elif self.config.name.find("ner") != -1:
 
             if self.config.name.find("_spec") != -1:
@@ -95,7 +110,7 @@ class ESSAI(datasets.GeneratorBasedBuilder):
                     "tokens": [datasets.Value("string")],
                     "lemmas": [datasets.Value("string")],
                     "ner_tags": [datasets.features.ClassLabel(
-                        names
+                        names=names,
                     )],
                 }
             )
@@ -111,12 +126,8 @@ class ESSAI(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
 
-
-
-
-        else:
-            data_dir = self.config.data_dir
-
+        data_dir = dl_manager.download_and_extract(_URL).rstrip("/")
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
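The removed branch read from a user-supplied `self.config.data_dir`; the new line fetches `data.zip` from the repository itself, unpacks it into the local cache, and returns the extraction directory (the `rstrip("/")` just normalizes that path). The `data/ESSAI_neg.txt` and `data/ESSAI_spec.txt` files deleted in this commit presumably now travel inside the archive, so the split generators can address them under `data_dir`. A sketch, assuming the archive keeps those filenames:

import os

def corpus_paths(data_dir: str) -> dict:
    # Assumption: data.zip contains the two text files removed from data/.
    return {
        "neg": os.path.join(data_dir, "ESSAI_neg.txt"),
        "spec": os.path.join(data_dir, "ESSAI_spec.txt"),
    }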
@@ -180,7 +191,7 @@ class ESSAI(datasets.GeneratorBasedBuilder):
             id_doc, id_word, word, lemma, tag = splitted[0:5]
             if len(splitted) >= 8:
                 tag = splitted[6]
-
+
             if tag == "@card@":
                 print(splitted)
 
@@ -201,13 +212,13 @@ class ESSAI(datasets.GeneratorBasedBuilder):
                 id_words.append(id_word)
                 words.append(word)
                 lemmas.append(lemma)
-                POS_tags.append('B-'
+                POS_tags.append(f'B-{tag}')
 
             dic = {
-                "id_docs":
+                "id_docs": np.array(list(map(int, id_docs))),
                 "id_words": id_words,
-                "words":
-                "lemmas":
+                "words": words,
+                "lemmas": lemmas,
                 "POS_tags": POS_tags,
             }
 
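The filled-in `id_docs` field now goes through `np.array(list(map(int, id_docs)))`, which assumes every document id in the first TSV column parses as an integer (and that the script imports NumPy as `np`). In isolation:

import numpy as np

id_docs = ["1", "1", "2"]  # document ids as read from the first TSV column
arr = np.array(list(map(int, id_docs)))
print(arr)        # [1 1 2]
print(arr.dtype)  # int64 (platform dependent)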
@@ -247,7 +258,7 @@ class ESSAI(datasets.GeneratorBasedBuilder):
                 continue
 
             id_doc, id_word, word, lemma, _ = line.split("\t")[0:5]
-            tag = line.replace("\n","").split("\t")[-1]
+            tag = line.replace("\n", "").split("\t")[-1]
 
             if tag == "***" or tag == "_":
                 tag = "O"
@@ -265,10 +276,10 @@ class ESSAI(datasets.GeneratorBasedBuilder):
             ner_tags.append(tag)
 
             dic = {
-                "id_docs":
+                "id_docs": np.array(list(map(int, id_docs))),
                 "id_words": id_words,
-                "words":
-                "lemmas":
+                "words": words,
+                "lemmas": lemmas,
                 "ner_tags": ner_tags,
             }
 
@@ -361,4 +372,4 @@ class ESSAI(datasets.GeneratorBasedBuilder):
 
         for r in all_res:
             if r["id"] in allowed_ids:
-                yield r["id"], r
+                yield r["id"], r
data.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:afcf4b94dae2ad4cc5e01be3592d12f1ace422629cee6f8192600f37c28b43c0
+size 1911010
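The pointer above records the new archive's digest and size, so a downloaded copy can be checked against it. A quick verification sketch in Python:

import hashlib, os

EXPECTED = "afcf4b94dae2ad4cc5e01be3592d12f1ace422629cee6f8192600f37c28b43c0"

h = hashlib.sha256()
with open("data.zip", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 16), b""):
        h.update(chunk)

print(os.path.getsize("data.zip") == 1911010)  # size from the LFS pointer
print(h.hexdigest() == EXPECTED)               # oid from the LFS pointer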
data/ESSAI_neg.txt
DELETED
The diff for this file is too large to render.
data/ESSAI_spec.txt
DELETED
The diff for this file is too large to render.