Update CAS.py
CAS.py CHANGED
@@ -59,6 +59,39 @@ _HOMEPAGE = "https://clementdalloux.fr/?page_id=28"
 
 _LICENSE = 'Data User Agreement'
 
+class StringIndex:
+
+    def __init__(self, vocab):
+
+        self.vocab_struct = {}
+
+        print("Start building the index!")
+        for t in vocab:
+
+            if len(t) == 0:
+                continue
+
+            # Index terms by their first letter and length
+            key = (t[0], len(t))
+
+            if (key in self.vocab_struct) == False:
+                self.vocab_struct[key] = []
+
+            self.vocab_struct[key].append(t)
+
+        print("Finished building the index!")
+
+    def find(self, t):
+
+        key = (t[0], len(t))
+
+        if (key in self.vocab_struct) == False:
+            return "is_oov"
+
+        return "is_not_oov" if t in self.vocab_struct[key] else "is_oov"
+
+_VOCAB = StringIndex(vocab=open("./vocabulary_nachos_lowercased.txt","r").read().split("\n"))
+
 class CAS(datasets.GeneratorBasedBuilder):
 
     DEFAULT_CONFIG_NAME = "pos_spec"
@@ -86,6 +119,11 @@ class CAS(datasets.GeneratorBasedBuilder):
                     "pos_tags": [datasets.features.ClassLabel(
                         names = ['B-INT', 'B-PRO:DEM', 'B-VER:impf', 'B-VER:ppre', 'B-PRP:det', 'B-KON', 'B-VER:pper', 'B-PRP', 'B-PRO:IND', 'B-VER:simp', 'B-VER:con', 'B-SENT', 'B-VER:futu', 'B-PRO:PER', 'B-VER:infi', 'B-ADJ', 'B-NAM', 'B-NUM', 'B-PUN:cit', 'B-PRO:REL', 'B-VER:subi', 'B-ABR', 'B-NOM', 'B-VER:pres', 'B-DET:ART', 'B-VER:cond', 'B-VER:subp', 'B-DET:POS', 'B-ADV', 'B-SYM', 'B-PUN'],
                     )],
+                    "is_oov": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=['is_not_oov', 'is_oov'],
+                        ),
+                    ),
                 }
             )
 
@@ -120,6 +158,11 @@ class CAS(datasets.GeneratorBasedBuilder):
                     "ner_tags": [datasets.features.ClassLabel(
                         names = names,
                     )],
+                    "is_oov": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=['is_not_oov', 'is_oov'],
+                        ),
+                    ),
                 }
             )
 
@@ -249,6 +292,7 @@ class CAS(datasets.GeneratorBasedBuilder):
                     "tokens": tokens,
                     "lemmas": text_lemmas,
                     "pos_tags": pos_tags,
+                    "is_oov": [_VOCAB.find(tt.lower()) for tt in tokens],
                 })
                 unique_id_doc.append(doc_id)
 
@@ -308,6 +352,7 @@ class CAS(datasets.GeneratorBasedBuilder):
                     "tokens": tokens,
                     "lemmas": text_lemmas,
                     "ner_tags": ner_tags,
+                    "is_oov": [_VOCAB.find(tt.lower()) for tt in tokens],
                 })
 
                 key += 1
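For context, the new "is_oov" column is produced at generation time by _VOCAB.find(tt.lower()): an exact, lowercased membership test against the NACHOS vocabulary file, with terms bucketed by (first character, length) so each lookup only scans a small bucket. The sketch below restates that lookup with a small made-up vocabulary so the output of the new column is easy to see; the vocabulary and tokens are illustrative only and are not part of the CAS corpus.

# Restatement of the StringIndex lookup from the diff, with a toy vocabulary
# (the real script reads vocabulary_nachos_lowercased.txt at import time).
vocab_struct = {}
for t in ["patient", "douleur", "abdominale", "traitement"]:
    # same bucketing as StringIndex: terms grouped by (first character, length)
    vocab_struct.setdefault((t[0], len(t)), []).append(t)

def find(t):
    key = (t[0], len(t))
    return "is_not_oov" if t in vocab_struct.get(key, []) else "is_oov"

tokens = ["Patient", "présente", "une", "douleur", "abdominale"]
print([find(tt.lower()) for tt in tokens])
# ['is_not_oov', 'is_oov', 'is_oov', 'is_not_oov', 'is_not_oov']

Because the feature is declared as datasets.Sequence(datasets.features.ClassLabel(names=['is_not_oov', 'is_oov'])), these string labels end up stored as integer class ids once the dataset is built; ClassLabel's str2int/int2str give the mapping:

from datasets import ClassLabel

label = ClassLabel(names=['is_not_oov', 'is_oov'])
print(label.str2int('is_oov'))   # 1
print(label.int2str(0))          # 'is_not_oov'

Note also that _VOCAB is built at module level, so the vocabulary file must be readable at ./vocabulary_nachos_lowercased.txt (relative to the working directory) whenever the loading script is imported.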