qanastek committed
Commit 9a5b6ab · 1 Parent(s): 0441b71

Create CAS.py

Files changed (1)
  1. CAS.py +387 -0
CAS.py ADDED
@@ -0,0 +1,387 @@
import os
import random

import datasets
import numpy as np

_CITATION = """\
@inproceedings{grabar-etal-2018-cas,
    title = {{CAS}: {F}rench Corpus with Clinical Cases},
    author = {Grabar, Natalia and Claveau, Vincent and Dalloux, Cl{\\'e}ment},
    year = 2018,
    month = oct,
    booktitle = {
        Proceedings of the Ninth International Workshop on Health Text Mining and
        Information Analysis
    },
    publisher = {Association for Computational Linguistics},
    address = {Brussels, Belgium},
    pages = {122--128},
    doi = {10.18653/v1/W18-5614},
    url = {https://aclanthology.org/W18-5614},
    abstract = {
        Textual corpora are extremely important for various NLP applications as
        they provide information necessary for creating, setting and testing these
        applications and the corresponding tools. They are also crucial for
        designing reliable methods and reproducible results. Yet, in some areas,
        such as the medical area, due to confidentiality or to ethical reasons, it
        is complicated and even impossible to access textual data representative of
        those produced in these areas. We propose the CAS corpus built with
        clinical cases, such as they are reported in the published scientific
        literature in French. We describe this corpus, currently containing over
        397,000 word occurrences, and the existing linguistic and semantic
        annotations.
    }
}
"""

_DESCRIPTION = """\
We manually annotated two corpora from the biomedical field. The ESSAI corpus \
contains clinical trial protocols in French. They were mainly obtained from the \
National Cancer Institute. The typical protocol consists of two parts: the \
summary of the trial, which indicates the purpose of the trial and the methods \
applied; and a detailed description of the trial with the inclusion and \
exclusion criteria. The CAS corpus contains clinical cases published in \
scientific literature and training material. They are published in different \
journals from French-speaking countries (France, Belgium, Switzerland, Canada, \
African countries, tropical countries) and are related to various medical \
specialties (cardiology, urology, oncology, obstetrics, pulmonology, \
gastro-enterology). The purpose of clinical cases is to describe clinical \
situations of patients. Hence, their content is close to the content of clinical \
narratives (description of diagnoses, treatments or procedures, evolution, \
family history, expected audience, etc.). In clinical cases, negation is \
frequently used to describe the patient's signs, symptoms, and diagnosis. \
Speculation is present as well, but less frequently.
This version only contains the annotated CAS corpus.
"""

_HOMEPAGE = "https://clementdalloux.fr/?page_id=28"

_LICENSE = "Data User Agreement"

class CAS(datasets.GeneratorBasedBuilder):

    # Note: the original default, "pos_spec", matches no declared config name;
    # "pos" is the closest declared config.
    DEFAULT_CONFIG_NAME = "pos"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="pos", version="1.0.0", description="The CAS corpora - POS Speculation task"),
        datasets.BuilderConfig(name="cls", version="1.0.0", description="The CAS corpora - CLS Negation / Speculation task"),
        datasets.BuilderConfig(name="ner_spec", version="1.0.0", description="The CAS corpora - NER Speculation task"),
        datasets.BuilderConfig(name="ner_neg", version="1.0.0", description="The CAS corpora - NER Negation task"),
    ]

    def _info(self):

        if self.config.name.find("pos") != -1:

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "lemmas": [datasets.Value("string")],
                    "pos_tags": [datasets.features.ClassLabel(
                        names=['B-INT', 'B-PRO:DEM', 'B-VER:impf', 'B-VER:ppre', 'B-PRP:det', 'B-KON', 'B-VER:pper', 'B-PRP', 'B-PRO:IND', 'B-VER:simp', 'B-VER:con', 'B-SENT', 'B-VER:futu', 'B-PRO:PER', 'B-VER:infi', 'B-ADJ', 'B-NAM', 'B-NUM', 'B-PUN:cit', 'B-PRO:REL', 'B-VER:subi', 'B-ABR', 'B-NOM', 'B-VER:pres', 'B-DET:ART', 'B-VER:cond', 'B-VER:subp', 'B-DET:POS', 'B-ADV', 'B-SYM', 'B-PUN'],
                    )],
                }
            )

        elif self.config.name.find("cls") != -1:

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "label": datasets.features.ClassLabel(
                        names=['negation_speculation', 'speculation', 'neutral', 'negation'],
                    ),
                }
            )

        elif self.config.name.find("ner") != -1:

            if self.config.name.find("_spec") != -1:
                names = ['O', 'B_xcope_inc', 'I_xcope_inc']
            elif self.config.name.find("_neg") != -1:
                names = ['O', 'B_scope_neg', 'I_scope_neg']

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "lemmas": [datasets.Value("string")],
                    "ner_tags": [datasets.features.ClassLabel(
                        names=names,
                    )],
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):

        if self.config.data_dir is None:
            raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")
        else:
            data_dir = self.config.data_dir

        # All three splits are carved out of the same files inside _generate_examples.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "datadir": data_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "datadir": data_dir,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "datadir": data_dir,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, datadir, split):

        all_res = []

        key = 0

        # "ner_spec" -> "spec", "ner_neg" -> "neg"; unused by the other configs
        subset = self.config.name.split("_")[-1]

        unique_id_doc = []

        # NER configs read a single annotated file; POS and CLS read both.
        if self.config.name.find("ner") != -1:
            docs = [f"CAS_{subset}.txt"]
        else:
            docs = ["CAS_neg.txt", "CAS_spec.txt"]

        for file in docs:

            filename = os.path.join(datadir, file)

            if self.config.name.find("pos") != -1:

                id_docs = []
                id_words = []
                words = []
                lemmas = []
                POS_tags = []

                with open(filename) as f:

                    for line in f.readlines():

                        splitted = line.split("\t")

                        if len(splitted) < 5:
                            continue

                        # Tab-separated columns: document id, word id, token, lemma, POS tag
                        id_doc, id_word, word, lemma, tag = splitted[0:5]
                        if len(splitted) >= 8:
                            tag = splitted[6]

                        if tag == "@card@":
                            print(splitted)

                        if word == "@card@":
                            print(splitted)

                        # Patch two tokenization artifacts left by the tagger
                        if lemma == "000" and tag == "@card@":
                            tag = "NUM"
                            word = "100 000"
                            lemma = "100 000"
                        elif lemma == "45" and tag == "@card@":
                            tag = "NUM"

                        id_docs.append(id_doc)
                        id_words.append(id_word)
                        words.append(word)
                        lemmas.append(lemma)
                        POS_tags.append('B-' + tag)

                dic = {
                    "id_docs": np.array(list(map(int, id_docs))),
                    "id_words": id_words,
                    "words": words,
                    "lemmas": lemmas,
                    "POS_tags": POS_tags,
                }

                # Group the token stream back into documents
                for doc_id in set(dic["id_docs"]):

                    indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
                    tokens = [dic["words"][id] for id in indexes]
                    text_lemmas = [dic["lemmas"][id] for id in indexes]
                    pos_tags = [dic["POS_tags"][id] for id in indexes]

                    if doc_id not in unique_id_doc:

                        all_res.append({
                            "id": str(doc_id),
                            "document_id": str(doc_id),
                            "tokens": tokens,
                            "lemmas": text_lemmas,
                            "pos_tags": pos_tags,
                        })
                        unique_id_doc.append(doc_id)

            elif self.config.name.find("ner") != -1:

                id_docs = []
                id_words = []
                words = []
                lemmas = []
                ner_tags = []

                with open(filename) as f:

                    for line in f.readlines():

                        if len(line.split("\t")) < 5:
                            continue

                        id_doc, id_word, word, lemma, _ = line.split("\t")[0:5]
                        # The scope tag sits in the last tab-separated column
                        tag = line.replace("\n", "").split("\t")[-1]

                        # Normalize annotation noise
                        if tag == "***" or tag == "_":
                            tag = "O"
                        elif tag == "I_xcope_inc_":
                            tag = "I_xcope_inc"
                        # elif tag == "v":
                        #     tag = "I_scope_spec"
                        # elif tag == "z":
                        #     tag = "O"

                        id_docs.append(id_doc)
                        id_words.append(id_word)
                        words.append(word)
                        lemmas.append(lemma)
                        ner_tags.append(tag)

                dic = {
                    "id_docs": np.array(list(map(int, id_docs))),
                    "id_words": id_words,
                    "words": words,
                    "lemmas": lemmas,
                    "ner_tags": ner_tags,
                }

                for doc_id in set(dic["id_docs"]):

                    indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
                    tokens = [dic["words"][id] for id in indexes]
                    text_lemmas = [dic["lemmas"][id] for id in indexes]
                    doc_ner_tags = [dic["ner_tags"][id] for id in indexes]

                    all_res.append({
                        "id": str(key),  # cast: the "id" feature is declared as a string
                        "document_id": str(doc_id),
                        "tokens": tokens,
                        "lemmas": text_lemmas,
                        "ner_tags": doc_ner_tags,
                    })

                    key += 1

            elif self.config.name.find("cls") != -1:

                # Documents are blank-line-separated blocks of tab-separated rows
                with open(filename, "r") as f_in:
                    conll = [
                        [b.split("\t") for b in a.split("\n")]
                        for a in f_in.read().split("\n\n")
                    ]

                classe = "negation" if filename.find("_neg") != -1 else "speculation"

                for document in conll:

                    # An empty block parses to [[""]], not [""]
                    if document == [[""]]:
                        continue

                    identifier = document[0][0]

                    unique = list(set([w[-1] for w in document]))
                    tokens = [sent[2] for sent in document if len(sent) > 1]

                    if "***" in unique:
                        label = "neutral"
                    elif "_" in unique:
                        label = classe
                    else:
                        # only scope tags remain, so the cue class still applies
                        label = classe

                    if identifier in unique_id_doc and label == "neutral":
                        continue

                    elif identifier in unique_id_doc and label != "neutral":

                        index_l = unique_id_doc.index(identifier)

                        # Seen in both files: the document carries both phenomena
                        if all_res[index_l]["label"] != "neutral":
                            label = "negation_speculation"

                        all_res[index_l] = {
                            "id": str(identifier),
                            "document_id": identifier,
                            "tokens": tokens,
                            "label": label,
                        }

                    else:

                        all_res.append({
                            "id": str(identifier),
                            "document_id": identifier,
                            "tokens": tokens,
                            "label": label,
                        })

                        unique_id_doc.append(identifier)

        # Deterministic 70 / 10 / 20 train / validation / test split over example ids
        ids = [r["id"] for r in all_res]

        random.seed(4)
        random.shuffle(ids)
        random.shuffle(ids)
        random.shuffle(ids)

        train, validation, test = np.split(ids, [int(len(ids) * 0.70), int(len(ids) * 0.80)])

        if split == "train":
            allowed_ids = list(train)
        elif split == "validation":
            allowed_ids = list(validation)
        elif split == "test":
            allowed_ids = list(test)

        for r in all_res:
            if r["id"] in allowed_ids:
                yield r["id"], r
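
For reference, a loading script like this is consumed through datasets.load_dataset, with data_dir pointing at a local folder that already contains CAS_neg.txt and CAS_spec.txt (the corpus itself is distributed separately under the Data User Agreement). A minimal usage sketch; the local paths here are assumptions, not part of the commit:

from datasets import load_dataset

# Hypothetical paths: "CAS.py" is this loading script, "./CAS" a folder
# holding CAS_neg.txt and CAS_spec.txt obtained under the Data User Agreement.
dataset = load_dataset(
    "CAS.py",
    name="ner_neg",    # one of: "pos", "cls", "ner_spec", "ner_neg"
    data_dir="./CAS",
)

print(dataset)                          # train / validation / test (70/10/20)
print(dataset["train"][0]["tokens"])    # token sequence of the first document
print(dataset["train"][0]["ner_tags"])  # matching negation-scope labels

Because the splits are produced by a seeded shuffle inside the script, repeated loads yield the same train/validation/test partition.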