qanastek commited on
Commit
ca99815
·
1 Parent(s): af18ef3

Upload huggingface.co_datasets_Dr-BERT_ESSAI_raw_main_ESSAI.py

Browse files
huggingface.co_datasets_Dr-BERT_ESSAI_raw_main_ESSAI.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import random
3
+
4
+ import datasets
5
+ import numpy as np
6
+
7
+ _CITATION = """\
8
+ @misc{
9
+ dalloux,
10
+ title={Datasets – Clément Dalloux},
11
+ url={http://clementdalloux.fr/?page_id=28},
12
+ journal={Clément Dalloux},
13
+ author={Dalloux, Clément}
14
+ }
15
+ """
16
+
17
+ _DESCRIPTION = """\
18
+ We manually annotated two corpora from the biomedical field. The ESSAI corpus \
19
+ contains clinical trial protocols in French. They were mainly obtained from the \
20
+ National Cancer Institute The typical protocol consists of two parts: the \
21
+ summary of the trial, which indicates the purpose of the trial and the methods \
22
+ applied; and a detailed description of the trial with the inclusion and \
23
+ exclusion criteria. The CAS corpus contains clinical cases published in \
24
+ scientific literature and training material. They are published in different \
25
+ journals from French-speaking countries (France, Belgium, Switzerland, Canada, \
26
+ African countries, tropical countries) and are related to various medical \
27
+ specialties (cardiology, urology, oncology, obstetrics, pulmonology, \
28
+ gastro-enterology). The purpose of clinical cases is to describe clinical \
29
+ situations of patients. Hence, their content is close to the content of clinical \
30
+ narratives (description of diagnoses, treatments or procedures, evolution, \
31
+ family history, expected audience, etc.). In clinical cases, the negation is \
32
+ frequently used for describing the patient signs, symptoms, and diagnosis. \
33
+ Speculation is present as well but less frequently.
34
+
35
+ This version only contain the annotated ESSAI corpus
36
+ """
37
+
38
+ _HOMEPAGE = "https://clementdalloux.fr/?page_id=28"
39
+
40
+ _LICENSE = 'Data User Agreement'
41
+
42
class ESSAI(datasets.GeneratorBasedBuilder):
    """Loader for the ESSAI corpus: French clinical-trial protocol sentences
    annotated for negation and speculation.

    This is a *local* dataset: the caller must pass ``data_dir`` pointing at a
    directory containing ``ESSAI_neg.txt`` and ``ESSAI_spec.txt`` (CoNLL-style,
    tab-separated, one token per line).
    """

    DEFAULT_CONFIG_NAME = "source"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="source", version="1.0.0", description="The ESSAI corpora"),
    ]

    def _info(self):
        """Declare the features exposed by each example."""

        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "document_id": datasets.Value("string"),
                "tokens": [datasets.Value("string")],
                "lemmas": [datasets.Value("string")],
                # POS tag inventory as observed in the corpus files — it
                # includes a few noisy labels ('Bayer', 'bayer', '@card@',
                # 'CIT02-HM', ...) that appear verbatim in the data.
                "pos_tags": [datasets.features.ClassLabel(
                    names = ['VER:pper', 'VER:subi', 'VER:cond', 'INT', 'VER:infi', 'PUN:cit', 'ITAC', 'PUN', 'VER:ppre', 'VER:pres', 'PRO:REL', 'ADJ', 'VER:subp', 'NN', 'PREF', 'PRP', 'PRO:IND', 'PRO:POS', 'DET:POS', 'VER:futu', 'PRO:DEM', 'KON', 'DET:ART', 'VER:', 'PRP:det', 'PRO', 'FAG', 'NOM', 'SYM', 'VER:impf', 'CIT02-HM', 'SENT', 'Bayer', 'VER:simp', 'ADV', 'bayer', '@card@', 'PRO:PER', 'NUM', 'ABR', 'NAM'],
                )],
                # Which annotation layer the sentence came from: the negation
                # file or the speculation file.
                "label": datasets.features.ClassLabel(
                    names = ['negation', 'speculation'],
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return train/validation/test generators over the local data dir.

        Raises:
            ValueError: when ``data_dir`` was not provided by the caller.
        """

        if self.config.data_dir is None:
            raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")

        data_dir = self.config.data_dir

        # All three splits read the same files; _generate_examples selects the
        # subset of sentence ids belonging to the requested split.
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "datadir": data_dir,
                    "split": split_tag,
                },
            )
            for split_name, split_tag in [
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            ]
        ]

    def _generate_examples(self, datadir, split):
        """Yield ``(key, example)`` pairs for the requested split.

        Both annotation files are parsed in full, sentences are reassembled
        from the token-per-line format by document id, and a deterministic
        70/10/20 train/validation/test partition is derived from a seeded
        shuffle of the sentence keys.

        Raises:
            ValueError: when ``split`` is not train/validation/test
                (the original code left ``allowed_ids`` unbound and crashed
                later with a NameError).
        """

        all_res = []
        key = 0

        for file in ["ESSAI_neg.txt", "ESSAI_spec.txt"]:

            label = "negation" if "neg" in file else "speculation"
            id_docs = []
            words = []
            lemmas = []
            POS_tags = []

            # NOTE(review): assumes the corpus files are UTF-8; the original
            # relied on the locale-default encoding, which is not portable.
            with open(os.path.join(datadir, file), encoding="utf-8") as f:

                # Stream the file instead of materializing it via readlines().
                for line in f:

                    cols = line.split("\t")
                    if len(cols) < 5:
                        # Blank/malformed lines (e.g. sentence separators).
                        continue

                    id_doc, _id_word, word, lemma, tag = cols[0:5]
                    # NOTE(review): `tag` is used as-is — presumably the files
                    # carry more than five columns, otherwise the fifth field
                    # would keep its trailing newline; confirm against data.

                    id_docs.append(id_doc)
                    words.append(word)
                    lemmas.append(lemma)
                    POS_tags.append(tag)

            doc_ids_arr = np.array(list(map(int, id_docs)))

            # Group token rows into one example per document id.
            for doc_id in set(doc_ids_arr):

                indexes = np.argwhere(doc_ids_arr == doc_id)[:, 0]

                all_res.append({
                    "id": key,
                    "document_id": doc_id,
                    "tokens": [words[i] for i in indexes],
                    "lemmas": [lemmas[i] for i in indexes],
                    "pos_tags": [POS_tags[i] for i in indexes],
                    "label": label,
                })

                key += 1

        ids = [r["id"] for r in all_res]

        # Deterministic split: the seed AND the three consecutive shuffles
        # must be kept exactly as-is, otherwise previously published split
        # assignments would change.
        random.seed(4)
        random.shuffle(ids)
        random.shuffle(ids)
        random.shuffle(ids)

        # 70% train / 10% validation / 20% test.
        train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])

        # Use a set for O(1) membership below (the original used a list,
        # making the final loop quadratic in corpus size).
        if split == "train":
            allowed_ids = set(int(i) for i in train)
        elif split == "validation":
            allowed_ids = set(int(i) for i in validation)
        elif split == "test":
            allowed_ids = set(int(i) for i in test)
        else:
            raise ValueError(f"Unknown split: {split!r}")

        for r in all_res:
            if r["id"] in allowed_ids:
                yield r["id"], r