qanastek committed
Commit 2cf777a · Parent: b829cac

Update E3C.py

Files changed (1)
  1. E3C.py +112 -43
E3C.py CHANGED
@@ -41,26 +41,45 @@ class E3C(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name=f"{lang}", version="1.0.0", description=f"The {lang} subset of the E3C corpus") for lang in _LANGUAGES
     ]
+
+    DEFAULT_CONFIG_NAME = "clinical"
 
     def _info(self):
+
+        if self.config.name == "default":
+            self.config.name = self.DEFAULT_CONFIG_NAME
 
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "text": datasets.Value("string"),
-                "tokens": datasets.Sequence(datasets.Value("string")),
-                "ner_clinical_tags": datasets.Sequence(
-                    datasets.features.ClassLabel(
-                        names=["O","B-CLINENTITY","I-CLINENTITY"],
-                    ),
-                ),
-                "ner_temporal_tags": datasets.Sequence(
-                    datasets.features.ClassLabel(
-                        names=["O", "B-EVENT", "B-ACTOR", "B-BODYPART", "B-TIMEX3", "B-RML", "I-EVENT", "I-ACTOR", "I-BODYPART", "I-TIMEX3", "I-RML"],
-                    ),
-                ),
-            }
-        )
+        if self.config.name == "clinical":
+
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "ner_clinical_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=["O","B-CLINENTITY","I-CLINENTITY"],
+                        ),
+                    ),
+                }
+            )
+
+        elif self.config.name == "temporal":
+
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "ner_temporal_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=["O", "B-EVENT", "B-ACTOR", "B-BODYPART", "B-TIMEX3", "B-RML", "I-EVENT", "I-ACTOR", "I-BODYPART", "I-TIMEX3", "I-RML"],
+                        ),
+                    ),
+                }
+            )
 
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
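
For context (not part of the commit): each config now exposes a single tag column backed by a Sequence of ClassLabel, so the IOB strings map to integer ids. A minimal standalone sketch of the "clinical" schema declared above:

    import datasets

    # Sketch only: rebuilds the "clinical" features from the hunk above.
    features = datasets.Features(
        {
            "id": datasets.Value("string"),
            "text": datasets.Value("string"),
            "tokens": datasets.Sequence(datasets.Value("string")),
            "ner_clinical_tags": datasets.Sequence(
                datasets.features.ClassLabel(names=["O", "B-CLINENTITY", "I-CLINENTITY"])
            ),
        }
    )

    tags = features["ner_clinical_tags"].feature
    print(tags.str2int(["O", "B-CLINENTITY", "I-CLINENTITY"]))  # [0, 1, 2]
    print(tags.int2str(1))                                      # B-CLINENTITY
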
@@ -73,29 +92,57 @@ class E3C(datasets.GeneratorBasedBuilder):
 
         data_dir = dl_manager.download_and_extract(_URL)
 
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name, "layer1"),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name, "layer1"),
-                    "split": "validation",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_validation", self.config.name, "layer2"),
-                    "split": "test",
-                },
-            ),
-        ]
+        if self.config.name == "clinical":
+
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name, "layer2"),
+                        "split": "train",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name, "layer2"),
+                        "split": "validation",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name, "layer1"),
+                        "split": "test",
+                    },
+                ),
+            ]
+
+        elif self.config.name == "temporal":
+
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name, "layer1"),
+                        "split": "train",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name, "layer1"),
+                        "split": "validation",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name, "layer1"),
+                        "split": "test",
+                    },
+                ),
+            ]
 
     @staticmethod
     def get_annotations(entities: ResultSet, text: str) -> list:
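
For context (not part of the commit): the two branches above differ only in which annotation layer feeds each split. A small sketch of the resulting paths, with placeholder values for data_dir and the config name:

    import os

    data_dir = "/tmp/e3c"      # placeholder; the loader gets this from dl_manager.download_and_extract(_URL)
    config_name = "clinical"   # placeholder for self.config.name

    # Split -> annotation layer, as wired up in the hunk above.
    layers = {
        "clinical": {"train": "layer2", "validation": "layer2", "test": "layer1"},
        "temporal": {"train": "layer1", "validation": "layer1", "test": "layer1"},
    }

    for split, layer in layers[config_name].items():
        print(split, os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", config_name, layer))
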
@@ -204,7 +251,33 @@ class E3C(datasets.GeneratorBasedBuilder):
 
             key += 1
 
-        if split != "test":
+        if self.config.name == "clinical":
+
+            if split != "test":
+
+                ids = [r["id"] for r in all_res]
+
+                random.seed(4)
+                random.shuffle(ids)
+                random.shuffle(ids)
+                random.shuffle(ids)
+
+                train, validation = np.split(ids, [int(len(ids)*0.8738)])
+
+                if split == "train":
+                    allowed_ids = list(train)
+                elif split == "validation":
+                    allowed_ids = list(validation)
+
+                for r in all_res:
+                    if r["id"] in allowed_ids:
+                        yield r["id"], r
+            else:
+
+                for r in all_res:
+                    yield r["id"], r
+
+        elif self.config.name == "temporal":
 
             ids = [r["id"] for r in all_res]
 
@@ -223,7 +296,3 @@ class E3C(datasets.GeneratorBasedBuilder):
             for r in all_res:
                 if r["id"] in allowed_ids:
                     yield r["id"], r
-        else:
-
-            for r in all_res:
-                yield r["id"], r
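
For context (not part of the commit): the clinical branch keeps the seeded-shuffle plus np.split scheme for carving train/validation out of the document ids. A self-contained sketch of just that split, assuming 100 dummy ids:

    import random

    import numpy as np

    ids = [f"doc_{i}" for i in range(100)]  # dummy ids standing in for the real document ids

    random.seed(4)
    random.shuffle(ids)
    random.shuffle(ids)
    random.shuffle(ids)

    # ~87.38% of the ids go to train, the remainder to validation, matching the loader above.
    train, validation = np.split(np.array(ids), [int(len(ids) * 0.8738)])
    print(len(train), len(validation))  # 87 13
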
 