qanastek committed on
Commit
f9d887b
·
1 Parent(s): efb5260

Update ESSAI.py

Browse files
Files changed (1) hide show
  1. ESSAI.py +54 -29
ESSAI.py CHANGED
@@ -46,8 +46,7 @@ class ESSAI(datasets.GeneratorBasedBuilder):
46
  BUILDER_CONFIGS = [
47
  datasets.BuilderConfig(name="pos", version="1.0.0", description="The ESSAI corpora - POS Speculation task"),
48
 
49
- datasets.BuilderConfig(name="cls_spec", version="1.0.0", description="The ESSAI corpora - CLS Speculation task"),
50
- datasets.BuilderConfig(name="cls_neg", version="1.0.0", description="The ESSAI corpora - CLS Negation task"),
51
 
52
  datasets.BuilderConfig(name="ner_spec", version="1.0.0", description="The ESSAI corpora - NER Speculation task"),
53
  datasets.BuilderConfig(name="ner_neg", version="1.0.0", description="The ESSAI corpora - NER Negation task"),
@@ -63,10 +62,9 @@ class ESSAI(datasets.GeneratorBasedBuilder):
63
  "document_id": datasets.Value("string"),
64
  "tokens": [datasets.Value("string")],
65
  "lemmas": [datasets.Value("string")],
66
- "pos_tags": [datasets.Value("string")],
67
- # "pos_tags": [datasets.features.ClassLabel(
68
- # names = ['VER:pper', 'VER:subi', 'VER:cond', 'INT', 'VER:infi', 'PUN:cit', 'ITAC', 'PUN', 'VER:ppre', 'VER:pres', 'PRO:REL', 'ADJ', 'VER:subp', 'NN', 'PREF', 'PRP', 'PRO:IND', 'PRO:POS', 'DET:POS', 'VER:futu', 'PRO:DEM', 'KON', 'DET:ART', 'VER:', 'PRP:det', 'PRO', 'FAG', 'NOM', 'SYM', 'VER:impf', 'CIT02-HM', 'SENT', 'Bayer', 'VER:simp', 'ADV', 'bayer', '@card@', 'PRO:PER', 'NUM', 'ABR', 'NAM'],
69
- # )],
70
  }
71
  )
72
 
@@ -77,25 +75,28 @@ class ESSAI(datasets.GeneratorBasedBuilder):
77
  "id": datasets.Value("string"),
78
  "document_id": datasets.Value("string"),
79
  "tokens": [datasets.Value("string")],
80
- "label": datasets.Value("string"),
81
- # "label": datasets.features.ClassLabel(
82
- # names = ['VER:pper', 'VER:subi', 'VER:cond', 'INT', 'VER:infi', 'PUN:cit', 'ITAC', 'PUN', 'VER:ppre', 'VER:pres', 'PRO:REL', 'ADJ', 'VER:subp', 'NN', 'PREF', 'PRP', 'PRO:IND', 'PRO:POS', 'DET:POS', 'VER:futu', 'PRO:DEM', 'KON', 'DET:ART', 'VER:', 'PRP:det', 'PRO', 'FAG', 'NOM', 'SYM', 'VER:impf', 'CIT02-HM', 'SENT', 'Bayer', 'VER:simp', 'ADV', 'bayer', '@card@', 'PRO:PER', 'NUM', 'ABR', 'NAM'],
83
- # ),
84
  }
85
  )
86
 
87
  elif self.config.name.find("ner") != -1:
88
 
 
 
 
 
 
89
  features = datasets.Features(
90
  {
91
  "id": datasets.Value("string"),
92
  "document_id": datasets.Value("string"),
93
  "tokens": [datasets.Value("string")],
94
  "lemmas": [datasets.Value("string")],
95
- "ner_tags": [datasets.Value("string")],
96
- # "ner_tags": [datasets.features.ClassLabel(
97
- # names = ['VER:pper', 'VER:subi', 'VER:cond', 'INT', 'VER:infi', 'PUN:cit', 'ITAC', 'PUN', 'VER:ppre', 'VER:pres', 'PRO:REL', 'ADJ', 'VER:subp', 'NN', 'PREF', 'PRP', 'PRO:IND', 'PRO:POS', 'DET:POS', 'VER:futu', 'PRO:DEM', 'KON', 'DET:ART', 'VER:', 'PRP:det', 'PRO', 'FAG', 'NOM', 'SYM', 'VER:impf', 'CIT02-HM', 'SENT', 'Bayer', 'VER:simp', 'ADV', 'bayer', '@card@', 'PRO:PER', 'NUM', 'ABR', 'NAM'],
98
- # )],
99
  }
100
  )
101
 
@@ -150,10 +151,10 @@ class ESSAI(datasets.GeneratorBasedBuilder):
150
 
151
  unique_id_doc = []
152
 
153
- if self.config.name.find("pos") != -1:
154
- docs = ["ESSAI_neg.txt", "ESSAI_spec.txt"]
155
- else:
156
  docs = [f"ESSAI_{subset}.txt"]
 
 
157
 
158
  for file in docs:
159
 
@@ -182,7 +183,10 @@ class ESSAI(datasets.GeneratorBasedBuilder):
182
 
183
  if tag == "@card@":
184
  print(splitted)
185
-
 
 
 
186
  if lemma == "000" and tag == "@card@":
187
  tag = "NUM"
188
  word = "100 000"
@@ -244,12 +248,15 @@ class ESSAI(datasets.GeneratorBasedBuilder):
244
 
245
  id_doc, id_word, word, lemma, _ = line.split("\t")[0:5]
246
  tag = line.replace("\n","").split("\t")[-1]
 
247
  if tag == "***" or tag == "_":
248
  tag = "O"
249
  elif tag == "v":
250
  tag = "I_scope_spec"
251
  elif tag == "z":
252
  tag = "O"
 
 
253
 
254
  id_docs.append(id_doc)
255
  id_words.append(id_word)
@@ -291,9 +298,7 @@ class ESSAI(datasets.GeneratorBasedBuilder):
291
  ]
292
  f_in.close()
293
 
294
- classe = "negation" if self.config.name.find("neg") != -1 else "speculation"
295
-
296
- all_res = []
297
 
298
  for document in conll:
299
 
@@ -303,20 +308,40 @@ class ESSAI(datasets.GeneratorBasedBuilder):
303
  identifier = document[0][0]
304
 
305
  unique = list(set([w[-1] for w in document]))
306
- # print(document)
307
  tokens = [sent[2] for sent in document if len(sent) > 1]
308
 
309
  if "***" in unique:
310
- l = "none"
311
  elif "_" in unique:
312
  l = classe
313
 
314
- all_res.append({
315
- "id": str(identifier),
316
- "document_id": identifier,
317
- "tokens": tokens,
318
- "label": l,
319
- })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
320
 
321
  ids = [r["id"] for r in all_res]
322
 
 
46
  BUILDER_CONFIGS = [
47
  datasets.BuilderConfig(name="pos", version="1.0.0", description="The ESSAI corpora - POS Speculation task"),
48
 
49
+ datasets.BuilderConfig(name="cls", version="1.0.0", description="The ESSAI corpora - CLS Negation / Speculation task"),
 
50
 
51
  datasets.BuilderConfig(name="ner_spec", version="1.0.0", description="The ESSAI corpora - NER Speculation task"),
52
  datasets.BuilderConfig(name="ner_neg", version="1.0.0", description="The ESSAI corpora - NER Negation task"),
 
62
  "document_id": datasets.Value("string"),
63
  "tokens": [datasets.Value("string")],
64
  "lemmas": [datasets.Value("string")],
65
+ "pos_tags": [datasets.features.ClassLabel(
66
+ names = ['INT', 'PRO:POS', 'PRP', 'SENT', 'PRO', 'ABR', 'VER:pres', 'KON', 'SYM', 'DET:POS', 'VER:', 'PRO:IND', 'NAM', 'ADV', 'PRO:DEM', 'NN', 'PRO:PER', 'VER:pper', 'VER:ppre', 'PUN', 'VER:simp', 'PREF', 'NUM', 'VER:futu', 'NOM', 'VER:impf', 'VER:subp', 'VER:infi', 'DET:ART', 'PUN:cit', 'ADJ', 'PRP:det', 'PRO:REL', 'VER:cond', 'VER:subi'],
67
+ )],
 
68
  }
69
  )
70
 
 
75
  "id": datasets.Value("string"),
76
  "document_id": datasets.Value("string"),
77
  "tokens": [datasets.Value("string")],
78
+ "label": datasets.features.ClassLabel(
79
+ names = ['negation_speculation', 'negation', 'neutral', 'speculation'],
80
+ ),
 
81
  }
82
  )
83
 
84
  elif self.config.name.find("ner") != -1:
85
 
86
+ if self.config.name.find("_spec") != -1:
87
+ names = ['O', 'B_cue_spec', 'B_scope_spec', 'I_scope_spec']
88
+ elif self.config.name.find("_neg") != -1:
89
+ names = ['O', 'B_cue_neg', 'B_scope_neg', 'I_scope_neg']
90
+
91
  features = datasets.Features(
92
  {
93
  "id": datasets.Value("string"),
94
  "document_id": datasets.Value("string"),
95
  "tokens": [datasets.Value("string")],
96
  "lemmas": [datasets.Value("string")],
97
+ "ner_tags": [datasets.features.ClassLabel(
98
+ names = names,
99
+ )],
 
100
  }
101
  )
102
 
 
151
 
152
  unique_id_doc = []
153
 
154
+ if self.config.name.find("ner") != -1:
 
 
155
  docs = [f"ESSAI_{subset}.txt"]
156
+ else:
157
+ docs = ["ESSAI_neg.txt", "ESSAI_spec.txt"]
158
 
159
  for file in docs:
160
 
 
183
 
184
  if tag == "@card@":
185
  print(splitted)
186
+
187
+ if word == "@card@":
188
+ print(splitted)
189
+
190
  if lemma == "000" and tag == "@card@":
191
  tag = "NUM"
192
  word = "100 000"
 
248
 
249
  id_doc, id_word, word, lemma, _ = line.split("\t")[0:5]
250
  tag = line.replace("\n","").split("\t")[-1]
251
+
252
  if tag == "***" or tag == "_":
253
  tag = "O"
254
  elif tag == "v":
255
  tag = "I_scope_spec"
256
  elif tag == "z":
257
  tag = "O"
258
+ elif tag == "I_scope_spec_":
259
+ tag = "I_scope_spec"
260
 
261
  id_docs.append(id_doc)
262
  id_words.append(id_word)
 
298
  ]
299
  f_in.close()
300
 
301
+ classe = "negation" if filename.find("_neg") != -1 else "speculation"
 
 
302
 
303
  for document in conll:
304
 
 
308
  identifier = document[0][0]
309
 
310
  unique = list(set([w[-1] for w in document]))
 
311
  tokens = [sent[2] for sent in document if len(sent) > 1]
312
 
313
  if "***" in unique:
314
+ l = "neutral"
315
  elif "_" in unique:
316
  l = classe
317
 
318
+ if identifier in unique_id_doc and l == 'neutral':
319
+ continue
320
+
321
+ elif identifier in unique_id_doc and l != 'neutral':
322
+
323
+ index_l = unique_id_doc.index(identifier)
324
+
325
+ if all_res[index_l]["label"] != "neutral":
326
+ l = "negation_speculation"
327
+
328
+ all_res[index_l] = {
329
+ "id": str(identifier),
330
+ "document_id": identifier,
331
+ "tokens": tokens,
332
+ "label": l,
333
+ }
334
+
335
+ else:
336
+
337
+ all_res.append({
338
+ "id": str(identifier),
339
+ "document_id": identifier,
340
+ "tokens": tokens,
341
+ "label": l,
342
+ })
343
+
344
+ unique_id_doc.append(identifier)
345
 
346
  ids = [r["id"] for r in all_res]
347