Upload 40 files
Browse files- .gitattributes +14 -0
- BN-Bangla/bn_dev.conll +0 -0
- BN-Bangla/bn_test.conll +3 -0
- BN-Bangla/bn_train.conll +0 -0
- DE-German/de_dev.conll +0 -0
- DE-German/de_test.conll +3 -0
- DE-German/de_train.conll +0 -0
- EN-English/en_dev.conll +0 -0
- EN-English/en_test.conll +3 -0
- EN-English/en_train.conll +0 -0
- ES-Spanish/es_dev.conll +0 -0
- ES-Spanish/es_test.conll +3 -0
- ES-Spanish/es_train.conll +0 -0
- FA-Farsi/fa_dev.conll +0 -0
- FA-Farsi/fa_test.conll +3 -0
- FA-Farsi/fa_train.conll +0 -0
- HI-Hindi/hi_dev.conll +0 -0
- HI-Hindi/hi_test.conll +3 -0
- HI-Hindi/hi_train.conll +0 -0
- KO-Korean/ko_dev.conll +0 -0
- KO-Korean/ko_test.conll +3 -0
- KO-Korean/ko_train.conll +0 -0
- MIX_Code_mixed/mix_dev.conll +0 -0
- MIX_Code_mixed/mix_test.conll +3 -0
- MIX_Code_mixed/mix_train.conll +0 -0
- MULTI_Multilingual/multi_dev.conll +0 -0
- MULTI_Multilingual/multi_test.conll +3 -0
- MULTI_Multilingual/multi_train.conll +3 -0
- NL-Dutch/nl_dev.conll +0 -0
- NL-Dutch/nl_test.conll +3 -0
- NL-Dutch/nl_train.conll +0 -0
- RU-Russian/ru_dev.conll +0 -0
- RU-Russian/ru_test.conll +3 -0
- RU-Russian/ru_train.conll +0 -0
- TR-Turkish/tr_dev.conll +0 -0
- TR-Turkish/tr_test.conll +3 -0
- TR-Turkish/tr_train.conll +0 -0
- ZH-Chinese/zh_dev.conll +0 -0
- ZH-Chinese/zh_test.conll +3 -0
- ZH-Chinese/zh_train.conll +0 -0
- multiconer_v1.py +201 -0
.gitattributes
CHANGED
@@ -53,3 +53,17 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
53 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
54 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
55 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
53 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
54 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
55 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
56 |
+
BN-Bangla/bn_test.conll filter=lfs diff=lfs merge=lfs -text
|
57 |
+
DE-German/de_test.conll filter=lfs diff=lfs merge=lfs -text
|
58 |
+
EN-English/en_test.conll filter=lfs diff=lfs merge=lfs -text
|
59 |
+
ES-Spanish/es_test.conll filter=lfs diff=lfs merge=lfs -text
|
60 |
+
FA-Farsi/fa_test.conll filter=lfs diff=lfs merge=lfs -text
|
61 |
+
HI-Hindi/hi_test.conll filter=lfs diff=lfs merge=lfs -text
|
62 |
+
KO-Korean/ko_test.conll filter=lfs diff=lfs merge=lfs -text
|
63 |
+
MIX_Code_mixed/mix_test.conll filter=lfs diff=lfs merge=lfs -text
|
64 |
+
MULTI_Multilingual/multi_test.conll filter=lfs diff=lfs merge=lfs -text
|
65 |
+
MULTI_Multilingual/multi_train.conll filter=lfs diff=lfs merge=lfs -text
|
66 |
+
NL-Dutch/nl_test.conll filter=lfs diff=lfs merge=lfs -text
|
67 |
+
RU-Russian/ru_test.conll filter=lfs diff=lfs merge=lfs -text
|
68 |
+
TR-Turkish/tr_test.conll filter=lfs diff=lfs merge=lfs -text
|
69 |
+
ZH-Chinese/zh_test.conll filter=lfs diff=lfs merge=lfs -text
|
BN-Bangla/bn_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
BN-Bangla/bn_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f2b69970e95a325bc9e3386f15c601aaa5fc60cebeb016f2c93a3ab0393f078b
|
3 |
+
size 23495327
|
BN-Bangla/bn_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
DE-German/de_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
DE-German/de_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8c034007967a9a8433c151adaf44302003462e2487a8af14d53a919de0b35f8d
|
3 |
+
size 35310250
|
DE-German/de_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
EN-English/en_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
EN-English/en_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3fe2cea15070671e5693782032e2f2636135fac234061a775e1d3b1065c2883b
|
3 |
+
size 35459883
|
EN-English/en_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
ES-Spanish/es_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
ES-Spanish/es_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:44e256c6482844f16ed0bc183735c09f72ff79eda48e0fae9646fe32da67663e
|
3 |
+
size 37356960
|
ES-Spanish/es_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
FA-Farsi/fa_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
FA-Farsi/fa_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ee036966fdbad0993fa7902fc0472323cf90d9b572ece6b7240d161f491d8250
|
3 |
+
size 29294624
|
FA-Farsi/fa_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
HI-Hindi/hi_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
HI-Hindi/hi_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e3683e64ab09cf8e50185d655d10776ca7909ea60d41d4b8fdb2cf8180b33295
|
3 |
+
size 26542669
|
HI-Hindi/hi_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
KO-Korean/ko_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
KO-Korean/ko_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:191ea2b990ea78fb93b75cdf6aebb743479e52f63888e705f94aaa7cf60f9b01
|
3 |
+
size 27726762
|
KO-Korean/ko_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
MIX_Code_mixed/mix_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
MIX_Code_mixed/mix_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:a8aa35c58fb37b465fa5ab1c76744593e4cd44d8054cef944d87f79c4b2aa609
|
3 |
+
size 18380185
|
MIX_Code_mixed/mix_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
MULTI_Multilingual/multi_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
MULTI_Multilingual/multi_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8b950834448478554263dee52b8fee1f065da1b0bbf288ebb576d022b4eccdc2
|
3 |
+
size 85354515
|
MULTI_Multilingual/multi_train.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ca35608a5552946601b896c66223baff7f97f7addf18fcae6a88d2a6345d5631
|
3 |
+
size 53219838
|
NL-Dutch/nl_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
NL-Dutch/nl_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c5337c2019eaeee7d93bac779902aa64b5b0f5e033833de473455302edcd8f39
|
3 |
+
size 34697044
|
NL-Dutch/nl_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
RU-Russian/ru_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
RU-Russian/ru_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ebe5ed4f8ee888613a3e43526ecb5bfd8e0eddbfcd65fc51d086ff8ee75956cd
|
3 |
+
size 44878363
|
RU-Russian/ru_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
TR-Turkish/tr_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
TR-Turkish/tr_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d6cf2a81d668195f3ea07a604e7932ca8876a1001a2e7a9929c00978bf17efbb
|
3 |
+
size 16602565
|
TR-Turkish/tr_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
ZH-Chinese/zh_dev.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
ZH-Chinese/zh_test.conll
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:caeb986137982d47a9a87579cfb7dd141a6547ea8a446f9f471a6a104dbbf13f
|
3 |
+
size 25146238
|
ZH-Chinese/zh_train.conll
ADDED
The diff for this file is too large to render.
See raw diff
|
|
multiconer_v1.py
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
# MultiCoNER 2 is SemEval-*2023* Task 2 (see _CITATION below); the previous
# module docstring said "SemEval 2022", which was the v1 task.
"""SemEval 2023 Task 2: MultiCoNER II: Multilingual Complex Named Entity Recognition"""

import datasets

# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)

# BibTeX for the task report and the dataset paper; surfaced in DatasetInfo.
_CITATION = """\
@inproceedings{multiconer2-report,
title={{SemEval-2023 Task 2: Fine-grained Multilingual Named Entity Recognition (MultiCoNER 2)}},
author={Fetahu, Besnik and Kar, Sudipta and Chen, Zhiyu and Rokhlenko, Oleg and Malmasi, Shervin},
booktitle={Proceedings of the 17th International Workshop on Semantic Evaluation (SemEval-2023)},
year={2023},
publisher={Association for Computational Linguistics},
}
@article{multiconer2-data,
title={{MultiCoNER v2: a Large Multilingual dataset for Fine-grained and Noisy Named Entity Recognition}},
author={Fetahu, Besnik and Chen, Zhiyu and Kar, Sudipta and Rokhlenko, Oleg and Malmasi, Shervin},
year={2023},
}
"""

# Dataset-card description. ("Portuguese"/"Ukrainian" spelling fixed.)
_DESCRIPTION = """\
Complex named entities (NE), like the titles of creative works, are not simple nouns and pose challenges for NER systems (Ashwini and Choi, 2014). They can take the form of any linguistic constituent, like an imperative clause (“Dial M for Murder”), and do not look like traditional NEs (Persons, Locations, etc.). This syntactic ambiguity makes it challenging to recognize them based on context. We organized the MultiCoNER task (Malmasi et al., 2022) at SemEval-2022 to address these challenges in 11 languages, receiving a very positive community response with 34 system papers. Results confirmed the challenges of processing complex and long-tail NEs: even the largest pre-trained Transformers did not achieve top performance without external knowledge. The top systems infused transformers with knowledge bases and gazetteers. However, such solutions are brittle against out of knowledge-base entities and noisy scenarios like the presence of spelling mistakes and typos. We propose MultiCoNER II which represents novel challenges through new tasks that emphasize the shortcomings of the current top models.
MultiCoNER II features complex NER in these languages:
1. English
2. Spanish
3. Hindi
4. Bangla
5. Chinese
6. Swedish
7. Farsi
8. French
9. Italian
10. Portuguese
11. Ukrainian
12. German
For more details see https://multiconer.github.io/
## References
* Sandeep Ashwini and Jinho D. Choi. 2014. Targetable named entity recognition in social media. CoRR, abs/1408.0782.
* Shervin Malmasi, Anjie Fang, Besnik Fetahu, Sudipta Kar, Oleg Rokhlenko. 2022. SemEval-2022 Task 11: Multilingual Complex Named Entity Recognition (MultiCoNER).
"""

# Base URL of the data repository.
# NOTE(review): this file is named multiconer_v1.py but the URL targets the
# "multiconer_v2" repo — confirm which repository is intended.
_URL = "https://huggingface.co/datasets/MultiCoNER/multiconer_v2/resolve/main"

# ISO-ish language code -> folder-name language label (folders are "<CODE>-<Label>",
# e.g. "EN-English"). Entries for ko/nl/ru/tr added: those data folders
# (KO-Korean, NL-Dutch, RU-Russian, TR-Turkish) ship in the repository.
code_vs_lang_map = {
    "en": "English",
    "es": "Spanish",
    "pt": "Portuguese",
    "uk": "Ukrainian",
    "sv": "Swedish",
    "fr": "French",
    "fa": "Farsi",
    "de": "German",
    "zh": "Chinese",
    "hi": "Hindi",
    "bn": "Bangla",
    "it": "Italian",
    "ko": "Korean",
    "nl": "Dutch",
    "ru": "Russian",
    "tr": "Turkish",
    "multi": "Multilingual",
}

# Human-readable config name (as used in BUILDER_CONFIGS) -> language code.
label_vs_code_map = {
    "Bangla (BN)": 'bn',
    "Chinese (ZH)": 'zh',
    "English (EN)": 'en',
    "Spanish (ES)": 'es',
    "Swedish (SV)": 'sv',
    "French (FR)": 'fr',
    "Farsi (FA)": 'fa',
    "German (DE)": 'de',
    "Portuguese (PT)": 'pt',
    "Hindi (HI)": 'hi',
    "Italian (IT)": 'it',
    "Ukrainian (UK)": 'uk',
    "Korean (KO)": 'ko',
    "Dutch (NL)": 'nl',
    "Russian (RU)": 'ru',
    "Turkish (TR)": 'tr',
    "Multilingual (MULTI)": 'multi',
}
74 |
+
class MultiCoNER2Config(datasets.BuilderConfig):
    """Builder configuration for one MultiCoNER2 language subset."""

    def __init__(self, **kwargs):
        """Create a MultiCoNER2 configuration.

        Args:
            **kwargs: keyword arguments (name, version, description, ...)
                forwarded unchanged to `datasets.BuilderConfig`.
        """
        # No extra state of its own; this subclass exists so configs can be
        # extended later without touching call sites.
        super().__init__(**kwargs)
85 |
+
class MultiCoNER2(datasets.GeneratorBasedBuilder):
    """MultiCoNER2 dataset loader: one config per language, CoNLL-format data files."""

    BUILDER_CONFIGS = [
        MultiCoNER2Config(name="Bangla (BN)", version=datasets.Version("1.0.0"),
                          description="MultiCoNER2 Bangla dataset"),
        MultiCoNER2Config(name="English (EN)", version=datasets.Version("1.0.0"),
                          description="MultiCoNER2 English dataset"),
        MultiCoNER2Config(name="Farsi (FA)", version=datasets.Version("1.0.0"),
                          description="MultiCoNER2 Farsi dataset"),
        MultiCoNER2Config(name="German (DE)", version=datasets.Version("1.0.0"),
                          description="MultiCoNER2 German dataset"),
        MultiCoNER2Config(name="Hindi (HI)", version=datasets.Version("1.0.0"),
                          description="MultiCoNER2 Hindi dataset"),
        MultiCoNER2Config(name="Spanish (ES)", version=datasets.Version("1.0.0"),
                          description="MultiCoNER2 Spanish dataset"),
        # ZH-Chinese data ships in the repository and both language maps already
        # cover "zh", so expose it as a config (was missing).
        MultiCoNER2Config(name="Chinese (ZH)", version=datasets.Version("1.0.0"),
                          description="MultiCoNER2 Chinese dataset"),
        MultiCoNER2Config(name="Multilingual (MULTI)", version=datasets.Version("1.0.0"),
                          description="MultiCoNER2 Multilingual dataset"),
        # NOTE(review): KO/NL/RU/TR/MIX data folders also ship in the repository
        # but have no configs here — confirm whether they should be exposed.
    ]

    def _info(self):
        """Return the DatasetInfo: features, description, homepage, citation.

        Each example carries the raw string tags ("ner_tags") and the same tags
        encoded through a ClassLabel ("ner_tags_index").
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "sample_id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(datasets.Value("string")),
                    # NOTE(review): this label set is the coarse 6-type scheme;
                    # MultiCoNER II is described as fine-grained — confirm the
                    # tag inventory matches the shipped .conll files.
                    "ner_tags_index": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                'O',
                                'B-PER',
                                'I-PER',
                                'B-LOC',
                                'I-LOC',
                                'B-GRP',
                                'I-GRP',
                                'B-CORP',
                                'I-CORP',
                                'B-PROD',
                                'I-PROD',
                                'B-CW',
                                'I-CW',
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://multiconer.github.io",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the train/dev/test CoNLL files for the selected config."""
        code = label_vs_code_map[self.config.name]
        # Data folders are named "<CODE>-<Language>" (e.g. "EN-English"), except
        # the multilingual and code-mixed folders, which use an underscore
        # ("MULTI_Multilingual", "MIX_Code_mixed"); the old hyphen-only scheme
        # produced a non-existent "MULTI-Multilingual" path.
        sep = "_" if code in ("multi", "mix") else "-"
        subdir = f"{code.upper()}{sep}{code_vs_lang_map[code]}"
        urls_to_download = {
            split: f"{_URL}/{subdir}/{code}_{split}.conll"
            for split in ("train", "dev", "test")
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs parsed from a MultiCoNER CoNLL file.

        File layout: a sentence starts with a "# id <sample_id> ..." comment,
        token lines are "<token> _ _ <tag>", and a blank line ends the sentence.
        """
        logger.info("⏳ Generating examples from = %s", filepath)

        def _record(guid, s_id, tokens, ner_tags):
            # Single place that shapes the yielded example dict.
            return guid, {
                "id": guid,
                "sample_id": s_id,
                "tokens": tokens,
                "ner_tags_index": ner_tags,
                "ner_tags": ner_tags,
            }

        # The data is UTF-8 across all languages; rely on an explicit encoding
        # rather than the platform default (which breaks e.g. on Windows).
        with open(filepath, encoding="utf-8") as f:
            guid = -1
            s_id = None
            tokens = []
            ner_tags = []

            for line in f:
                if line.strip().startswith("# id"):
                    # Sentence header, e.g. "# id <uuid> domain=...".
                    s_id = line.split()[2].strip()
                    guid += 1
                    tokens = []
                    ner_tags = []
                elif ' _ _ ' in line:
                    # Token line; the column separator is " _ _ ".
                    splits = line.split(" _ _ ")
                    tokens.append(splits[0].strip())
                    ner_tags.append(splits[1].strip())
                elif len(line.strip()) == 0:
                    # Blank line closes the current sentence; only emit
                    # complete, consistent sentences.
                    if s_id and tokens and len(tokens) == len(ner_tags):
                        yield _record(guid, s_id, tokens, ner_tags)
                    s_id = None
                    tokens = []
                    ner_tags = []
                # Any other line (e.g. non-id comments) is ignored.

            # Flush the last sentence when the file has no trailing blank line.
            if s_id and tokens and len(tokens) == len(ner_tags):
                yield _record(guid, s_id, tokens, ner_tags)