import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {Luganda, Kanuri, and Hausa NER Dataset},
  author = {multiple authors},
  year   = {2022}
}
"""

_DESCRIPTION = """\
LugandaPII is a named-entity recognition dataset annotated with PII-related
entity types such as PERSON, ORG, LOCATION, NORP, USERID, and DATE.
It covers the Luganda, Lum, Kanuri, and Hausa languages, each distributed
across train, validation, and test splits.
"""

_URL = "https://github.com/EricPeter/pii/raw/main/data"
_TRAINING_FILE = "train.txt"
_VAL_FILE = "val.txt"
_TEST_FILE = "test.txt"
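
# Assumed data layout under _URL, inferred from the download URLs built in
# _split_generators and the parsing in _generate_examples:
#     data/<lang>/train.txt, data/<lang>/val.txt, data/<lang>/test.txt
# where <lang> is a config name below ('lug', 'hau', 'knr', 'lum').
# Each file is CoNLL-style: one whitespace-separated "token TAG" pair per
# line, with blank lines separating sentences.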


class LugPIIConfig(datasets.BuilderConfig):
    """Configuration for LugandaPII dataset."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class LugandaPII(datasets.GeneratorBasedBuilder):
    """Dataset builder for the LugandaPII NER dataset."""

    BUILDER_CONFIGS = [
        LugPIIConfig(name="lug", version=datasets.Version("1.0.0"), description="PII NER dataset for Luganda."),
        LugPIIConfig(name="hau", version=datasets.Version("1.0.0"), description="PII NER dataset for Hausa."),
        LugPIIConfig(name="knr", version=datasets.Version("1.0.0"), description="PII NER dataset for Kanuri."),
        LugPIIConfig(name="lum", version=datasets.Version("1.0.0"), description="PII NER dataset for Lum."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # BILOU-style tag set (B = begin, I = inside, L = last,
                    # U = unit-length, O = outside). Both LOC/LOCATION and
                    # USERID/USER_ID spellings occur in the source annotations,
                    # so both are kept as distinct labels.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "B-DATE", "B-GOVT_ID", "B-LOC", "B-LOCATION", "B-NORP",
                                "B-ORG", "B-PERSON", "B-USERID", "B-USER_ID",
                                "I-DATE", "I-GOVT_ID", "I-LOC", "I-LOCATION", "I-NORP",
                                "I-ORG", "I-PERSON", "I-USERID", "I-USER_ID",
                                "L-DATE", "L-GOVT_ID", "L-LOC", "L-LOCATION", "L-NORP",
                                "L-ORG", "L-PERSON", "L-USERID", "L-USER_ID",
                                "O",
                                "U-DATE", "U-GOVT_ID", "U-LOCATION", "U-NORP",
                                "U-ORG", "U-PERSON", "U-USERID",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/EricPeter/pii",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        lang_code = self.config.name  # one of 'lug', 'hau', 'knr', 'lum'
        urls_to_download = {
            "train": f"{_URL}/{lang_code}/{_TRAINING_FILE}",
            "val": f"{_URL}/{lang_code}/{_VAL_FILE}",
            "test": f"{_URL}/{lang_code}/{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line.strip() == "":
                    # A blank line marks the end of the current sentence.
                    if tokens:
                        yield guid, {"id": str(guid), "tokens": tokens, "ner_tags": ner_tags}
                        guid += 1
                        tokens = []
                        ner_tags = []
                    continue
                # Each non-blank line is a whitespace-separated "token TAG" pair.
                splits = line.strip().split()
                tokens.append(splits[0])
                ner_tags.append(splits[1])
            # Flush the final sentence if the file has no trailing blank line.
            if tokens:
                yield guid, {"id": str(guid), "tokens": tokens, "ner_tags": ner_tags}
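

# A minimal smoke test, not part of the loading script proper. It assumes
# network access to the GitHub data files; newer `datasets` releases may
# also require passing trust_remote_code=True to load_dataset for local
# loading scripts.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, name="lug")
    print(ds)
    print(ds["train"][0])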