# Loading script for the Ancora NER dataset.
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """ """
_DESCRIPTION = """AnCora Catalan NER.
This is a dataset for Named Eentity Reacognition (NER) from Ancora corpus adapted for
Machine Learning and Language Model evaluation purposes.
Since multiwords (including Named Entites) in the original Ancora corpus are aggregated as
a single lexical item using underscores (e.g. "Ajuntament_de_Barcelona")
we splitted them to align with word-per-line format, and added conventional Begin-Inside-Outside (IOB)
tags to mark and classify Named Entites.
We did not filter out the different categories of NEs from Ancora (weak and strong).
We did 6 minor edits by hand.
AnCora corpus is used under [CC-by] (https://creativecommons.org/licenses/by/4.0/) licence.
This dataset was developed by BSC TeMU as part of the AINA project, and to enrich the Catalan Language Understanding Benchmark (CLUB).
"""
_HOMEPAGE = """https://zenodo.org/record/4762031"""
_URL = "https://huggingface.co/datasets/anishka/CodeSwitching-TE-EN/resolve/main/"
_TRAINING_FILE = "te_en-code_switch-train.conllu"
_DEV_FILE = "te_en-code_switch-dev.conllu"
_TEST_FILE = "te_en-code_switch-test.conllu"
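# The .conllu files above follow the standard ten-column, tab-separated
# CoNLL-U layout (ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS,
# MISC); _generate_examples below reads FORM, UPOS and XPOS. A hypothetical
# token line for illustration (columns shown space-padded, actually tabs):
#
#   1   nenu   nenu   PRON   _   _   2   nsubj   _   _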
class AncoraCaNerConfig(datasets.BuilderConfig):
    """Builder config for the AncoraCaNer dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for AncoraCaNer.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(AncoraCaNerConfig, self).__init__(**kwargs)
class AncoraCaNer(datasets.GeneratorBasedBuilder):
    """AncoraCaNer dataset."""

    BUILDER_CONFIGS = [
        AncoraCaNerConfig(
            name="AncoraCaNer",
            version=datasets.Version("2.0.0"),
            description="AncoraCaNer dataset",
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    "text": datasets.Sequence(datasets.Value("string")),
                    "upos": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "NOUN",
                                "PUNCT",
                                "ADP",
                                "NUM",
                                "SYM",
                                "SCONJ",
                                "ADJ",
                                "PART",
                                "DET",
                                "CCONJ",
                                "PROPN",
                                "PRON",
                                "X",
                                "_",
                                "ADV",
                                "INTJ",
                                "VERB",
                                "AUX",
                            ]
                        )
                    ),
                    "xpos": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            upos_tags = []
            xpos_tags = []
            for line in f:
                if not line.strip() or line.startswith("#") or line.startswith("-DOCSTART-"):
                    # A blank line ends a sentence; "#" lines are CoNLL-U comments.
                    if tokens:
                        yield guid, {
                            "idx": str(guid),
                            "text": tokens,
                            "upos": upos_tags,
                            "xpos": xpos_tags,
                        }
                        guid += 1
                        tokens = []
                        upos_tags = []
                        xpos_tags = []
                else:
                    # CoNLL-U fields are tab separated: ID, FORM, LEMMA, UPOS, XPOS, ...
                    splits = line.rstrip("\n").split("\t")
                    if "-" in splits[0] or "." in splits[0]:
                        # Skip multiword-token ranges (e.g. "1-2") and empty nodes (e.g. "3.1").
                        continue
                    tokens.append(splits[1])
                    upos_tags.append(splits[3])
                    xpos_tags.append(splits[4])
            # Yield the last sentence if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "idx": str(guid),
                    "text": tokens,
                    "upos": upos_tags,
                    "xpos": xpos_tags,
                }
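# Minimal usage sketch (an assumption, not part of the original script): load
# the dataset through this script and print one training example. Recent
# versions of `datasets` may additionally require `trust_remote_code=True`
# for script-based datasets.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, name="AncoraCaNer")
    print(ds["train"][0])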