# Loading script for the Telugu-English Codeswitch Transliterate dataset
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """ """
_DESCRIPTION = """Telugu English POS Codeswitch dataset.
"""
_HOMEPAGE = """https://zenodo.org/record/4762031"""
_URL = "https://huggingface.co/datasets/anishka/CodeSwitching-TE-EN/resolve/main/"
_TRAINING_FILE = "TE_EN.conllu"
class TeEnCodeSwitchConfig(datasets.BuilderConfig):
    """Builder config for the Te-En CodeSwitch dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for TeEnCodeSwitch.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Python 3 zero-argument super() — identical behavior, less repetition.
        super().__init__(**kwargs)
class TeEnCodeSwitch(datasets.GeneratorBasedBuilder):
    """Te-En code-switch dataset: sentences as token sequences with per-token tags."""

    BUILDER_CONFIGS = [
        TeEnCodeSwitchConfig(
            name="Te-En-CodeSwitch",
            version=datasets.Version("0.0.1"),
            description="Te-En-CodeSwitch dataset",
        ),
    ]

    def _info(self):
        """Return dataset metadata: feature schema, homepage and citation.

        NOTE(review): the feature column is named "ner_tags" with BIO NER
        labels while _DESCRIPTION calls this a POS dataset — confirm which
        tag set the .conllu file actually carries.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "B-LOC",
                                "B-MISC",
                                "B-ORG",
                                "B-PER",
                                "I-LOC",
                                "I-MISC",
                                "I-ORG",
                                "I-PER",
                                "O",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single training file and return its SplitGenerator."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        # Use the module logger instead of leftover debug print() calls.
        logger.info("Downloading files: %s", urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs from a TAB-separated token/tag file.

        Sentences are separated by blank lines. Lines starting with
        "-DOCSTART-" act as sentence boundaries; lines starting with "#"
        (CoNLL-U comment/metadata lines) are skipped.

        Args:
            filepath: path to the downloaded .conllu file.

        Yields:
            (guid, dict) with keys "id", "tokens", "ner_tags".
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                elif line.startswith("#"):
                    # CoNLL-U metadata/comment line: it has no tab-separated
                    # second column, so the split below would IndexError.
                    continue
                else:
                    # Columns are TAB separated (the original comment said
                    # "space separated", contradicting the split on '\t').
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    ner_tags.append(splits[1].rstrip())
            # Flush the last sentence — but only if one is pending; the
            # original yielded a spurious empty example whenever the file
            # ended with a blank line (or was empty).
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }