import conllu
import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """ """

_DESCRIPTION = """Telugu-English POS code-switching dataset.
"""

_HOMEPAGE = ""

# Use "resolve/main/" so the raw .conllu files are downloaded;
# "blob/main/" points at the HTML viewer page, not the data.
_URL = "https://huggingface.co/datasets/anishka/CodeSwitching-TE-EN/resolve/main/"
_TRAINING_FILE = "TWT-train.conllu"
_DEV_FILE = "TWT-dev.conllu"
_TEST_FILE = "TWT-test.conllu"
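
# The data files are standard CoNLL-U: one token per line with ten
# tab-separated columns, sentences separated by blank lines, and optional
# "# sent_id" / "# text" metadata comments. An illustrative sketch of the
# expected format (a hypothetical sentence, not taken from the corpus):
#
#   # sent_id = 1
#   # text = movie chala bagundi
#   1   movie     movie     NOUN  _  _  3  nsubj   _  _
#   2   chala     chala     ADV   _  _  3  advmod  _  _
#   3   bagundi   bagundi   VERB  _  _  0  root    _  _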


class TeEnCodeSwitchConfig(datasets.BuilderConfig):
    """Builder config for the Te-En code-switching dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for TeEnCodeSwitch.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(TeEnCodeSwitchConfig, self).__init__(**kwargs)


class TeEnCodeSwitch(datasets.GeneratorBasedBuilder):
    """Te-En-CodeSwitch dataset."""

    BUILDER_CONFIGS = [
        TeEnCodeSwitchConfig(
            name="Te-En-CodeSwitch",
            version=datasets.Version("0.0.1"),
            description="Te-En-CodeSwitch dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # These feature names must match the keys yielded by
                    # _generate_examples below.
                    "idx": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    "upos": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "NOUN",
                                "PUNCT",
                                "ADP",
                                "NUM",
                                "SYM",
                                "SCONJ",
                                "ADJ",
                                "PART",
                                "DET",
                                "CCONJ",
                                "PROPN",
                                "PRON",
                                "X",
                                "_",
                                "ADV",
                                "INTJ",
                                "VERB",
                                "AUX",
                            ]
                        )
                    ),
                    "xpos": datasets.Sequence(datasets.Value("string")),
                    "feats": datasets.Sequence(datasets.Value("string")),
                    "head": datasets.Sequence(datasets.Value("string")),
                    "deprel": datasets.Sequence(datasets.Value("string")),
                    "deps": datasets.Sequence(datasets.Value("string")),
                    "misc": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        logger.info("Downloading files: %s", urls_to_download)
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        # `filepath` is a single downloaded file, so open it directly;
        # iterating over it would walk the characters of the path string.
        guid = 0
        with open(filepath, "r", encoding="utf-8") as data_file:
            # parse_incr lazily yields one sentence (TokenList) at a time.
            for sent in conllu.parse_incr(data_file):
                # Prefer the sentence id recorded in the CoNLL-U metadata;
                # fall back to a running counter.
                if "sent_id" in sent.metadata:
                    idx = sent.metadata["sent_id"]
                else:
                    idx = guid

                tokens = [token["form"] for token in sent]

                if "text" in sent.metadata:
                    txt = sent.metadata["text"]
                else:
                    txt = " ".join(tokens)

                yield guid, {
                    "idx": str(idx),
                    "text": txt,
                    "tokens": tokens,
                    "lemmas": [token["lemma"] for token in sent],
                    "upos": [token["upos"] for token in sent],
                    "xpos": [token["xpos"] for token in sent],
                    "feats": [str(token["feats"]) for token in sent],
                    "head": [str(token["head"]) for token in sent],
                    "deprel": [str(token["deprel"]) for token in sent],
                    "deps": [str(token["deps"]) for token in sent],
                    "misc": [str(token["misc"]) for token in sent],
                }
                guid += 1
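

# A minimal usage sketch, assuming this script is the loading script of the
# Hub repo that _URL points at (the repo id below is inferred from _URL and
# may need adjusting; trust_remote_code is required by newer versions of
# `datasets` for script-based datasets):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("anishka/CodeSwitching-TE-EN", trust_remote_code=True)
#   print(ds["train"][0]["tokens"])
#   print(ds["train"][0]["upos"])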