"""CrossSum cross-lingual abstractive summarization dataset."""
import json
import os
import datasets

_CITATION = """\
@article{hasan2021crosssum,
  author     = {Tahmid Hasan and Abhik Bhattacharjee and Wasi Uddin Ahmad and Yuan-Fang Li and Yong-bin Kang and Rifat Shahriyar},
  title      = {CrossSum: Beyond English-Centric Cross-Lingual Abstractive Text Summarization for 1500+ Language Pairs},
  journal    = {CoRR},
  volume     = {abs/2112.08804},
  year       = {2021},
  url        = {https://arxiv.org/abs/2112.08804},
  eprinttype = {arXiv},
  eprint     = {2112.08804}
}
"""

_DESCRIPTION = """\
We present CrossSum, a large-scale dataset
comprising 1.70 million cross-lingual article-summary samples in 1500+ language pairs
constituting 45 languages. We use the multilingual XL-Sum dataset and align identical
articles written in different languages via cross-lingual retrieval using a language-agnostic
representation model.
"""
_HOMEPAGE = "https://github.com/csebuetnlp/CrossSum"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
_URL = "https://huggingface.co/datasets/csebuetnlp/CrossSum/resolve/main/data/{}-{}_CrossSum.tar.bz2"
_LANGUAGES = [
    "oromo",
    "french",
    "amharic",
    "arabic",
    "azerbaijani",
    "bengali",
    "burmese",
    "chinese_simplified",
    "chinese_traditional",
    "welsh",
    "english",
    "kirundi",
    "gujarati",
    "hausa",
    "hindi",
    "igbo",
    "indonesian",
    "japanese",
    "korean",
    "kyrgyz",
    "marathi",
    "spanish",
    "scottish_gaelic",
    "nepali",
    "pashto",
    "persian",
    "pidgin",
    "portuguese",
    "punjabi",
    "russian",
    "serbian_cyrillic",
    "serbian_latin",
    "sinhala",
    "somali",
    "swahili",
    "tamil",
    "telugu",
    "thai",
    "tigrinya",
    "turkish",
    "ukrainian",
    "urdu",
    "uzbek",
    "vietnamese",
    "yoruba",
]
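
# Usage sketch (not executed by this script): each builder configuration is named
# "<source_language>-<target_language>" using the identifiers in _LANGUAGES above.
# Assuming the script is hosted as csebuetnlp/CrossSum on the Hugging Face Hub (see _URL),
# a language pair can be loaded roughly as follows; depending on the installed `datasets`
# version, `trust_remote_code=True` may additionally be required for script-based datasets:
#
#     from datasets import load_dataset
#     ds = load_dataset("csebuetnlp/CrossSum", "bengali-english")
#     print(ds["train"][0]["summary"])
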
class Crosssum(datasets.GeneratorBasedBuilder):
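    """Builder for CrossSum; one configuration is generated per (source, target) language pair."""

    # Configuration names follow the "{source}-{target}" pattern (e.g. "bengali-english"),
    # matching the archive names produced by _URL.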
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="{}-{}".format(src_lang, tgt_lang),
            version=datasets.Version("1.0.0")
        )
        for src_lang in _LANGUAGES
        for tgt_lang in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "source_url": datasets.Value("string"),
                    "target_url": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang_pairs = str(self.config.name)
        url = _URL.format(*lang_pairs.split("-"))
        data_dir = dl_manager.download_and_extract(url)
        # Each extracted archive contains "<src>-<tgt>_train.jsonl",
        # "<src>-<tgt>_val.jsonl" and "<src>-<tgt>_test.jsonl" files.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang_pairs + "_train.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang_pairs + "_test.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang_pairs + "_val.jsonl"),
                },
            ),
        ]
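
    # Each JSONL line is expected to provide at least the four fields read below.
    # A hypothetical row (values are placeholders, not real data):
    #     {"source_url": "https://...", "target_url": "https://...",
    #      "summary": "...", "text": "..."}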

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            idx_ = -1
            for idx_, row in enumerate(f):
                data = json.loads(row)
                yield idx_, {
                    "source_url": data["source_url"],
                    "target_url": data["target_url"],
                    "summary": data["summary"],
                    "text": data["text"],
                }

            # If the file has no rows (an extremely low-resource pair), yield a single
            # placeholder example so the split is not left empty.
            if idx_ == -1:
                yield 0, {
                    "source_url": "",
                    "target_url": "",
                    "summary": "",
                    "text": "",
                }
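
# Local testing sketch (assumption: network access to the data archives). Depending on the
# installed `datasets` version, the script can be exercised from a local checkout before
# pushing to the Hub, e.g. (the path below is hypothetical):
#
#     from datasets import load_dataset
#     ds = load_dataset("path/to/CrossSum.py", "amharic-english")
#     print(ds)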