import json
import os
import datasets
_CITATION = """\
@article{scialom2020mlsum,
title={MLSUM: The Multilingual Summarization Corpus},
author={Scialom, Thomas and Dray, Paul-Alexis and Lamprier, Sylvain and Piwowarski, Benjamin and Staiano, Jacopo},
journal={arXiv preprint arXiv:2004.14900},
year={2020}
}
"""
_DESCRIPTION = """\
This is the MLSUM subset of the GEM benchmark. MLSUM is the first large-scale MultiLingual SUMmarization dataset.
Obtained from online newspapers, it contains 1.5M+ article/summary pairs in five different languages -- namely, French, German, Spanish, Russian, and Turkish.
Together with English newspapers from the popular CNN/Daily Mail dataset, the collected data form a large-scale multilingual dataset which can enable new research directions for the text summarization community.
We report cross-lingual comparative analyses based on state-of-the-art systems.
These highlight existing biases which motivate the use of a multi-lingual dataset.
"""
_URL = "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/"
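# GEM includes only the German and Spanish subsets of MLSUM.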
_LANG = ["de", "es"]
_URLs = {
    lang: {
        "train": f"{_URL}{lang}_train.zip",
        "validation": f"{_URL}{lang}_val.zip",
        "test": f"{_URL}{lang}_test.zip",
        "bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids_fixed.json",
        "challenge_set": f"https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/mlsum_{lang}.zip",
    }
    for lang in _LANG
}
class Mlsum(datasets.GeneratorBasedBuilder):
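    """MLSUM subset of the GEM benchmark (German and Spanish configurations)."""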
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name=lang,
version=datasets.Version("1.0.0"),
description="",
)
for lang in _LANG
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"gem_id": datasets.Value("string"),
"gem_parent_id": datasets.Value("string"),
"text": datasets.Value("string"),
"topic": datasets.Value("string"),
"url": datasets.Value("string"),
"title": datasets.Value("string"),
"date": datasets.Value("string"),
"target": datasets.Value("string"),
"references": [datasets.Value("string")],
}
),
supervised_keys=None,
homepage="",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
        lang = self.config.name
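        # GEM challenge sets: 500-example random samples of the train and
        # validation splits, plus a held-out COVID-19 test set.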
challenge_sets = [
("challenge_train_sample", f"train_mlsum_{lang}_RandomSample500.json"),
("challenge_validation_sample", f"validation_mlsum_{lang}_RandomSample500.json"),
("challenge_test_covid", f"{lang}_test_covid19_cleaned.jsonl"),
]
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(dl_dir["train"], lang + "_train.jsonl"),
"split": "train",
"lang": lang,
"filepaths": dl_dir["bad_ids"],
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(dl_dir["validation"], lang + "_val.jsonl"),
"split": "validation",
"lang": lang,
"filepaths": dl_dir["bad_ids"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(dl_dir["test"], lang + "_test.jsonl"),
"split": "test",
"lang": lang,
"filepaths": dl_dir["bad_ids"],
},
),
] + [
datasets.SplitGenerator(
name=challenge_split,
gen_kwargs={
"filepath": os.path.join(dl_dir["challenge_set"], f"mlsum_{self.config.name}", filename),
"split": challenge_split,
},
)
for challenge_split, filename in challenge_sets
]
    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields examples.

        For the standard train/validation/test splits, `filepaths` is the path
        to the JSON file of known-bad article URLs that should be skipped.
        """
        if split in ["train", "validation", "test", "challenge_test_covid"]:
            if split == "challenge_test_covid":
                bad_ids = set()
            else:
                # Load the known-bad article URLs once; matching articles are skipped below.
                with open(filepaths, encoding="utf-8") as f:
                    bad_ids_dct = json.load(f)
                bad_ids = {bad_url for _, bad_url in bad_ids_dct[f"{lang}-{split}"]}
with open(filepath, encoding="utf-8") as f:
id_ = -1
for line in f:
data = json.loads(line)
if data["url"] in bad_ids:
continue
else:
id_ += 1
yield id_, {
"gem_id": f"{self.config.name}-{split}-{id_}",
"gem_parent_id": f"{self.config.name}-{split}-{id_}",
"text": data["text"],
"target": data["summary"],
"references": [] if split == "train" else [data["summary"]],
"topic": data["topic"],
"url": data["url"],
"title": data["title"],
"date": data["date"],
}
        else:
            # The remaining challenge sets (random samples) ship as a single JSON file.
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                # Skip empty placeholder entries.
                if len(exple) == 0:
                    continue
                exple["gem_parent_id"] = exple["gem_id"]
                exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
                yield id_, exple
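# Minimal usage sketch. This assumes the script is saved locally (the file name
# "mlsum.py" below is an assumption) and loaded through `datasets.load_dataset`,
# which is how GeneratorBasedBuilder scripts are typically consumed:
#
#   import datasets
#   ds = datasets.load_dataset("mlsum.py", "de")
#   sample = ds["train"][0]
#   print(sample["gem_id"], sample["title"])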