# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSMD: a dataset for assessing meaning preservation between sentences"""
import csv
import datasets
from datasets import load_dataset
_CITATION = """\
@ARTICLE{10.3389/frai.2023.1223924,
AUTHOR={Beauchemin, David and Saggion, Horacio and Khoury, Richard},
TITLE={{MeaningBERT: Assessing Meaning Preservation Between Sentences}},
JOURNAL={Frontiers in Artificial Intelligence},
VOLUME={6},
YEAR={2023},
URL={https://www.frontiersin.org/articles/10.3389/frai.2023.1223924},
DOI={10.3389/frai.2023.1223924},
ISSN={2624-8212},
}
"""
_DESCRIPTION = """\
Continuous Scale Meaning Dataset (CSMD) is a dataset for assessing meaning preservation between sentences.
"""
_HOMEPAGE = "https://github.com/GRAAL-Research/csmd"
_LICENSE = "Attribution 4.0 International (CC BY 4.0)"
# Point at raw.githubusercontent.com so the TSV files themselves are downloaded;
# the corresponding github.com/.../blob/... URLs serve HTML pages instead.
_URL_LIST = [
    (
        "meaning.train",
        "https://raw.githubusercontent.com/GRAAL-Research/csmd/main/dataset/meaning/train.tsv",
    ),
    (
        "meaning.dev",
        "https://raw.githubusercontent.com/GRAAL-Research/csmd/main/dataset/meaning/dev.tsv",
    ),
    (
        "meaning.test",
        "https://raw.githubusercontent.com/GRAAL-Research/csmd/main/dataset/meaning/test.tsv",
    ),
    (
        "meaning_with_data_augmentation.train",
        "https://raw.githubusercontent.com/GRAAL-Research/csmd/main/dataset/meaning_with_data_augmentation/train.tsv",
    ),
    (
        "meaning_with_data_augmentation.dev",
        "https://raw.githubusercontent.com/GRAAL-Research/csmd/main/dataset/meaning_with_data_augmentation/dev.tsv",
    ),
    (
        "meaning_with_data_augmentation.test",
        "https://raw.githubusercontent.com/GRAAL-Research/csmd/main/dataset/meaning_with_data_augmentation/test.tsv",
    ),
    (
        "identical",
        "https://raw.githubusercontent.com/GRAAL-Research/csmd/main/dataset/holdout/identical.tsv",
    ),
    (
        "unrelated",
        "https://raw.githubusercontent.com/GRAAL-Research/csmd/main/dataset/holdout/unrelated.tsv",
    ),
]
_URLs = dict(_URL_LIST)
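# The keys above pair each configuration with its split files: the training
# configurations use "<config name>.<split>" keys (with the validation split
# stored as "dev"), while the two holdout sets are keyed by their bare names.
# _split_generators below reconstructs these keys to select the right file.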
class CSMD(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("2.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="meaning",
            version=VERSION,
            description="This configuration contains 1,355 meaning preservation triplets (document, "
            "simplification, label).",
        ),
        datasets.BuilderConfig(
            name="meaning_with_data_augmentation",
            version=VERSION,
            description="This configuration contains 1,355 meaning preservation triplets (document, "
            "simplification, label) along with 1,355 data augmentation triplets (document, "
            "document, 1) and 1,355 data augmentation triplets (document, unrelated document, 0) "
            "(see the sanity checks in our article).",
        ),
        datasets.BuilderConfig(
            name="meaning_holdout_identical",
            version=VERSION,
            description="This configuration contains 359 identical meaning-holdout triplets (document, "
            "document, 1) based on the ASSET simplification dataset.",
        ),
        datasets.BuilderConfig(
            name="meaning_holdout_unrelated",
            version=VERSION,
            description="This configuration contains 359 unrelated meaning-holdout triplets (document, "
            "unrelated document, 0) based on the ASSET simplification dataset.",
        ),
    ]
DEFAULT_CONFIG_NAME = "meaning"
def _info(self):
features = datasets.Features(
{
"document": datasets.Value(dtype="string"),
"simplification": datasets.Value(dtype="string"),
"labels": datasets.Value(dtype="string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLs)
        if self.config.name in ("meaning", "meaning_with_data_augmentation"):
            # The downloaded files are keyed as "<config name>.<split>", with
            # the validation split stored under "dev".
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepaths": data_dir,
                        "split": f"{self.config.name}.train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepaths": data_dir,
                        "split": f"{self.config.name}.dev",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepaths": data_dir,
                        "split": f"{self.config.name}.test",
                    },
                ),
            ]
        elif self.config.name in ("meaning_holdout_identical", "meaning_holdout_unrelated"):
            # The holdout files are keyed by their bare names in _URLs.
            key = self.config.name.replace("meaning_holdout_", "")
            return [
                datasets.SplitGenerator(
                    name=f"{key}_{datasets.Split.TEST}",
                    gen_kwargs={
                        "filepaths": data_dir,
                        "split": key,
                    },
                ),
            ]
    def _generate_examples(self, filepaths, split):
        with open(filepaths[split], encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t")
            keys = []
            for id_, row in enumerate(reader):
                if id_ == 0:
                    # The first row holds the column headers.
                    keys = row[:]
                else:
                    # All three fields are declared as strings in _info, so the
                    # row values are yielded as-is.
                    res = dict(zip(keys, row))
                    # Minus 1, since the first row is the column header.
                    yield id_ - 1, res
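
# Minimal usage sketch (not part of the original script; the local path below is
# hypothetical). With a version of the `datasets` library that still supports
# dataset scripts, the builder above can be exercised as follows (recent
# releases may additionally require `trust_remote_code=True`):
#
#     from datasets import load_dataset
#
#     csmd = load_dataset("path/to/CSMD.py", "meaning")
#     print(csmd["train"][0])
#     # -> {"document": "...", "simplification": "...", "labels": "..."}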