# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Covid Dialog dataset in English and Chinese"""
import textwrap

import datasets
# BibTeX citation
_CITATION = """\
@Inproceedings{MeQSum,
author = {Asma {Ben Abacha} and Dina Demner-Fushman},
title = {On the Summarization of Consumer Health Questions},
booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, ACL 2019,
Florence, Italy, July 28th - August 2},
year = {2019},
abstract = {Question understanding is one of the main challenges in question answering. In real world applications,
users often submit natural language questions that are longer than needed and include peripheral information that
increases the complexity of the question, leading to substantially more false positives in answer retrieval. In this
paper, we study neural abstractive models for medical question summarization. We introduce the MeQSum corpus of
1,000 summarized consumer health questions. We explore data augmentation methods and evaluate state-of-the-art
neural abstractive models on this new task. In particular, we show that semantic augmentation from question datasets
improves the overall performance, and that pointer-generator networks outperform sequence-to-sequence attentional
models on this task, with a ROUGE-1 score of 44.16%. We also present a detailed error analysis and discuss
directions for improvement that are specific to question summarization.}}
"""
# Official description of the dataset
_DESCRIPTION = textwrap.dedent(
    """
    From "On the Summarization of Consumer Health Questions" (Ben Abacha and Demner-Fushman, ACL 2019), MeQSum is a
    corpus of 1,000 summarized consumer health questions.

    The following is an example from the dataset (question text reproduced verbatim):

    Question:
        SUBJECT: inversion of long arm chromasome7 MESSAGE: My son has been diagnosed with inversion of long arm
        chromasome 7 and down syndrome . please could you give me information on the chromasome 7 please because
        our doctors have not yet mentioned it

    Summary:
        Where can I find information on chromosome 7?
    """
)
# Link to an official homepage for the dataset here
_HOMEPAGE = "https://worksheets.codalab.org/rest/bundles/0xd98a53314314445b96b4d703bb2d8c8c/contents/blob/"
_LICENSE = ""
class MeQSum(datasets.GeneratorBasedBuilder):
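    """Dataset builder mapping MeQSum consumer health questions ("query") to their reference summaries ("answer")."""
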
VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=datasets.Version("1.0.0"), description=_DESCRIPTION),
    ]
def _info(self):
features = datasets.Features(
{
"query": datasets.Value("string"),
"answer": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=f"Covid Dialogue dataset, as preprocessed and shuffled in HELM",
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
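
    # The raw data is expected as three line-aligned file pairs, {train,val,test}.source
    # (full consumer questions) and {train,val,test}.target (one-line summaries); the
    # relative names below are resolved by `dl_manager` against the dataset's base path.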
def _split_generators(self, dl_manager):
        test_source = dl_manager.download("test.source")
        test_target = dl_manager.download("test.target")
train_source = dl_manager.download("train.source")
train_target = dl_manager.download("train.target")
val_source = dl_manager.download("val.source")
val_target = dl_manager.download("val.target")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"target": train_target, "source": train_source},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"target": val_target, "source": val_source},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"target": test_target, "source": test_source},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, source, target):
        # `source` and `target` are line-aligned: line i of `source` holds the full consumer
        # question and line i of `target` holds its reference summary.
        with open(source, encoding="utf-8") as f_source, open(target, encoding="utf-8") as f_target:
            for idx, (s, t) in enumerate(zip(f_source, f_target)):
                yield idx, {"query": s.strip(), "answer": t.strip()}
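

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the builder itself). It assumes a `datasets`
# version that still supports loading from a local script, and that the
# *.source / *.target files are reachable from this script's directory; adjust
# paths for your own setup.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__)
    print(dataset["train"][0]["query"])
    print(dataset["train"][0]["answer"])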