|
from pathlib import Path

import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
|
_DESCRIPTION = """\ |
|
MIMICause Dataset: A dataset for representation and automatic extraction of causal relation types from clinical notes. |
|
The dataset has 2714 samples having both explicit and implicit causality in which entities are in the same sentence or different sentences. |
|
The dataset has following nine semantic causal relations (with directionality) between entitities E1 and E2 in a text snippet: |
|
(1) Cause(E1,E2) |
|
(2) Cause(E2,E1) |
|
(3) Enable(E1,E2) |
|
(4) Enable(E2,E1) |
|
(5) Prevent(E1,E2) |
|
(6) Prevent(E2,E1) |
|
(7) Hinder(E1,E2) |
|
(8) Hinder(E2,E1) |
|
(9) Other |
|
""" |
|
|
|
_DOWNLOAD_URL = "https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/"
|
_CITATION = """\
@inproceedings{khetan-etal-2022-mimicause,
    title={MIMICause: Representation and automatic extraction of causal relation types from clinical notes},
    author={Vivek Khetan and Md Imbesat Hassan Rizvi and Jessica Huber and Paige Bartusiak and Bogdan Sacaleanu and Andrew Fano},
    booktitle={Findings of the Association for Computational Linguistics: ACL 2022},
    month={may},
    year={2022},
    publisher={Association for Computational Linguistics},
    address={Dublin, The Republic of Ireland},
    url={},
    doi={},
    pages={},
}
"""
|
|
class MIMICAUSE(datasets.GeneratorBasedBuilder):

    VERSION = datasets.utils.Version("1.0.0")
    manual_download_instructions = (
        "The MIMICause dataset requires manual download of the mimicause.zip "
        "file from the Community Annotations Downloads of the DBMI Data Portal"
        f" ({_DOWNLOAD_URL}) after signing their agreement forms, which is a "
        "quick and easy procedure. Then provide the path of the downloaded "
        "mimicause.zip file."
    )
|
    def _info(self):
        features = datasets.Features(
            {
                "E1": datasets.Value("string"),
                "E2": datasets.Value("string"),
                "Text": datasets.Value("large_string"),
                "Label": datasets.features.ClassLabel(
                    names=[
                        "Cause(E1,E2)",
                        "Cause(E2,E1)",
                        "Enable(E1,E2)",
                        "Enable(E2,E1)",
                        "Prevent(E1,E2)",
                        "Prevent(E2,E1)",
                        "Hinder(E1,E2)",
                        "Hinder(E2,E1)",
                        "Other",
                    ],
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_DOWNLOAD_URL,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        r"""
        The dataset is first split in a 90:10 ratio, and the 90% split is then
        split again in an 80:20 ratio, so the final Train:Val:Test ratio is
        72:18:10.
        """
|
|
|
        filepath = dl_manager.download_and_extract(dl_manager.manual_dir)
        filepath = Path(filepath) / "Annotations.csv"

        data_df = pd.read_csv(filepath)
        data_df = data_df.fillna("")

        train_df, test_df = train_test_split(
            data_df, test_size=0.1, stratify=data_df.Label, random_state=42
        )
        train_df, val_df = train_test_split(
            train_df, test_size=0.2, stratify=train_df.Label, random_state=42
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"df": train_df}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"df": val_df}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"df": test_df}
            ),
        ]
|
    def _generate_examples(self, df):
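        # Yield one example per annotation row; the original DataFrame index is
        # used as the example key, and each row dict matches the feature schema
        # (E1, E2, Text, Label).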
|
        for idx, row in df.iterrows():
            yield idx, row.to_dict()
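
# Minimal usage sketch, assuming a `datasets` version that still supports
# script-based loading and that the manually downloaded mimicause.zip sits at
# the (hypothetical) path given as data_dir below.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, data_dir="mimicause.zip")
    print(dataset)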
|
|