Datasets:

ArXiv:
License:
File size: 3,987 Bytes
6823c0d
 
 
850dcfd
6823c0d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
850dcfd
6823c0d
 
850dcfd
6823c0d
 
 
 
 
 
 
 
 
 
 
 
 
850dcfd
6823c0d
 
 
850dcfd
 
87615ea
 
 
 
850dcfd
6823c0d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
850dcfd
6823c0d
 
 
 
 
 
 
 
 
 
 
 
850dcfd
 
 
 
6823c0d
 
850dcfd
 
 
 
 
 
 
 
 
 
 
6823c0d
 
 
850dcfd
 
 
 
 
 
 
 
6823c0d
 
850dcfd
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import datasets
from pathlib import Path
import pandas as pd
from sklearn.model_selection import train_test_split

# Human-readable summary shown on the dataset card / in `DatasetInfo.description`.
# (Fixed typo: "entitities" -> "entities".)
_DESCRIPTION = """\
MIMICause Dataset: A dataset for representation and automatic extraction of causal relation types from clinical notes.
The dataset has 2714 samples having both explicit and implicit causality in which entities are in the same sentence or different sentences.
The dataset has following nine semantic causal relations (with directionality) between entities E1 and E2 in a text snippet:
(1) Cause(E1,E2)
(2) Cause(E2,E1)
(3) Enable(E1,E2)
(4) Enable(E2,E1)
(5) Prevent(E1,E2)
(6) Prevent(E2,E1)
(7) Hinder(E1,E2)
(8) Hinder(E2,E1)
(9) Other
"""

# The raw data is gated: it must be downloaded manually from the DBMI Data
# Portal (n2c2 NLP community annotations) after signing the usage agreement.
_DOWNLOAD_URL = "https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/"

# BibTeX entry for the MIMICause paper (Findings of ACL 2022).
_CITATION = """\
@inproceedings{khetan-etal-2022-mimicause,
    title={MIMICause: Representation and automatic extraction of causal relation types from clinical notes},
    author={Vivek Khetan and Md Imbesat Hassan Rizvi and Jessica Huber and Paige Bartusiak and Bogdan Sacaleanu and Andrew Fano},
    booktitle ={Findings of the Association for Computational Linguistics: ACL 2022},
    month={may},
    year={2022},
    publisher={Association for Computational Linguistics},
    address={Dublin, The Republic of Ireland},
    url={},
    doi={},
    pages={},
}
"""


class MIMICAUSE(datasets.GeneratorBasedBuilder):
    """Builder for the MIMICause clinical causal-relation dataset.

    Requires a manually downloaded ``mimicause.zip`` (see
    ``manual_download_instructions``) containing ``Annotations.csv`` with
    columns ``E1``, ``E2``, ``Text`` and ``Label``.
    """

    # Use the public `datasets.Version` path rather than the non-public
    # `datasets.utils.Version` alias.
    VERSION = datasets.Version("1.0.0")
    manual_download_instructions = (
        "The MIMICause dataset requires manual download of the mimicause.zip "
        "file from the Community Annotations Downloads of the DBMI Data Portal"
        f" ({_DOWNLOAD_URL}) after signing their agreement forms, which is a "
        "quick and easy procedure. Then provide the path of the downloaded "
        "mimicause.zip file."
    )

    def _info(self):
        """Return the `DatasetInfo`: two entity spans, the text snippet, and
        a nine-way causal-relation class label."""

        features = datasets.Features(
            {
                "E1": datasets.Value("string"),
                "E2": datasets.Value("string"),
                "Text": datasets.Value("large_string"),
                "Label": datasets.features.ClassLabel(
                    names=[
                        "Cause(E1,E2)",
                        "Cause(E2,E1)",
                        "Enable(E1,E2)",
                        "Enable(E2,E1)",
                        "Prevent(E1,E2)",
                        "Prevent(E2,E1)",
                        "Hinder(E1,E2)",
                        "Hinder(E2,E1)",
                        "Other",
                    ],
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_DOWNLOAD_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        r"""Extract the manually supplied archive and split the data.

        The dataset is split first in 90:10 ratio. The 90% split is further
        split in 80:20 ratio. Thus the final split ratio is
        Train:Val:Test :: 72:18:10. Both splits are stratified on ``Label``
        and seeded (``random_state=42``) so the partition is reproducible.
        """

        # `manual_dir` points at the user-provided mimicause.zip; extract it.
        filepath = dl_manager.download_and_extract(dl_manager.manual_dir)
        filepath = Path(filepath) / "Annotations.csv"

        data_df = pd.read_csv(filepath)
        # Empty entity spans come back as NaN; normalize to "" for the
        # string features.
        data_df = data_df.fillna("")

        train_df, test_df = train_test_split(
            data_df, test_size=0.1, stratify=data_df.Label, random_state=42
        )
        train_df, val_df = train_test_split(
            train_df, test_size=0.2, stratify=train_df.Label, random_state=42
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"df": train_df}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"df": val_df}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"df": test_df}
            ),
        ]

    def _generate_examples(self, df):
        """Yield ``(key, example)`` pairs for one split.

        The original DataFrame index is used as the key; indices survive
        `train_test_split` unchanged, so keys are unique within each split.
        """
        for idx, row in df.iterrows():
            yield idx, row.to_dict()