imbesat-rizvi
committed on
Commit
•
850dcfd
1
Parent(s):
6823c0d
created Train, Validation and Test dataset split
Browse files- dataset_info.json +1 -1
- mimicause.py +36 -17
dataset_info.json
CHANGED
@@ -1 +1 @@
|
|
1 |
-
{"description": "MIMICause Dataset: A dataset for representation and automatic extraction of causal relation types from clinical notes.\nThe dataset has 2714 samples having both explicit and implicit causality in which entities are in the same sentence or different sentences.\nThe dataset has following nine semantic causal relations (with directionality) between entitities E1 and E2 in a text snippet:\n(1) Cause(E1,E2)\n(2) Cause(E2,E1)\n(3) Enable(E1,E2)\n(4) Enable(E2,E1)\n(5) Prevent(E1,E2)\n(6) Prevent(E2,E1)\n(7) Hinder(E1,E2)\n(8) Hinder(E2,E1)\n(9) Other\n", "citation": "@inproceedings{khetan-etal-
|
|
|
1 |
+
{"description": "MIMICause Dataset: A dataset for representation and automatic extraction of causal relation types from clinical notes.\nThe dataset has 2714 samples having both explicit and implicit causality in which entities are in the same sentence or different sentences.\nThe dataset has following nine semantic causal relations (with directionality) between entitities E1 and E2 in a text snippet:\n(1) Cause(E1,E2)\n(2) Cause(E2,E1)\n(3) Enable(E1,E2)\n(4) Enable(E2,E1)\n(5) Prevent(E1,E2)\n(6) Prevent(E2,E1)\n(7) Hinder(E1,E2)\n(8) Hinder(E2,E1)\n(9) Other\n", "citation": "@inproceedings{khetan-etal-2022-mimicause,\n title={MIMICause: Representation and automatic extraction of causal relation types from clinical notes},\n author={Vivek Khetan and Md Imbesat Hassan Rizvi and Jessica Huber and Paige Bartusiak and Bogdan Sacaleanu and Andrew Fano},\n booktitle ={Findings of the Association for Computational Linguistics: ACL 2022},\n month={may},\n year={2022},\n publisher={Association for Computational Linguistics},\n address={Dublin, The Republic of Ireland},\n url={},\n doi={},\n pages={},\n}\n", "homepage": "https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/", "license": "", "features": {"E1": {"dtype": "string", "id": null, "_type": "Value"}, "E2": {"dtype": "string", "id": null, "_type": "Value"}, "Text": {"dtype": "large_string", "id": null, "_type": "Value"}, "Label": {"num_classes": 9, "names": ["Cause(E1,E2)", "Cause(E2,E1)", "Enable(E1,E2)", "Enable(E2,E1)", "Prevent(E1,E2)", "Prevent(E2,E1)", "Hinder(E1,E2)", "Hinder(E2,E1)", "Other"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "mimicause", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 366250, "num_examples": 1953, "dataset_name": "mimicause"}, "validation": {"name": "validation", "num_bytes": 
91323, "num_examples": 489, "dataset_name": "mimicause"}, "test": {"name": "test", "num_bytes": 52856, "num_examples": 272, "dataset_name": "mimicause"}}, "download_checksums": {"https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/mimicause.zip": {"num_bytes": 333362, "checksum": "00c4d30abc9bede6dfb79cebf41a838e92cffb1204c94de320f0be8fda4c358b"}}, "download_size": 333362, "post_processing_size": null, "dataset_size": 510429, "size_in_bytes": 843791}
|
mimicause.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
import datasets
|
2 |
from pathlib import Path
|
3 |
import pandas as pd
|
|
|
4 |
|
5 |
_DESCRIPTION = """\
|
6 |
MIMICause Dataset: A dataset for representation and automatic extraction of causal relation types from clinical notes.
|
@@ -17,10 +18,10 @@ The dataset has following nine semantic causal relations (with directionality) b
|
|
17 |
(9) Other
|
18 |
"""
|
19 |
|
20 |
-
_DOWNLOAD_URL = "https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/
|
21 |
|
22 |
_CITATION = """\
|
23 |
-
@inproceedings{khetan-etal-
|
24 |
title={MIMICause: Representation and automatic extraction of causal relation types from clinical notes},
|
25 |
author={Vivek Khetan and Md Imbesat Hassan Rizvi and Jessica Huber and Paige Bartusiak and Bogdan Sacaleanu and Andrew Fano},
|
26 |
booktitle ={Findings of the Association for Computational Linguistics: ACL 2022},
|
@@ -34,14 +35,16 @@ _CITATION = """\
|
|
34 |
}
|
35 |
"""
|
36 |
|
|
|
37 |
class MIMICAUSE(datasets.GeneratorBasedBuilder):
|
38 |
|
39 |
VERSION = datasets.utils.Version("1.0.0")
|
40 |
-
manual_download_instructions =
|
41 |
-
"The MIMICause dataset requires manual download of the mimicause.zip "
|
42 |
-
f"file from the DBMI Data Portal ({_DOWNLOAD_URL}) after signing their"
|
43 |
-
" agreement forms, which is a quick and easy procedure. Then provide "
|
44 |
"the path of the downloaded mimicause.zip file."
|
|
|
45 |
|
46 |
def _info(self):
|
47 |
|
@@ -62,7 +65,7 @@ class MIMICAUSE(datasets.GeneratorBasedBuilder):
|
|
62 |
"Hinder(E2,E1)",
|
63 |
"Other",
|
64 |
],
|
65 |
-
),
|
66 |
}
|
67 |
)
|
68 |
|
@@ -75,20 +78,36 @@ class MIMICAUSE(datasets.GeneratorBasedBuilder):
|
|
75 |
)
|
76 |
|
77 |
def _split_generators(self, dl_manager):
|
|
|
|
|
|
|
|
|
78 |
|
79 |
filepath = dl_manager.download_and_extract(dl_manager.manual_dir)
|
80 |
-
filepath
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
81 |
|
82 |
return [
|
83 |
datasets.SplitGenerator(
|
84 |
-
name=datasets.Split.TRAIN,
|
85 |
-
|
86 |
-
|
|
|
|
|
|
|
|
|
|
|
87 |
]
|
88 |
|
89 |
-
def _generate_examples(self,
|
90 |
-
|
91 |
-
|
92 |
-
data_df = data_df.fillna("")
|
93 |
-
for idx, row in data_df.iterrows():
|
94 |
-
yield idx, row.to_dict()
|
|
|
1 |
import datasets
|
2 |
from pathlib import Path
|
3 |
import pandas as pd
|
4 |
+
from sklearn.model_selection import train_test_split
|
5 |
|
6 |
_DESCRIPTION = """\
|
7 |
MIMICause Dataset: A dataset for representation and automatic extraction of causal relation types from clinical notes.
|
|
|
18 |
(9) Other
|
19 |
"""
|
20 |
|
21 |
+
_DOWNLOAD_URL = "https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/"
|
22 |
|
23 |
_CITATION = """\
|
24 |
+
@inproceedings{khetan-etal-2022-mimicause,
|
25 |
title={MIMICause: Representation and automatic extraction of causal relation types from clinical notes},
|
26 |
author={Vivek Khetan and Md Imbesat Hassan Rizvi and Jessica Huber and Paige Bartusiak and Bogdan Sacaleanu and Andrew Fano},
|
27 |
booktitle ={Findings of the Association for Computational Linguistics: ACL 2022},
|
|
|
35 |
}
|
36 |
"""
|
37 |
|
38 |
+
|
39 |
class MIMICAUSE(datasets.GeneratorBasedBuilder):
|
40 |
|
41 |
VERSION = datasets.utils.Version("1.0.0")
|
42 |
+
manual_download_instructions = (
|
43 |
+
"The MIMICause dataset requires manual download of the mimicause.zip "
|
44 |
+
f"file from the DBMI Data Portal ({_DOWNLOAD_URL}) after signing their"
|
45 |
+
" agreement forms, which is a quick and easy procedure. Then provide "
|
46 |
"the path of the downloaded mimicause.zip file."
|
47 |
+
)
|
48 |
|
49 |
def _info(self):
|
50 |
|
|
|
65 |
"Hinder(E2,E1)",
|
66 |
"Other",
|
67 |
],
|
68 |
+
),
|
69 |
}
|
70 |
)
|
71 |
|
|
|
78 |
)
|
79 |
|
80 |
def _split_generators(self, dl_manager):
|
81 |
+
r"""
|
82 |
+
The dataset is split first in 90:10 ratio. The 90% split is further split
|
83 |
+
in 80:20 ratio. Thus the final split ratio is Train:Val:Test :: 72:18:10.
|
84 |
+
"""
|
85 |
|
86 |
filepath = dl_manager.download_and_extract(dl_manager.manual_dir)
|
87 |
+
filepath = Path(filepath) / "Annotations.csv"
|
88 |
+
|
89 |
+
data_df = pd.read_csv(filepath)
|
90 |
+
data_df = data_df.fillna("")
|
91 |
+
|
92 |
+
train_df, test_df = train_test_split(
|
93 |
+
data_df, test_size=0.1, stratify=data_df.Label, random_state=42
|
94 |
+
)
|
95 |
+
train_df, val_df = train_test_split(
|
96 |
+
train_df, test_size=0.2, stratify=train_df.Label, random_state=42
|
97 |
+
)
|
98 |
|
99 |
return [
|
100 |
datasets.SplitGenerator(
|
101 |
+
name=datasets.Split.TRAIN, gen_kwargs={"df": train_df}
|
102 |
+
),
|
103 |
+
datasets.SplitGenerator(
|
104 |
+
name=datasets.Split.VALIDATION, gen_kwargs={"df": val_df}
|
105 |
+
),
|
106 |
+
datasets.SplitGenerator(
|
107 |
+
name=datasets.Split.TEST, gen_kwargs={"df": test_df}
|
108 |
+
),
|
109 |
]
|
110 |
|
111 |
+
def _generate_examples(self, df):
|
112 |
+
for idx, row in df.iterrows():
|
113 |
+
yield idx, row.to_dict()
|
|
|
|
|
|