Update Mimic4Dataset.py

Mimic4Dataset.py  (+12 -5)
@@ -398,11 +398,17 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         data=data.T
         train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42)
         train_data, val_data = train_test_split(test_data, test_size=self.val_size, random_state=42)
+
         csv_dir = "./data/dict/"+self.config.name.replace(" ","_")

         train_data.to_csv(csv_dir+'/train_data.csv',index=False)
         val_data.to_csv(csv_dir+'/val_data.csv',index=False)
         test_data.to_csv(csv_dir+'/test_data.csv',index=False)
+
+        train_data.to_pickle(csv_dir+'/train_data.pkl')
+        val_data.to_pickle(csv_dir+'/val_data.pkl')
+        test_data.to_pickle(csv_dir+'/test_data.pkl')
+
         return feat_cond, feat_chart, feat_proc, feat_meds, feat_out

 ###########################################################RAW##################################################################
@@ -470,14 +476,15 @@
         csv_dir = "./data/dict/"+self.config.name.replace(" ","_")

         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.csv'}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data.csv'}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.csv'}),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.pkl'}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data.pkl'}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.pkl'}),
         ]

     def _generate_examples_raw(self, filepath):
-
-
+        with open(filepath, 'rb') as fp:
+            dataDic = pickle.load(fp)
+        for hid, data in dataDic.items():
             proc_features = data['Proc']
             chart_features = data['Chart']
             meds_features = data['Med']
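For reference, the save step added in the first hunk amounts to a three-way split followed by dual serialization. The standalone sketch below illustrates that pattern under invented inputs: the DataFrame, output directory, and split fractions are placeholders rather than the builder's real config, and the variable wiring is a generic train/val/test split, not a line-for-line copy of the script. One API detail worth noting: DataFrame.to_csv accepts index=False, but DataFrame.to_pickle takes only a path, so the pickle copy always carries the index.

import os
import pandas as pd
from sklearn.model_selection import train_test_split

# Placeholder frame standing in for the transposed feature table (data.T).
df = pd.DataFrame({"hadm_id": range(10), "label": [0, 1] * 5})

# First split off a held-out portion, then carve a validation slice out of it.
train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)
test_df, val_df = train_test_split(test_df, test_size=0.5, random_state=42)

out_dir = "./data/dict/demo"          # placeholder for the script's csv_dir
os.makedirs(out_dir, exist_ok=True)

for name, split in [("train", train_df), ("val", val_df), ("test", test_df)]:
    # CSV copy: human-readable, index dropped explicitly.
    split.to_csv(os.path.join(out_dir, f"{name}_data.csv"), index=False)
    # Pickle copy: preserves dtypes and nested objects exactly; takes only a path.
    split.to_pickle(os.path.join(out_dir, f"{name}_data.pkl"))

Writing pickle copies alongside the CSVs presumably lets the loader recover exact dtypes and any nested Python objects that a CSV round-trip would flatten to strings.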
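On the loading side, the second hunk points each SplitGenerator at a .pkl file and rewrites _generate_examples_raw to unpickle it and iterate a per-admission mapping. The hypothetical builder below sketches that wiring with the Hugging Face datasets API; the class name, feature schema, directory, and yielded example are invented for illustration, and only the gen_kwargs filepath handshake and the pickle loop mirror the diff.

import pickle
import datasets

class PickleSplitsDemo(datasets.GeneratorBasedBuilder):
    """Hypothetical builder showing pkl-backed train/val/test splits."""

    VERSION = datasets.Version("0.0.1")

    def _info(self):
        # Placeholder schema, not the real Mimic4Dataset features.
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "hid": datasets.Value("string"),
                    "n_proc": datasets.Value("int32"),
                }
            )
        )

    def _split_generators(self, dl_manager):
        csv_dir = "./data/dict/demo"   # placeholder for the real csv_dir
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": csv_dir + "/train_data.pkl"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": csv_dir + "/val_data.pkl"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": csv_dir + "/test_data.pkl"},
            ),
        ]

    def _generate_examples(self, filepath):
        # Unpickle the split, assumed here to be a mapping of the form
        # {hid: {'Proc': ..., 'Chart': ..., 'Med': ...}}, which is the access
        # pattern implied by the new _generate_examples_raw.
        with open(filepath, "rb") as fp:
            data_dic = pickle.load(fp)
        for hid, data in data_dic.items():
            # Illustrative example shape only.
            yield hid, {"hid": str(hid), "n_proc": len(data["Proc"])}

Whatever _split_generators puts into gen_kwargs is passed verbatim as keyword arguments to the example generator, which is how each split's pickle path reaches _generate_examples_raw in the updated script.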