Update Mimic4Dataset.py
Mimic4Dataset.py  +3 -25  CHANGED
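Summary of the change, as read from the hunks below: the eager deep-split generation is removed (the `generate_split_deep` helper and the calls in `_info_deep` that pickled `X_train_deep`/`X_test_deep`/`X_val_deep`), task-cohort creation becomes lazy behind an `os.path.exists(data_dir)` guard, and a debug `print(data_icu)` is added in `task_cohort`.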
@@ -200,7 +200,6 @@ def vocab(task,diag_flag,proc_flag,out_flag,chart_flag,med_flag,lab_flag):
 
     return ethVocabDict,genderVocabDict,ageVocabDict,insVocabDict
 
-
 def concat_data(data,task,feat_cond,feat_proc,feat_out,feat_chart,feat_meds):
     meds=data['Med']
     proc = data['Proc']
@@ -376,7 +375,6 @@ def getXY_deep(data,task,feat_cond,feat_proc,feat_out,feat_chart,feat_meds):
 
     return stat, demo, meds, chart, out, proc, lab, y
 
-
 def getXY(dyn,stat,demo,concat_cols,concat):
     X_df=pd.DataFrame()
     if concat:
@@ -449,17 +447,6 @@ def generate_split(path,task,concat,feat_cond,feat_chart,feat_proc, feat_meds, feat_out):
     X_df = encoding(X_df)
     return X_df
 
-def generate_split_deep(path,task,feat_cond,feat_chart,feat_proc, feat_meds, feat_out):
-    with open(path, 'rb') as fp:
-        dico = pickle.load(fp)
-    X = pd.DataFrame.from_dict(dico, orient='index')
-    X_dict = {}
-    taskf=task.replace(" ","_")
-    for hid, data in tqdm(X.iterrows(),desc='Encoding Splits Data for '+task+' task'):
-        stat, demo, meds, chart, out, proc, lab, y = getXY_deep(data, taskf, feat_cond, feat_proc, feat_out, feat_chart,feat_meds)
-        X_dict[hid] = {'stat': stat, 'demo': demo, 'meds': meds, 'chart': chart, 'out': out, 'proc': proc, 'lab': lab, 'label': y}
-
-    return X_dict
 
 
 def task_cohort(task, mimic_path, config_path):
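The deleted helper was self-contained, so downstream code that still needs the deep-format dictionaries can rebuild them from `getXY_deep` directly. A minimal sketch, assuming `getXY_deep` keeps the signature shown above and the split pickles map admission ids to per-admission feature dicts; `build_deep_split` is a hypothetical name:

```python
import pickle
import pandas as pd
from tqdm import tqdm
# getXY_deep is assumed importable from this module.

def build_deep_split(path, task, feat_cond, feat_proc, feat_out, feat_chart, feat_meds):
    # Mirror the removed generate_split_deep: load one split pickle,
    # encode every admission with getXY_deep, and key results by admission id.
    with open(path, 'rb') as fp:
        dico = pickle.load(fp)
    X = pd.DataFrame.from_dict(dico, orient='index')
    X_dict = {}
    taskf = task.replace(" ", "_")
    for hid, data in tqdm(X.iterrows(), desc='Encoding Splits Data for ' + task + ' task'):
        stat, demo, meds, chart, out, proc, lab, y = getXY_deep(
            data, taskf, feat_cond, feat_proc, feat_out, feat_chart, feat_meds)
        X_dict[hid] = {'stat': stat, 'demo': demo, 'meds': meds, 'chart': chart,
                       'out': out, 'proc': proc, 'lab': lab, 'label': y}
    return X_dict
```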
@@ -527,6 +514,7 @@ def task_cohort(task, mimic_path, config_path):
     elif version == '1':
         cohort_output = day_intervals_cohort.extract_data(icu_no_icu,label,tim,icd_code, root_dir,version_path,disease_label)
     #----------------------------------------------FEATURES-------------------------------------------------------
+    print(data_icu)
     if data_icu :
         feature_selection_icu.feature_icu(cohort_output, version_path,diag_flag,out_flag,chart_flag,proc_flag,med_flag)
     #----------------------------------------------GROUPING-------------------------------------------------------
@@ -715,7 +703,8 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         config = self.config_path.split('/')[-1]
 
         #####################create task cohort
-
+        if not os.path.exists(data_dir):
+            task_cohort(self.config.name.replace(" ","_"),self.mimic_path,config)
 
         #####################Split data into train, test and val
         with open(data_dir, 'rb') as fp:
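The two added lines implement a lazy-cache guard: the expensive `task_cohort` extraction now runs only when the preprocessed file at `data_dir` is missing, so repeated dataset loads reuse the cached pickle. A minimal sketch of the general pattern; `load_or_build` is a hypothetical helper, not part of the file:

```python
import os
import pickle

def load_or_build(cache_path, build_fn):
    # Run the expensive build step only when the cached file is missing,
    # then always read from the cache (the guard added in this hunk).
    if not os.path.exists(cache_path):
        build_fn()
    with open(cache_path, 'rb') as fp:
        return pickle.load(fp)

# Usage mirroring the diff:
# data = load_or_build(data_dir, lambda: task_cohort(task, mimic_path, config))
```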
@@ -941,16 +930,6 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             yield i, row.to_dict()
     ######################################################DEEP###############################################################
     def _info_deep(self):
-        X_train_deep = generate_split_deep(self.path+'/train_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
-        X_test_deep = generate_split_deep(self.path+'/test_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
-        X_val_deep = generate_split_deep(self.path+'/val_data.pkl',self.config.name,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out)
-
-        with open(self.path+"/X_train_deep.pkl", 'wb') as f:
-            pickle.dump(X_train_deep, f)
-        with open(self.path+"/X_test_deep.pkl", 'wb') as f:
-            pickle.dump(X_test_deep, f)
-        with open(self.path+"/X_val_deep.pkl", 'wb') as f:
-            pickle.dump(X_val_deep, f)
         features = datasets.Features(
         {
             "label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
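After this change `_info_deep` only declares the example schema instead of also materializing and pickling the splits. For reference, a minimal sketch of how such a schema plugs into a Hugging Face `datasets.DatasetInfo`; only the `label` column appears in the hunk, so the other feature columns are elided:

```python
import datasets

def _info_deep_sketch():
    # Declare the schema only; no data is read or written here.
    features = datasets.Features(
        {
            "label": datasets.ClassLabel(num_classes=2, names=["0", "1"]),
        }
    )
    return datasets.DatasetInfo(features=features)
```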
@@ -983,7 +962,6 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
     def _generate_examples_deep(self, filepath):
         with open(filepath, 'rb') as fp:
             dico = pickle.load(fp)
-
         task=self.config.name.replace(" ","_")
         if 'Custom' in task:
             task = task.rsplit('_', 1)[0]