jmc255 committed on
Commit d93115d · verified · 1 Parent(s): be6ec69

Delete dataprocessingcopy.py

Files changed (1)
  1. dataprocessingcopy.py +0 -203
dataprocessingcopy.py DELETED
@@ -1,203 +0,0 @@
- # -*- coding: utf-8 -*-
- """dataprocessing.ipynb
-
- Automatically generated by Colaboratory.
-
- Original file is located at
-     https://colab.research.google.com/drive/10At7vh21OGTlE-Myv1NhAHi7l7NwBocQ
- """
-
- import pandas as pd
- import numpy as np
- import os
- from zipfile import ZipFile
- import re
- import json
- import base64
-
- from google.colab import drive
- drive.mount('/content/drive')
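-
- # Root Drive folder holding the questionnaire spreadsheet and the zipped
- # drawing/photo archives.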
- path = "/content/drive/MyDrive/Duke/huggingface_project/data"
-
- df = pd.read_excel(path + "/questionnaire-data.xlsx", header=2)
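-
- # Total VVIQ and OSIQ questionnaire scores per subject; VVIQ > 40 is labeled
- # "control", otherwise "aphantasia".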
- df["vviq_score"] = np.sum(df.filter(like="vviq"), axis=1)
- df["osiq_score"] = np.sum(df.filter(like="osiq"), axis=1)
- df["treatment"] = np.where(df.vviq_score > 40, "control", "aphantasia")
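-
- # Give the verbose questionnaire columns short snake_case names.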
- df = df.rename(columns={
-     "Sub ID": "sub_id",
-     df.columns[5]: "art_ability",
-     df.columns[6]: "art_experience",
-     df.columns[9]: "difficult",
-     df.columns[10]: "diff_explanation"
- })
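-
- # Normalize the remaining headers, then drop spreadsheet filler columns and
- # the per-item vviq/osiq responses (only the summed scores are kept).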
- df.columns = df.columns.str.lower()
-
- df = df.drop(df.filter(like="unnamed").columns, axis=1)
-
- df = df.drop(df.filter(regex=r"(vviq|osiq)\d+").columns, axis=1)
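-
- # Cast object columns to pandas strings and collapse the assorted NA
- # spellings to None.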
- df[df.columns[df.dtypes == "object"]] = df[df.columns[df.dtypes == "object"]].astype("string")
-
- df[df.columns] = df[df.columns].replace([np.nan, pd.NA, "nan", "na", "NA", "n/a", "N/A", "N/a"], None)
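-
- # One record per subject: id, treatment group, and a demographics dict built
- # from the remaining columns.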
- data = {}
- for ind, row in df.iterrows():
-     data[row["sub_id"]] = {
-         "subject_id": int(row["sub_id"]),
-         "treatment": row["treatment"],
-         "demographics": dict(df.iloc[ind][1:-1])
-     }
-     data[row["sub_id"]]["demographics"]["art_ability"] = int(data[row["sub_id"]]["demographics"]["art_ability"])
-     data[row["sub_id"]]["demographics"]["vviq_score"] = int(data[row["sub_id"]]["demographics"]["vviq_score"])
-     data[row["sub_id"]]["demographics"]["osiq_score"] = int(data[row["sub_id"]]["demographics"]["osiq_score"])
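-
- # Base64-encode the reference photographs so the bytes survive JSON/Parquet
- # serialization as plain text.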
- stored_images = {}
- with ZipFile(path + "/Images.zip", "r") as zf:
-     for image_file in zf.namelist():
-         with zf.open(image_file, 'r') as fil:
-             im = fil.read()
-             im_encoded = base64.b64encode(im).decode("utf-8")
-             stored_images[image_file.removesuffix(".jpg")] = im_encoded
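-
- # Collect one subject's drawing paths per room; filenames containing "pic"
- # are perception drawings, the rest are drawn from memory.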
- def get_sub_files(subject, file_list):
-     pattern = re.compile("^.*" + subject + r"-[a-z]{3}\d-(kitchen|livingroom|bedroom).*")
-     sub_files = [f for f in file_list if pattern.match(f)]
-     sub = {
-         "kitchen": {
-             "perception": "",
-             "memory": ""
-         },
-         "livingroom": {
-             "perception": "",
-             "memory": ""
-         },
-         "bedroom": {
-             "perception": "",
-             "memory": ""
-         },
-     }
-
-     for fil in sub_files:
-         if "kitchen" in fil:
-             if "pic" in fil:
-                 sub["kitchen"]["perception"] = fil
-             else:
-                 sub["kitchen"]["memory"] = fil
-         elif "livingroom" in fil:
-             if "pic" in fil:
-                 sub["livingroom"]["perception"] = fil
-             else:
-                 sub["livingroom"]["memory"] = fil
-         else:
-             if "pic" in fil:
-                 sub["bedroom"]["perception"] = fil
-             else:
-                 sub["bedroom"]["memory"] = fil
-     return sub
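-
- # Attach each aphantasia subject's drawing paths, keyed by the numeric id
- # parsed from the folder name.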
- with ZipFile(path + "/Aphantasia-Drawings.zip", "r") as zf:
-     files = zf.namelist()
-
- aphan_subs = list({f.split("/")[0] for f in files})
- aphantasia_drawing_dataset = {}
- for s in aphan_subs:
-     if int(s[3:]) in data.keys():
-         data[int(s[3:])]["drawings"] = get_sub_files(s, files)
-     else:
-         data[int(s[3:])] = {"drawings": get_sub_files(s, files)}
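-
- # Same linkage for control subjects.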
- with ZipFile(path + "/Control-Drawings.zip", "r") as zf:
-     files = zf.namelist()
-
- cntrl_subs = list({f.split("/")[0] for f in files})
- full_control = {}
- for s in cntrl_subs:
-     if int(s[3:]) in data.keys():
-         data[int(s[3:])]["drawings"] = get_sub_files(s, files)
-     else:
-         data[int(s[3:])] = {"drawings": get_sub_files(s, files)}
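-
- # Rename the encoded stimulus photos from their original filenames to the
- # room they depict.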
- stored_images["kitchen"] = stored_images.pop('high_sun_ajwbpqrwvknlvpeh')
- stored_images["bedroom"] = stored_images.pop('low_sun_acqsqjhtcbxeomux')
- stored_images["livingroom"] = stored_images.pop('low_sun_byqgoskwpvsbllvy')
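-
- # Replace a subject's drawing paths with base64-encoded image bytes, read
- # from the zip that matches their treatment group.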
- def extract_images(subject, treatment):
-     images_bytes = {
-         "kitchen": {
-             "perception": "",
-             "memory": ""
-         },
-         "livingroom": {
-             "perception": "",
-             "memory": ""
-         },
-         "bedroom": {
-             "perception": "",
-             "memory": ""
-         }
-     }
-     for room in ["kitchen", "livingroom", "bedroom"]:
-         paths = data[subject]["drawings"].get(room).values()
-         paths = [p for p in paths if p != ""]
-         if treatment == "aphantasia":
-             with ZipFile(path + "/Aphantasia-Drawings.zip", "r") as zf:
-                 for filename in paths:
-                     with zf.open(filename, 'r') as fil:
-                         im = fil.read()
-                         im_encoded = base64.b64encode(im).decode("utf-8")
-                         if "mem" in filename:
-                             images_bytes[room]["memory"] = im_encoded
-                         else:
-                             images_bytes[room]["perception"] = im_encoded
-         else:
-             with ZipFile(path + "/Control-Drawings.zip", "r") as zf:
-                 for filename in paths:
-                     with zf.open(filename, 'r') as fil:
-                         im = fil.read()
-                         im_encoded = base64.b64encode(im).decode("utf-8")
-                         if "mem" in filename:
-                             images_bytes[room]["memory"] = im_encoded
-                         else:
-                             images_bytes[room]["perception"] = im_encoded
-
-     return images_bytes
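-
- # Swap paths for encoded images; subjects lacking drawings or a
- # questionnaire record are collected and dropped.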
- missing = []
- for i in data.keys():
-     if "drawings" in data[i] and "treatment" in data[i]:
-         data[i]["drawings"] = extract_images(i, data[i]["treatment"])
-     else:
-         missing.append(i)
-
- for num in missing:
-     data.pop(num, None)
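-
- # Every remaining subject record also carries the three reference photos.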
- for sub in data.keys():
-     data[sub]["image"] = stored_images
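-
- # Optional JSON dump of the nested dict (left disabled in the notebook).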
- subject_data_path = path + "/clean_data.json"
-
- # with open(subject_data_path, "w", encoding="utf-8") as sub_data:
- #     json.dump(data, sub_data, indent=2)
-
- type(data)
-
- da = pd.DataFrame(data)
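-
- # Flatten each nested subject record into one wide row for the final table.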
- flattened_data = []
-
- for key, value in data.items():
-     flattened_subject = pd.json_normalize(value, sep='_')
-     flattened_data.append(flattened_subject)
-
- da = pd.concat(flattened_data, ignore_index=True)
-
- # Save the DataFrame to a Parquet file
- da.to_parquet(path + '/data.parquet')