haneulpark committed
Commit 500a5b4 · verified · 1 Parent(s): 5b5646a

Upload MolData_preprocessing.py

Files changed (1)
  1. MolData_preprocessing.py +220 -0
MolData_preprocessing.py ADDED
# This is a script for MolData dataset preprocessing

# 1. Load modules
import pandas as pd
import numpy as np
import urllib.request
import rdkit
from rdkit import Chem
import os
import molvs
import csv
import json
import tqdm

standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()

# 2. Download the original dataset
# https://github.com/LumosBio/MolData
# Suppose that 'all_molecular_data.csv' has been downloaded from GitHub

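# A minimal, optional sketch of that download step, assuming the CSV can be
# fetched directly from the MolData GitHub repository; the raw-file URL below
# is an assumption and may need adjusting to the repository's actual layout.
if not os.path.exists('all_molecular_data.csv'):
    urllib.request.urlretrieve(
        'https://raw.githubusercontent.com/LumosBio/MolData/main/Data/all_molecular_data.csv',
        'all_molecular_data.csv')
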
# 3. Check if any SMILES is missing in the dataset (first column)
df = pd.read_csv('all_molecular_data.csv')

missing_SMILES = df[df.iloc[:, 0].isna()]

print(f'There are {len(missing_SMILES)} rows with missing SMILES.')  # This prints 'There are 0 rows with missing SMILES.'

# 4. Sanitize SMILES with MolVS and print problems
# Since the dataset is large, we divided it into four portions to sanitize
quarter_df_1 = df.iloc[:len(df)//4].copy()

# Parse each SMILES, standardize it, strip salt/solvent fragments, and write the canonical SMILES back
quarter_df_1['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(
                    smiles))))
    for smiles in quarter_df_1['smiles']]

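# As a single-record illustration of the same pipeline, a made-up salt-form
# SMILES can be pushed through it like this (the exact output depends on the
# MolVS defaults):
_salt_example = rdkit.Chem.MolToSmiles(
    fragment_remover.remove(
        standardizer.standardize(
            rdkit.Chem.MolFromSmiles('CC(=O)[O-].[Na+]'))))
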
problems = []
for index, row in tqdm.tqdm(quarter_df_1.iterrows()):
    result = molvs.validate_smiles(row['X'])
    if len(result) == 0:
        continue
    problems.append((row['X'], result))

# Most problems are because the molecule still includes a salt form and/or is not neutralized
for smiles, alerts in problems:
    print(f"SMILES: {smiles}, problem: {alerts[0]}")

quarter_df_1.to_csv('MolData_sanitized_0.25.csv')


quarter_df_2 = df.iloc[len(df)//4 : len(df)//2].copy()

quarter_df_2['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(
                    smiles))))
    for smiles in quarter_df_2['smiles']]

problems = []
for index, row in tqdm.tqdm(quarter_df_2.iterrows()):
    result = molvs.validate_smiles(row['X'])
    if len(result) == 0:
        continue
    problems.append((row['X'], result))

# Most problems are because the molecule still includes a salt form and/or is not neutralized
for smiles, alerts in problems:
    print(f"SMILES: {smiles}, problem: {alerts[0]}")

quarter_df_2.to_csv('MolData_sanitized_0.5.csv')


quarter_df_3 = df.iloc[len(df)//2 : 3*len(df)//4].copy()

quarter_df_3['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(
                    smiles))))
    for smiles in quarter_df_3['smiles']]

problems = []
for index, row in tqdm.tqdm(quarter_df_3.iterrows()):
    result = molvs.validate_smiles(row['X'])
    if len(result) == 0:
        continue
    problems.append((row['X'], result))

# Most problems are because the molecule still includes a salt form and/or is not neutralized
for smiles, alerts in problems:
    print(f"SMILES: {smiles}, problem: {alerts[0]}")

quarter_df_3.to_csv('MolData_sanitized_0.75.csv')


quarter_df_4 = df.iloc[3*len(df)//4 : len(df)].copy()

quarter_df_4['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(
                    smiles))))
    for smiles in quarter_df_4['smiles']]

problems = []
for index, row in tqdm.tqdm(quarter_df_4.iterrows()):
    result = molvs.validate_smiles(row['X'])
    if len(result) == 0:
        continue
    problems.append((row['X'], result))

# Most problems are because the molecule still includes a salt form and/or is not neutralized
for smiles, alerts in problems:
    print(f"SMILES: {smiles}, problem: {alerts[0]}")

quarter_df_4.to_csv('MolData_sanitized_1.0.csv')


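# The four blocks above run the identical pipeline on each quarter of the data.
# The same idea can be expressed more compactly; the helper below is an
# illustrative sketch rather than an additional step of the pipeline.
def sanitize_portion(portion):
    portion = portion.copy()
    portion['X'] = [
        rdkit.Chem.MolToSmiles(
            fragment_remover.remove(
                standardizer.standardize(
                    rdkit.Chem.MolFromSmiles(smiles))))
        for smiles in portion['smiles']]
    return portion

# Equivalent driver loop, commented out so the work above is not repeated:
# for i in range(4):
#     part = sanitize_portion(df.iloc[i*len(df)//4 : (i+1)*len(df)//4])
#     part.to_csv(f'MolData_sanitized_{0.25*(i+1)}.csv')
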
# 5. Concatenate the four sanitized portions
sanitized1 = pd.read_csv('MolData_sanitized_0.25.csv')
sanitized2 = pd.read_csv('MolData_sanitized_0.5.csv')
sanitized3 = pd.read_csv('MolData_sanitized_0.75.csv')
sanitized4 = pd.read_csv('MolData_sanitized_1.0.csv')

smiles_concatenated = pd.concat([sanitized1, sanitized2, sanitized3, sanitized4], ignore_index=True)

smiles_concatenated.to_csv('MolData_sanitized_concatenated.csv', index=False)


# 6. Formatting and naming (wide form to long form, & column naming)
# Due to the large size of the dataset, we process it in chunks.
chunk_size = 10**5
input_file = 'MolData_sanitized_concatenated.csv'
output_prefix = 'MolData_long_form_'

column_names = pd.read_csv(input_file, nrows=1).columns
column_names = column_names.tolist()

# Rename the sanitized SMILES column 'X' to 'SMILES'
column_names = ['SMILES' if col == 'X' else col for col in column_names]

# Each 'activity_' (assay) column becomes its own row in the long form
var_name_list = [col for col in column_names if col.startswith('activity_')]

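# A tiny illustration of the wide-to-long reshaping used below, on a made-up
# two-assay record (the values are purely illustrative):
_wide_demo = pd.DataFrame({'SMILES': ['CCO'], 'PUBCHEM_CID': [702], 'split': ['train'],
                           'activity_1': [1], 'activity_2': [0]})
_long_demo = pd.melt(_wide_demo, id_vars=['SMILES', 'PUBCHEM_CID', 'split'],
                     value_vars=['activity_1', 'activity_2'],
                     var_name='AID', value_name='score')
# _long_demo now has one row per assay column:
#   SMILES  PUBCHEM_CID  split         AID  score
#   CCO     702          train  activity_1      1
#   CCO     702          train  activity_2      0
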
with pd.read_csv(input_file, chunksize=chunk_size) as reader:
    for i, chunk in enumerate(reader):
        chunk.columns = column_names

        long_df = pd.melt(chunk, id_vars=['SMILES', 'PUBCHEM_CID', 'split'],
                          value_vars=var_name_list, var_name='AID', value_name='score')

        long_df = long_df.dropna(subset=['score'])
        long_df['score'] = long_df['score'].astype('Int64')

        output_file = f"{output_prefix}{i+1}.csv"
        long_df.to_csv(output_file, index=False)

        print(f"Saved: {output_file}")


# 7. Split into train, test, and validation
chunk_size = 10**5
# The previous step produced 15 long-form chunk files for this dataset
input_files = [f'MolData_long_form_{i+1}.csv' for i in range(15)]

output_train_file = 'MolData_train.csv'
output_test_file = 'MolData_test.csv'
output_valid_file = 'MolData_validation.csv'

train_data = []
test_data = []
valid_data = []

for input_file in input_files:
    with pd.read_csv(input_file, chunksize=chunk_size) as reader:
        for chunk in reader:
            train_chunk = chunk[chunk['split'] == 'train']
            test_chunk = chunk[chunk['split'] == 'test']
            valid_chunk = chunk[chunk['split'] == 'validation']

            train_data.append(train_chunk)
            test_data.append(test_chunk)
            valid_data.append(valid_chunk)

train_df = pd.concat(train_data, ignore_index=True)
test_df = pd.concat(test_data, ignore_index=True)
valid_df = pd.concat(valid_data, ignore_index=True)

train_df.to_csv(output_train_file, index=False)
test_df.to_csv(output_test_file, index=False)
valid_df.to_csv(output_valid_file, index=False)


def fix_cid_column(df):
    df['PUBCHEM_CID'] = df['PUBCHEM_CID'].astype(str).apply(lambda x: x.split(',')[0])  # Because some molecules have two comma-separated CIDs; keep the first
    df['PUBCHEM_CID'] = df['PUBCHEM_CID'].astype('Int64')
    df = df.rename(columns={'score': 'Y'})  # Rename the label column from 'score' to 'Y'
    return df

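# A small illustration of fix_cid_column on a made-up frame in which one record
# carries two comma-separated CIDs (the values are purely illustrative):
_cid_demo = fix_cid_column(pd.DataFrame({'PUBCHEM_CID': ['702', '962,12345'],
                                         'score': [1, 0]}))
# _cid_demo['PUBCHEM_CID'] should now hold the Int64 values 702 and 962,
# and the 'score' column is renamed to 'Y'.
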
train_csv = fix_cid_column(pd.read_csv('MolData_train.csv'))
test_csv = fix_cid_column(pd.read_csv('MolData_test.csv'))
valid_csv = fix_cid_column(pd.read_csv('MolData_validation.csv'))

train_csv.to_parquet('MolData_train.parquet', index=False)
test_csv.to_parquet('MolData_test.parquet', index=False)
valid_csv.to_parquet('MolData_validation.parquet', index=False)
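
# A quick, optional sanity check on the final artifacts (the column names follow
# from the steps above):
_check = pd.read_parquet('MolData_train.parquet')
print(_check[['SMILES', 'PUBCHEM_CID', 'split', 'AID', 'Y']].head())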