Datasets:
# This is a script for MolData dataset preprocessing
# 1. Load modules
import os
import urllib.request

import pandas as pd
import molvs
import tqdm
from rdkit import Chem
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
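# A one-molecule sketch of what these two steps do (illustrative input;
# the exact output depends on the MolVS defaults in your version):
example = Chem.MolFromSmiles('O=C([O-])c1ccccc1.[Na+]')  # a salt form
print(Chem.MolToSmiles(fragment_remover.remove(standardizer.standardize(example))))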
# 2. Download the original dataset
# https://github.com/LumosBio/MolData
# Suppose that 'all_molecular_data.csv' has been downloaded from GitHub
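# If the file is not already present, a minimal download sketch; the raw-file
# URL below is an assumption about the repository layout, adjust if it differs:
url = ('https://raw.githubusercontent.com/LumosBio/MolData/'
       'main/Data/all_molecular_data.csv')
if not os.path.exists('all_molecular_data.csv'):
    urllib.request.urlretrieve(url, 'all_molecular_data.csv')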
# 3. Check if any SMILES is missing in the dataset (first column)
df = pd.read_csv('all_molecular_data.csv')
missing_SMILES = df[df.iloc[:, 0].isna()]
print(f'There are {len(missing_SMILES)} rows with missing SMILES.') # This prints 'There are 0 rows with missing SMILES.'
# 4. Sanitize SMILES with MolVS and print problems
# Since the dataset is large, we sanitize it in four portions, looping over the quarters
n = len(df)
bounds = [0, n // 4, n // 2, 3 * n // 4, n]
for i in range(4):
    # .copy() avoids pandas' SettingWithCopyWarning when adding the 'X' column
    quarter_df = df.iloc[bounds[i]:bounds[i + 1]].copy()
    quarter_df['X'] = [
        Chem.MolToSmiles(
            fragment_remover.remove(
                standardizer.standardize(
                    Chem.MolFromSmiles(smiles))))  # assumes every SMILES parses
        for smiles in quarter_df['smiles']]
    problems = []
    for _, row in tqdm.tqdm(quarter_df.iterrows(), total=len(quarter_df)):
        result = molvs.validate_smiles(row['X'])
        if len(result) == 0:
            continue
        problems.append((row['X'], result))
    # Most alerts occur because the SMILES is a salt form and/or not neutralized
    for smi, alerts in problems:
        print(f"SMILES: {smi}, problem: {alerts[0]}")
    quarter_df.to_csv(f'MolData_sanitized_{(i + 1) * 0.25}.csv', index=False)
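# For intuition about the alerts above, a toy check on a single non-neutral
# SMILES (illustrative example; exact messages depend on the MolVS version):
print(molvs.validate_smiles('O=C([O-])c1ccccc1'))  # expected to flag a non-neutral system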
# 5. Concatenate the sanitized portions
sanitized1 = pd.read_csv('MolData_sanitized_0.25.csv')
sanitized2 = pd.read_csv('MolData_sanitized_0.5.csv')
sanitized3 = pd.read_csv('MolData_sanitized_0.75.csv')
sanitized4 = pd.read_csv('MolData_sanitized_1.0.csv')
smiles_concatenated = pd.concat([sanitized1, sanitized2, sanitized3, sanitized4], ignore_index=True)
smiles_concatenated.to_csv('MolData_sanitized_concatenated.csv', index=False)
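# Optional sanity check (assumes the sanitization kept every row):
assert len(smiles_concatenated) == len(df), 'row count changed during sanitization'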
# 6. Reformat from wide form to long form and rename the columns
# Because the dataset is large, we process it in chunks.
chunk_size = 10**5
input_file = 'MolData_sanitized_concatenated.csv'
output_prefix = 'MolData_long_form_'
column_names = pd.read_csv(input_file, nrows=1).columns.tolist()
column_names = ['SMILES' if col == 'X' else col for col in column_names]
var_name_list = [col for col in column_names if col.startswith('activity_')]
with pd.read_csv(input_file, chunksize=chunk_size) as reader:
for i, chunk in enumerate(reader):
chunk.columns = column_names
long_df = pd.melt(chunk, id_vars=['SMILES', 'PUBCHEM_CID', 'split'],
value_vars=var_name_list, var_name='AID', value_name='score')
long_df = long_df.dropna(subset=['score'])
long_df['score'] = long_df['score'].astype('Int64')
output_file = f"{output_prefix}{i+1}.csv"
long_df.to_csv(output_file, index=False)
print(f"Saved: {output_file}")
# 7. Split into train, test, and validation sets
chunk_size = 10**5
input_files = [f'MolData_long_form_{i+1}.csv' for i in range(15)]  # 15 chunk files were written in step 6
output_train_file = 'MolData_train.csv'
output_test_file = 'MolData_test.csv'
output_valid_file = 'MolData_validation.csv'
train_data = []
test_data = []
valid_data = []
for input_file in input_files:
with pd.read_csv(input_file, chunksize=chunk_size) as reader:
for chunk in reader:
train_chunk = chunk[chunk['split'] == 'train']
test_chunk = chunk[chunk['split'] == 'test']
valid_chunk = chunk[chunk['split'] == 'validation']
train_data.append(train_chunk)
test_data.append(test_chunk)
valid_data.append(valid_chunk)
train_df = pd.concat(train_data, ignore_index=True)
test_df = pd.concat(test_data, ignore_index=True)
valid_df = pd.concat(valid_data, ignore_index=True)
train_df.to_csv(output_train_file, index=False)
test_df.to_csv(output_test_file, index=False)
valid_df.to_csv(output_valid_file, index=False)
def fix_cid_column(df):
    # Keep only the first CID, because some molecules have two CIDs
    df['PUBCHEM_CID'] = df['PUBCHEM_CID'].astype(str).apply(lambda x: x.split(',')[0])
    df['PUBCHEM_CID'] = df['PUBCHEM_CID'].astype('Int64')
    df = df.rename(columns={'score': 'Y'})  # rename the label column to 'Y'
    return df
train_csv = fix_cid_column(pd.read_csv('MolData_train.csv'))
test_csv = fix_cid_column(pd.read_csv('MolData_test.csv'))
valid_csv = fix_cid_column(pd.read_csv('MolData_validation.csv'))
train_csv.to_parquet('MolData_train.parquet', index=False)  # to_parquet needs a Parquet engine such as pyarrow
test_csv.to_parquet('MolData_test.parquet', index=False)
valid_csv.to_parquet('MolData_validation.parquet', index=False)
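# Optional spot-check of the Parquet round-trip (sketch):
check_df = pd.read_parquet('MolData_train.parquet')
print(check_df.dtypes)
print(check_df.head())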