# This is a script for MolData dataset preprocessing
# 1. Load modules
import pandas as pd
import urllib.request
import molvs
import tqdm
from rdkit import Chem
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
# 2. Download the original dataset
# https://github.com/LumosBio/MolData
# Suppose that 'all_molecular_data.csv' has been downloaded from GitHub
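# A minimal sketch of fetching the file programmatically instead; the raw-file URL
# below is hypothetical -- adjust it to the actual path inside the MolData repository
# (the data may also be distributed as a zip archive that needs extracting first):
# url = 'https://raw.githubusercontent.com/LumosBio/MolData/main/Data/all_molecular_data.csv'  # hypothetical path
# urllib.request.urlretrieve(url, 'all_molecular_data.csv')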
# 3. Check if any SMILES is missing in the dataset (first column)
df = pd.read_csv('all_molecular_data.csv')
missing_SMILES = df[df.iloc[:, 0].isna()]
print(f'There are {len(missing_SMILES)} rows with missing SMILES.') # This prints 'There are 0 rows with missing SMILES.'
# 4. Sanitize SMILES with MolVS and print problems
# Since the dataset is large, we divided it into four portions and sanitized each in turn
def sanitize(smiles):
    # Parse, standardize, strip salt/solvent fragments, then write canonical SMILES
    mol = Chem.MolFromSmiles(smiles)
    mol = standardizer.standardize(mol)
    mol = fragment_remover.remove(mol)
    return Chem.MolToSmiles(mol)

bounds = [0, len(df) // 4, len(df) // 2, 3 * len(df) // 4, len(df)]
labels = ['0.25', '0.5', '0.75', '1.0']
for label, start, end in zip(labels, bounds[:-1], bounds[1:]):
    quarter_df = df.iloc[start:end].copy()  # .copy() avoids SettingWithCopyWarning
    quarter_df['X'] = [sanitize(smiles) for smiles in tqdm.tqdm(quarter_df['smiles'])]
    # Collect SMILES that still raise validation alerts after sanitization
    problems = []
    for index, row in quarter_df.iterrows():
        result = molvs.validate_smiles(row['X'])
        if len(result) == 0:
            continue
        problems.append((row['X'], result))
    # Most alerts arise because the entry includes a salt form and/or is not neutralized
    for smiles, alerts in problems:
        print(f"SMILES: {smiles}, problem: {alerts[0]}")
    quarter_df.to_csv(f'MolData_sanitized_{label}.csv', index=False)
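# Quick illustration of what the sanitizer does (exact output depends on the MolVS
# defaults in your installed version). A hydrochloride salt such as 'CCN.Cl' should
# come back as the desalted free base 'CCN'.
print(sanitize('CCN.Cl'))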
# 5. Concatenate the four sanitized portions
sanitized1 = pd.read_csv('MolData_sanitized_0.25.csv')
sanitized2 = pd.read_csv('MolData_sanitized_0.5.csv')
sanitized3 = pd.read_csv('MolData_sanitized_0.75.csv')
sanitized4 = pd.read_csv('MolData_sanitized_1.0.csv')
smiles_concatenated = pd.concat([sanitized1, sanitized2, sanitized3, sanitized4], ignore_index=True)
smiles_concatenated.to_csv('MolData_sanitized_concatenated.csv', index = False)
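# Optional sanity check (assumes the original df from step 3 is still in memory):
# the four portions should reassemble to exactly the original number of rows.
assert len(smiles_concatenated) == len(df), 'row count changed during sanitization'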
# 6. Formatting and naming (reshape from wide form to long form, and rename columns)
# Due to the large size of the dataset, we process it in chunks.
chunk_size = 10**5
input_file = 'MolData_sanitized_concatenated.csv'
output_prefix = 'MolData_long_form_'
column_names = pd.read_csv(input_file, nrows=1).columns.tolist()
column_names = ['SMILES' if col == 'X' else col for col in column_names]
var_name_list = [col for col in column_names if col.startswith('activity_')]
with pd.read_csv(input_file, chunksize=chunk_size) as reader:
    for i, chunk in enumerate(reader):
        chunk.columns = column_names
        long_df = pd.melt(chunk, id_vars=['SMILES', 'PUBCHEM_CID', 'split'],
                          value_vars=var_name_list, var_name='AID', value_name='score')
        long_df = long_df.dropna(subset=['score'])  # keep only assays with a measured score
        long_df['score'] = long_df['score'].astype('Int64')
        output_file = f"{output_prefix}{i+1}.csv"
        long_df.to_csv(output_file, index=False)
        print(f"Saved: {output_file}")
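# Toy illustration of the wide-to-long reshape above (made-up values, not MolData rows):
# a frame with two activity columns melts into one (SMILES, AID, score) row per assay.
toy = pd.DataFrame({'SMILES': ['CCO'], 'PUBCHEM_CID': [702], 'split': ['train'],
                    'activity_1': [1], 'activity_2': [0]})
print(pd.melt(toy, id_vars=['SMILES', 'PUBCHEM_CID', 'split'],
              var_name='AID', value_name='score'))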
# 7. Split into train, test, and validation
chunk_size = 10**5
input_files = [f'MolData_long_form_{i+1}.csv' for i in range(15)]  # 15 = number of long-form chunk files written in step 6
output_train_file = 'MolData_train.csv'
output_test_file = 'MolData_test.csv'
output_valid_file = 'MolData_validation.csv'
train_data = []
test_data = []
valid_data = []
for input_file in input_files:
    with pd.read_csv(input_file, chunksize=chunk_size) as reader:
        for chunk in reader:
            train_data.append(chunk[chunk['split'] == 'train'])
            test_data.append(chunk[chunk['split'] == 'test'])
            valid_data.append(chunk[chunk['split'] == 'validation'])
train_df = pd.concat(train_data, ignore_index=True)
test_df = pd.concat(test_data, ignore_index=True)
valid_df = pd.concat(valid_data, ignore_index=True)
train_df.to_csv(output_train_file, index=False)
test_df.to_csv(output_test_file, index=False)
valid_df.to_csv(output_valid_file, index=False)
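# Optional: report the split sizes to confirm the partition looks sensible
# (the three files together should cover every long-form row).
for name, frame in [('train', train_df), ('test', test_df), ('validation', valid_df)]:
    print(f"{name}: {len(frame)} rows")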
def fix_cid_column(df):
    df['PUBCHEM_CID'] = df['PUBCHEM_CID'].astype(str).apply(lambda x: x.split(',')[0])  # some molecules have two comma-separated CIDs; keep the first
    df['PUBCHEM_CID'] = df['PUBCHEM_CID'].astype('Int64')
    df = df.rename(columns={'score': 'Y'})  # rename the label column
    return df
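# Hedged example of the CID cleanup (values here are made up for illustration):
# a double-CID entry keeps only the first ID, and 'score' is renamed 'Y'.
example = pd.DataFrame({'PUBCHEM_CID': ['123,456'], 'score': [1]})
print(fix_cid_column(example))  # PUBCHEM_CID becomes 123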
train_csv = fix_cid_column(pd.read_csv('MolData_train.csv'))
test_csv = fix_cid_column(pd.read_csv('MolData_test.csv'))
valid_csv = fix_cid_column(pd.read_csv('MolData_validation.csv'))
train_csv.to_parquet('MolData_train.parquet', index=False)
test_csv.to_parquet('MolData_test.parquet', index=False)
valid_csv.to_parquet('MolData_validation.parquet', index=False)
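# Optional round-trip check: read one parquet file back and confirm the expected
# schema (SMILES, PUBCHEM_CID, split, AID, Y) survived the conversion.
check = pd.read_parquet('MolData_train.parquet')
print(check.columns.tolist())
print(check.head())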