woshixuhao committed on
Commit
e874b08
·
1 Parent(s): 5db7379

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -265
app.py DELETED
@@ -1,265 +0,0 @@
1
- import argparse
2
- import os
3
- from rdkit import Chem
4
- from sklearn.externals import joblib
5
- import numpy as np
6
- from rdkit.Chem import Descriptors
7
- from rdkit.Chem import rdMolDescriptors
8
- from xgboost.sklearn import XGBClassifier,XGBRegressor
9
- import torch
10
- import torch.nn.functional as F
11
- from torch.autograd import Variable
12
- from rdkit.Chem import MACCSkeys
13
- import torch.nn as nn
14
- import lightgbm as lgb
15
- from sklearn.ensemble import RandomForestRegressor
16
- import wget
17
- import warnings
18
- import gradio as gr
19
- import requests
20
# Globally silence all Python warnings (library deprecation chatter).
warnings.filterwarnings("ignore")

# SMILES of the five pure eluent components, in the same order as the
# PE/EA/DCM/MeOH/Et20 arguments of predict_single: n-hexane (petroleum
# ether proxy), ethyl acetate, dichloromethane, methanol, diethyl ether.
Eluent_smiles=['CCCCCC','CC(OCC)=O','C(Cl)Cl','CO','CCOCC']
23
def parse_args(args=None):
    """Build and return the experiment configuration.

    Args:
        args: optional argument vector (list of strings). ``None`` — the
            default, preserving the original call signature — falls back
            to ``sys.argv[1:]``. Passing an explicit list makes the
            function usable from tests and notebooks.

    Returns:
        argparse.Namespace with all options below plus a ``device``
        attribute set to 'cuda' when available, else 'cpu'.
    """
    def str2bool(v):
        # BUG FIX: argparse's type=bool treats ANY non-empty string as
        # True (so "--flag False" yielded True). Parse the text properly;
        # plain bool defaults pass straight through.
        if isinstance(v, bool):
            return v
        return v.lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser()
    # Portability fix: os.path.join instead of concatenating a literal
    # backslash (which produced broken paths on non-Windows hosts).
    parser.add_argument('--file_path', type=str,
                        default=os.path.join(os.getcwd(), 'TLC_dataset.xlsx'),
                        help='path of download dataset')
    parser.add_argument('--dipole_path', type=str,
                        default=os.path.join(os.getcwd(), 'compound_list_带化合物分类.xlsx'),
                        help='path of dipole file')
    parser.add_argument('--data_range', type=int, default=4944, help='utilized data range,robot:4114,manual:4458,new:4944')
    parser.add_argument('--automatic_divide', type=str2bool, default=False, help='automatically divide dataset by 80% train,10% validate and 10% test')
    parser.add_argument('--choose_total', type=int, default=387, help='train total num,robot:387,manual:530')
    parser.add_argument('--choose_train', type=int, default=308, help='train num,robot:387,manual:530')
    parser.add_argument('--choose_validate', type=int, default=38, help='validate num')
    parser.add_argument('--choose_test', type=int, default=38, help='test num')
    parser.add_argument('--seed', type=int, default=324, help='random seed for split dataset')
    parser.add_argument('--torch_seed', type=int, default=324, help='random seed for torch')
    parser.add_argument('--add_dipole', type=str2bool, default=True, help='add dipole into dataset')
    parser.add_argument('--add_molecular_descriptors', type=str2bool, default=True, help='add molecular_descriptors (MW, TPSA, NROTB, HBD, HBA, LogP) into dataset')
    parser.add_argument('--add_MACCkeys', type=str2bool, default=True, help='add MACCSkeys into dataset')
    parser.add_argument('--add_eluent_matrix', type=str2bool, default=True, help='add eluent matrix into dataset')
    parser.add_argument('--test_mode', type=str, default='robot', help='manual data or robot data or fix, costum test data')
    parser.add_argument('--use_model', type=str, default='Ensemble', help='the utilized model (XGB,LGB,ANN,RF,Ensemble,Bayesian)')
    parser.add_argument('--download_data', type=str2bool, default=False, help='use local dataset or download from dataset')
    parser.add_argument('--use_sigmoid', type=str2bool, default=True, help='use sigmoid')
    parser.add_argument('--shuffle_array', type=str2bool, default=True, help='shuffle_array')
    parser.add_argument('--characterization_mode', type=str, default='standard',
                        help='the characterization mode for the dataset, including standard, precise_TPSA, no_multi')

    # ---------------parameters for plot---------------------
    parser.add_argument('--plot_col_num', type=int, default=4, help='The col_num in plot')
    parser.add_argument('--plot_row_num', type=int, default=4, help='The row_num in plot')
    parser.add_argument('--plot_importance_num', type=int, default=10, help='The max importance num in plot')

    # --------------parameters for LGB-----------------------
    parser.add_argument('--LGB_max_depth', type=int, default=5, help='max_depth for LGB')
    parser.add_argument('--LGB_num_leaves', type=int, default=25, help='num_leaves for LGB')
    parser.add_argument('--LGB_learning_rate', type=float, default=0.007, help='learning_rate for LGB')
    parser.add_argument('--LGB_n_estimators', type=int, default=1000, help='n_estimators for LGB')
    parser.add_argument('--LGB_early_stopping_rounds', type=int, default=200, help='early_stopping_rounds for LGB')

    # ---------------parameters for XGB-----------------------
    parser.add_argument('--XGB_n_estimators', type=int, default=200, help='n_estimators for XGB')
    parser.add_argument('--XGB_max_depth', type=int, default=3, help='max_depth for XGB')
    parser.add_argument('--XGB_learning_rate', type=float, default=0.1, help='learning_rate for XGB')

    # ---------------parameters for RF------------------------
    parser.add_argument('--RF_n_estimators', type=int, default=1000, help='n_estimators for RF')
    parser.add_argument('--RF_random_state', type=int, default=1, help='random_state for RF')
    parser.add_argument('--RF_n_jobs', type=int, default=1, help='n_jobs for RF')

    # --------------parameters for ANN-----------------------
    parser.add_argument('--NN_hidden_neuron', type=int, default=128, help='hidden neurons for NN')
    parser.add_argument('--NN_optimizer', type=str, default='Adam', help='optimizer for NN (Adam,SGD,RMSprop)')
    parser.add_argument('--NN_lr', type=float, default=0.005, help='learning rate for NN')
    parser.add_argument('--NN_model_save_location', type=str,
                        default=os.path.join(os.getcwd(), 'model_save_NN'),
                        help='learning rate for NN')
    parser.add_argument('--NN_max_epoch', type=int, default=5000, help='max training epoch for NN')
    parser.add_argument('--NN_add_sigmoid', type=str2bool, default=True, help='whether add sigmoid in NN')
    parser.add_argument('--NN_add_PINN', type=str2bool, default=False, help='whether add PINN in NN')
    parser.add_argument('--NN_epi', type=float, default=100.0, help='The coef of PINN Loss in NN')

    config = parser.parse_args(args)
    config.device = 'cuda' if torch.cuda.is_available() else 'cpu'
    return config
84
-
85
class ANN(nn.Module):
    '''
    Artificial neural network: one input layer, a hidden layer applied
    three times, and an output layer, with leaky-ReLU activations.

    NOTE(review): the SAME hidden_layer module is applied three times, so
    its weights are shared across depth. This matches the saved checkpoint
    (model_ANN.pkl) layer names — do not split into separate layers.
    '''
    def __init__(self, in_neuron, hidden_neuron, out_neuron, config):
        # Layer attribute names must stay as-is for state_dict loading.
        super(ANN, self).__init__()
        self.input_layer = nn.Linear(in_neuron, hidden_neuron)
        self.hidden_layer = nn.Linear(hidden_neuron, hidden_neuron)
        self.output_layer = nn.Linear(hidden_neuron, out_neuron)
        # Whether to squash the output to (0, 1) with a sigmoid.
        self.NN_add_sigmoid = config.NN_add_sigmoid

    def forward(self, x):
        x = self.input_layer(x)
        x = F.leaky_relu(x)
        x = self.hidden_layer(x)
        x = F.leaky_relu(x)
        x = self.hidden_layer(x)
        x = F.leaky_relu(x)
        x = self.hidden_layer(x)
        x = F.leaky_relu(x)
        x = self.output_layer(x)
        if self.NN_add_sigmoid:
            # F.sigmoid is deprecated; torch.sigmoid is the supported API.
            x = torch.sigmoid(x)
        return x
110
-
111
class Model_ML():
    """Ensemble Rf predictor.

    Downloads four pretrained models (LightGBM, XGBoost, random forest,
    ANN) from the Hugging Face hub and blends their predictions on
    ``X_test`` with fixed weights 0.2/0.2/0.2/0.4.
    """
    # Remote location of the pretrained model pickles.
    _MODEL_URL = 'https://huggingface.co/woshixuhao/Rf_prediction/resolve/main/'

    def __init__(self, config, X_test):
        super().__init__()
        self.X_test = X_test
        self.seed = config.seed
        # BUG FIX: originally read config.seed, silently ignoring the
        # dedicated --torch_seed option.
        self.torch_seed = config.torch_seed
        self.config = config
        self.add_dipole = config.add_dipole
        self.add_molecular_descriptors = config.add_molecular_descriptors
        self.add_eluent_matrix = config.add_eluent_matrix
        self.use_sigmoid = config.use_sigmoid

        self.use_model = config.use_model
        self.LGB_max_depth = config.LGB_max_depth
        self.LGB_num_leaves = config.LGB_num_leaves
        self.LGB_learning_rate = config.LGB_learning_rate
        self.LGB_n_estimators = config.LGB_n_estimators
        self.LGB_early_stopping_rounds = config.LGB_early_stopping_rounds

        self.XGB_n_estimators = config.XGB_n_estimators
        self.XGB_max_depth = config.XGB_max_depth
        self.XGB_learning_rate = config.XGB_learning_rate

        self.RF_n_estimators = config.RF_n_estimators
        self.RF_random_state = config.RF_random_state
        self.RF_n_jobs = config.RF_n_jobs

        self.NN_hidden_neuron = config.NN_hidden_neuron
        self.NN_optimizer = config.NN_optimizer
        self.NN_lr = config.NN_lr
        self.NN_model_save_location = config.NN_model_save_location
        self.NN_max_epoch = config.NN_max_epoch
        self.NN_add_PINN = config.NN_add_PINN
        self.NN_epi = config.NN_epi
        self.device = config.device

        self.plot_row_num = config.plot_row_num
        self.plot_col_num = config.plot_col_num
        self.plot_importance_num = config.plot_importance_num

    def load_model(self):
        """Fetch (if absent) and deserialize the four pretrained models.

        Returns:
            (model_LGB, model_XGB, model_RF, Net) — three fitted sklearn-API
            regressors restored via joblib and the ANN with loaded weights.
        """
        # Download each pickle only when missing: wget.download would
        # otherwise save duplicate "name (1).pkl" copies on every re-run.
        for fname in ('model_LGB.pkl', 'model_XGB.pkl',
                      'model_RF.pkl', 'model_ANN.pkl'):
            if not os.path.exists(fname):
                wget.download(self._MODEL_URL + fname)

        # The tree models are fully restored from their pickles; the
        # original code constructed fresh LGB/XGB/RF estimators first and
        # immediately overwrote them — dead code, removed.
        model_LGB = joblib.load('model_LGB.pkl')
        model_XGB = joblib.load('model_XGB.pkl')
        model_RF = joblib.load('model_RF.pkl')

        Net = ANN(self.X_test.shape[1], self.NN_hidden_neuron, 1,
                  config=self.config).to(self.device)
        # map_location lets a checkpoint saved on GPU load on a CPU-only host.
        Net.load_state_dict(torch.load('model_ANN.pkl',
                                       map_location=self.device))
        return model_LGB, model_XGB, model_RF, Net

    def get_Rf(self):
        """Predict Rf for self.X_test with the weighted ensemble.

        Returns:
            1-D numpy array of predictions, one per row of X_test.
        """
        model_LGB, model_XGB, model_RF, model_ANN = self.load_model()

        X_ann = Variable(torch.from_numpy(self.X_test.astype(np.float32)).to(self.device),
                         requires_grad=True)
        y_pred_ANN = model_ANN(X_ann).cpu().data.numpy().reshape(-1)

        def _squash(y):
            # When use_sigmoid is set the tree models' raw output is mapped
            # through a sigmoid into (0, 1) — presumably they were trained
            # on logit-scale targets (matches original behavior). The ANN
            # is excluded: it applies its own sigmoid (NN_add_sigmoid).
            return 1 / (1 + np.exp(-y)) if self.use_sigmoid else y

        y_pred_XGB = _squash(model_XGB.predict(self.X_test))
        y_pred_LGB = _squash(model_LGB.predict(self.X_test))
        y_pred_RF = _squash(model_RF.predict(self.X_test))

        # Fixed blend weights; the ANN carries the largest share.
        return (0.2 * y_pred_LGB + 0.2 * y_pred_XGB
                + 0.2 * y_pred_RF + 0.4 * y_pred_ANN)
207
-
208
def get_descriptor(smiles, ratio):
    """Return the six RDKit descriptors of *smiles*, scaled by *ratio*.

    Descriptor order: exact molecular weight, TPSA, number of rotatable
    bonds, H-bond donors, H-bond acceptors, LogP.
    """
    mol = Chem.MolFromSmiles(smiles)
    values = [
        Descriptors.ExactMolWt(mol),
        Chem.rdMolDescriptors.CalcTPSA(mol),
        Descriptors.NumRotatableBonds(mol),   # rotatable bonds
        Descriptors.NumHDonors(mol),          # H-bond donors
        Descriptors.NumHAcceptors(mol),       # H-bond acceptors
        Descriptors.MolLogP(mol),             # LogP
    ]
    return np.array(values) * ratio
219
-
220
def get_eluent_descriptor(eluent):
    """Weighted sum of component descriptors for an eluent mixture.

    *eluent* holds the fraction of each solvent in Eluent_smiles order;
    components with a zero fraction are skipped.
    """
    eluent = np.array(eluent)
    total = np.zeros([6, ])
    for idx in range(eluent.shape[0]):
        fraction = eluent[idx]
        if fraction == 0:
            continue
        total = total + get_descriptor(Eluent_smiles[idx], fraction)
    return total
228
-
229
def get_data_from_smile(smile, eluent_list):
    """Assemble the 1x179 feature row for one compound/eluent pair.

    Layout: [0:167] MACCS fingerprint bits, [167:173] eluent descriptors,
    [173:179] molecular descriptors (MW, TPSA, NROTB, HBD, HBA, LogP).

    Args:
        smile: SMILES string of the compound.
        eluent_list: fractions of the five eluent components.

    Returns:
        numpy array of shape (1, 179).
    """
    # Parse the SMILES once and reuse the molecule (the original parsed
    # it a second time just for the fingerprint).
    compound_mol = Chem.MolFromSmiles(smile)
    compound_finger = np.array(list(MACCSkeys.GenMACCSKeys(compound_mol)))

    X_test = np.zeros([1, 179])
    X_test[0, 0:167] = compound_finger
    # (The original zeroed [167:173] here and immediately overwrote it —
    # dead store, removed.)
    X_test[0, 167:173] = np.array(get_eluent_descriptor(eluent_list))
    X_test[0, 173:179] = [
        Descriptors.ExactMolWt(compound_mol),
        Chem.rdMolDescriptors.CalcTPSA(compound_mol),
        Descriptors.NumRotatableBonds(compound_mol),  # rotatable bonds
        Descriptors.NumHDonors(compound_mol),         # H-bond donors
        Descriptors.NumHAcceptors(compound_mol),      # H-bond acceptors
        Descriptors.MolLogP(compound_mol),            # LogP
    ]
    return X_test
250
-
251
def predict_single(smile, PE, EA, DCM, MeOH, Et20):
    """Predict the Rf value of one compound under a given eluent mixture.

    Args:
        smile: SMILES string of the compound.
        PE, EA, DCM, MeOH, Et20: fractions of petroleum ether, ethyl
            acetate, dichloromethane, methanol and diethyl ether
            (presumably a typo for Et2O — name kept for compatibility).

    Returns:
        The predicted Rf value (scalar).
    """
    cfg = parse_args()
    cfg.add_dipole = False  # single-SMILES inference runs without dipole data
    features = get_data_from_smile(smile, [PE, EA, DCM, MeOH, Et20])
    predictions = Model_ML(cfg, features).get_Rf()
    return predictions[0]
259
-
260
if __name__=='__main__':
    # Gradio UI: one SMILES text box plus five numeric eluent fractions
    # (matching predict_single's PE/EA/DCM/MeOH/Et20), returning the Rf.
    demo = gr.Interface(fn=predict_single, inputs=["text", "number","number","number","number","number"], outputs='number')
    # share=True additionally requests a public tunnel link.
    demo.launch(share=True)
    # Example usage kept from the author for manual testing:
    # smile='O=C(OC1C(OC(C)=O)C(OC(C)=O)C(OC(C)=O)C(COC(C)=O)O1)C'
    # eluent=[0,0.9,0,0,0]
    # print(predict_single(smile,1,0,0,0,0))