# %% LOCAL ADAPTIVE MODEL (MOST SIMILAR STRUCTURES) ##
######################################################
import math
import pandas as pd
import numpy as np
import lightgbm as lgb
from rdkit.Chem import AllChem
from rdkit import Chem, DataStructs
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
# Get index labels of the top_n structures with the highest values in column_name
def get_top_indices(df, column_name, top_n):
    sorted_df = df.sort_values(by=column_name, ascending=False)
    return sorted_df.head(top_n).index.tolist()
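# Example (sketch): for a DataFrame with a 'Similarity' column,
# get_top_indices(df, 'Similarity', 5) returns the index labels of the
# five rows with the highest similarity values.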
# Calculate the Tanimoto similarity between a molecular pair via Morgan fingerprints
def calc_similarity_pair(a, b):
    if a is None or b is None:
        return 0.0
    amol = Chem.MolFromSmiles(a)
    bmol = Chem.MolFromSmiles(b)
    if amol is None or bmol is None:
        return 0.0
    fp1 = AllChem.GetMorganFingerprintAsBitVect(amol, 3, nBits=2048, useChirality=False)
    fp2 = AllChem.GetMorganFingerprintAsBitVect(bmol, 3, nBits=2048, useChirality=False)
    return DataStructs.TanimotoSimilarity(fp1, fp2)
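# Example (sketch): calc_similarity_pair('CCO', 'CCN') returns the Tanimoto
# similarity of the 2048-bit radius-3 Morgan fingerprints of ethanol and
# ethylamine; missing or unparsable SMILES fall back to 0.0.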
# Calculate regression metrics; NaNs are dropped pairwise so measured and
# predicted values stay aligned
def calc_regression_metrics(y_test_data, y_predicted):
    df_pairs = pd.DataFrame({'measured': list(y_test_data), 'predicted': list(y_predicted)}).dropna()
    MSE = round(mean_squared_error(df_pairs['measured'], df_pairs['predicted']), 3)
    MAE = round(mean_absolute_error(df_pairs['measured'], df_pairs['predicted']), 3)
    RMSE = round(math.sqrt(mean_squared_error(df_pairs['measured'], df_pairs['predicted'])), 3)
    R2 = round(r2_score(df_pairs['measured'], df_pairs['predicted']), 3)
    return MSE, MAE, RMSE, R2
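# Example (sketch): calc_regression_metrics([1.0, 2.0, 3.0], [1.1, 1.9, 3.2])
# returns the tuple (MSE, MAE, RMSE, R2), each rounded to three decimals.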
# Run training and regression prediction with LightGBM
# (uses the module-level 'params' dict defined below)
def LGBM_Regression(X_train, y_train, X_test):
    lgb_train = lgb.Dataset(X_train, y_train)
    rgr_LightGBM = lgb.train(params, train_set=lgb_train)
    # Prediction for the test set
    y_pred = rgr_LightGBM.predict(X_test)
    return y_pred
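# Example (sketch, hypothetical data): with X_train/X_test as descriptor
# DataFrames and y_train as the matching target Series,
# y_pred = LGBM_Regression(X_train, y_train, X_test) returns a NumPy array
# holding one prediction per test row.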
# LightGBM model parameters ('n_estimators', 'n_jobs' and 'random_state' are
# LightGBM aliases for 'num_iterations', 'num_threads' and 'seed')
params = {
    'boosting': 'gbdt',
    'objective': 'regression',
    'num_leaves': 35,
    'n_estimators': 2000,
    'learning_rate': 0.05,
    'metric': {'l1', 'l2'},
    'verbose': -1,
    'n_jobs': 16,
    'random_state': 123
}
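# NOTE (assumption): this script expects df_Train_Pool and df_Test to be defined
# beforehand, each with a 'Structure STD' SMILES column, the 0D RDKit descriptor
# columns spanning 'MaxAbsEStateIndex' to 'fr_urea', and the target column
# 'log10(Papp AB) [cm/s]'. A minimal loading sketch (file names hypothetical):
# df_Train_Pool = pd.read_pickle('TrainPool_0DRDKit.pkl')
# df_Test = pd.read_pickle('TestSet_0DRDKit.pkl')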
# Cut-offs: number of most similar training structures used to train each local model
lst_CutOffs = [3, 5, 10, 11, 15, 20, 30, 50, 75, 100, 125, 150, 250, 300, 400, 500, 750, 1000, 2000, 5000, 7500, 10000, 15000, 20000, 30000, 33397]
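# (The largest cutoff, 33397, presumably equals the full training-pool size,
# i.e. training on the whole pool with no similarity-based selection.)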
# Loop over all cutoffs
for cutoff in lst_CutOffs:
    # Initialize result lists
    lst_Y_Predictions = []
    lst_Y_Original = []
    lst_amount_training_structures = []
    lst_max_Similarity = []
    lst_mean_Similarity = []
    # Loop over all test SMILES
    for i in range(len(df_Test)):
        # Remove the test compound from the training pool (leave-one-out)
        test_smiles = df_Test['Structure STD'].iloc[i]
        print('Remove SMILES from training:', test_smiles)
        print('n(Training) BEFORE test cmpnd removal from training data:', len(df_Train_Pool))
        df_Train_Pool_NO_TEST = df_Train_Pool[df_Train_Pool['Structure STD'] != test_smiles].copy()
        print('n(Training) AFTER test cmpnd removal from training data:', len(df_Train_Pool_NO_TEST))
        print('Calculating similarities between test compound and pool...')
        df_Train_Pool_NO_TEST['Similarity'] = [
            calc_similarity_pair(test_smiles, pool_smiles)
            for pool_smiles in df_Train_Pool_NO_TEST['Structure STD']
        ]
        # _________________________________________________________
        ## Find compound cluster with selected Tanimoto similarity
        print('Extracting the', cutoff, 'most similar compounds...')
        # Initialize lists
        lst_sim_ext_inds = []
        lst_sim_ext_sim_values = []
        try:
            lst_sim_ext_inds = get_top_indices(df_Train_Pool_NO_TEST, 'Similarity', cutoff)  # Indices of the top-n most similar compounds
            lst_sim_ext_sim_values = df_Train_Pool_NO_TEST['Similarity'].loc[lst_sim_ext_inds].tolist()  # Actual similarity values
            print('Highest similarity:', max(lst_sim_ext_sim_values))
        except Exception:
            print('No highest similarity found')
            lst_sim_ext_sim_values.append(None)
            lst_sim_ext_inds.append(None)
            print('Similarities and train indices replaced by "None"')
        # _________________________________________________________
        ## DESCRIPTOR CALCULATION + TRAIN
        try:
            # Select training data
            print('Extracting 0D RDKit training descriptors from pool for test compound', i)
            X_train = df_Train_Pool_NO_TEST.loc[:, 'MaxAbsEStateIndex':'fr_urea']
            X_train = X_train.loc[lst_sim_ext_inds]
            y_train = df_Train_Pool_NO_TEST['log10(Papp AB) [cm/s]'].loc[lst_sim_ext_inds]
        except Exception:
            print('Training descriptor extraction failed for test compound', i)
            X_train = None
            y_train = None
            print('Train data replaced by "None"')
        try:
            print('Extracting 0D RDKit descriptors for test compound', i)
            # Keep X_test as a one-row DataFrame so LightGBM receives 2D input
            X_test = df_Test.loc[:, 'MaxAbsEStateIndex':'fr_urea'].iloc[[i]]
            y_test = df_Test['log10(Papp AB) [cm/s]'].iloc[i]
        except Exception:
            print('Test descriptor extraction failed for test compound', i)
            X_test = None
            y_test = df_Test['log10(Papp AB) [cm/s]'].iloc[i]
            print('Test data replaced by "None"')
        # _________________________________________________________
        ## Train model on similar compounds + predict
        ## LightGBM
        try:
            print('Predicting for test compound #', i)
            y_pred = LGBM_Regression(X_train, y_train, X_test)
            print('Predicted value:', y_pred)
            print('Experimental value:', y_test)
        except Exception:
            print('Predicting failed for test compound', i)
            y_pred = None
            print('y_pred replaced by "None"')
        try:
            # Collect predicted and measured values
            lst_Y_Predictions.append(y_pred[0])
            lst_Y_Original.append(y_test)
        except Exception:
            print('Appending prediction data failed, filling NA')
            lst_Y_Predictions.append(None)
            lst_Y_Original.append(None)
        try:
            lst_amount_training_structures.append(len(lst_sim_ext_inds))  # n(Training)
        except Exception:
            print('Appending n(Training) failed, filling NAs')
            lst_amount_training_structures.append(None)
        # Remove None elements before computing summary statistics
        lst_sim_ext_sim_values_notNone = [x for x in lst_sim_ext_sim_values if x is not None]
        try:
            lst_max_Similarity.append(max(lst_sim_ext_sim_values_notNone))  # Maximum similarity in training data
        except Exception:
            print('Appending similarity data failed, filling NAs')
            lst_max_Similarity.append(None)
        try:
            lst_mean_Similarity.append(np.mean(lst_sim_ext_sim_values_notNone))  # Mean similarity in training data
        except Exception:
            lst_mean_Similarity.append(None)
        print()  # Blank line between test compounds
    try:
        # Combine results per cutoff
        df_Y_test = pd.DataFrame({'Predicted': lst_Y_Predictions,
                                  'Measured': lst_Y_Original,
                                  'n': lst_amount_training_structures,
                                  'max. Similarity': lst_max_Similarity,
                                  'mean Similarity': lst_mean_Similarity})
    except Exception:
        print('Combining results failed.')
    try:
        # Collect metrics
        var_MSE, var_MAE, var_RMSE, var_R2 = calc_regression_metrics(df_Y_test['Measured'], df_Y_test['Predicted'])
        dict_metrics = {'MSE': var_MSE, 'MAE': var_MAE, 'RMSE': var_RMSE, 'R2': var_R2}
        df_Metrics = pd.DataFrame([dict_metrics])
        print(df_Metrics)
        print(df_Y_test)
    except Exception:
        print('Collecting metrics failed, filling with NAs')
        dict_metrics = {'MSE': None, 'MAE': None, 'RMSE': None, 'R2': None}
        df_Metrics = pd.DataFrame([dict_metrics])
    try:
        # Export metrics (file name depends on cutoff)
        file_name_Metrics = 'Metrics_' + str(cutoff) + '_0DRDKit_log10cms_LGBM_TopSimEval'
        df_Metrics.to_pickle(file_name_Metrics)
    except Exception:
        print('Exporting metrics failed.')
    try:
        # Export per-compound results (file name depends on cutoff)
        file_name_Results = 'Results_' + str(cutoff) + '_0DRDKit_log10cms_LGBM_TopSimEval'
        df_Y_test.to_pickle(file_name_Results)  # Save predictions to file
    except Exception:
        print('Exporting results failed.')
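# To inspect the exported pickles afterwards (sketch, following the naming
# pattern above; shown here for cutoff = 10):
# df_Metrics_10 = pd.read_pickle('Metrics_10_0DRDKit_log10cms_LGBM_TopSimEval')
# df_Results_10 = pd.read_pickle('Results_10_0DRDKit_log10cms_LGBM_TopSimEval')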