proj2_3_ANN_regression_validation.py
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 11:56:10 2021
@author: changai
"""
import numpy as np
from sklearn import model_selection
from toolbox_02450 import train_neural_net
import torch

def ANN_validate(X, y, units, cvf=10):
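    """Inner cross-validation loop for ANN model selection (regression).

    For each candidate hidden-layer size in `units`, trains `n_replicates`
    networks on each of `cvf` K-fold splits of (X, y) and records the
    validation MSE. Returns the lowest mean validation MSE together with
    the hidden-layer size that achieved it: (opt_val_err, opt_units).
    """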
print("========== inner loop start ================")
# w = np.empty((M,cvf,len(units)))
# train_error = np.empty((cvf,len(units)))
# test_error = np.empty((cvf,len(units)))
se = np.empty((cvf,len(units)))
mse = np.empty((cvf,len(units)))
# y = y.squeeze()
n_replicates = 2 # number of networks trained in each k-fold
max_iter = 10000 # stop criterion 2 (max epochs in training)
CV = model_selection.KFold(cvf, shuffle=True)
M = X.shape[1]
f = 0
    for train_index, test_index in CV.split(X, y):
        print('\nInner crossvalidation fold: {0}/{1}'.format(f+1, cvf))
        # Convert the split to torch tensors; targets are reshaped to column
        # vectors so the squared error below broadcasts element-wise against
        # the network output of shape (n, 1)
        X_train = torch.Tensor(X[train_index, :])
        y_train = torch.Tensor(y[train_index]).reshape(-1, 1)
        X_test = torch.Tensor(X[test_index, :])
        y_test = torch.Tensor(y[test_index]).reshape(-1, 1)
        for n in range(0, len(units)):
            # One-hidden-layer network with units[n] hidden units
            model = lambda: torch.nn.Sequential(
                torch.nn.Linear(M, units[n]),  # M features to H hidden units
                torch.nn.Tanh(),               # hidden-layer transfer function
                torch.nn.Linear(units[n], 1),  # H hidden units to 1 output neuron
                # no final transfer function: the regression output is unbounded
            )
            loss_fn = torch.nn.MSELoss()

            # Train the net on the training data
            net, final_loss, learning_curve = train_neural_net(model,
                                                               loss_fn,
                                                               X=X_train,
                                                               y=y_train,
                                                               n_replicates=n_replicates,
                                                               max_iter=max_iter)
            print('\n\tBest loss: {}\n'.format(final_loss))
            # Determine estimated outputs for the test set
            y_test_est = net(X_test)

            # Squared error per observation, averaged into this fold's MSE
            se = (y_test_est.float() - y_test.float())**2
            mse[f, n] = torch.mean(se).item()
        f = f + 1
    print(mse)
    mean_mse = np.mean(mse, axis=0)  # average validation MSE over folds
    opt_val_err = np.min(mean_mse)
    opt_units = units[np.argmin(mean_mse)]
    print("========== inner loop end ================")
    return opt_val_err, opt_units
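

# A minimal usage sketch (hypothetical): synthetic data stands in for the
# project's dataset, with ANN_validate used as the inner loop of a two-level
# cross-validation. Assumes toolbox_02450 is importable; the names below
# (N, M, X_demo, y_demo, hidden_units) are illustrative, not from the
# original script.
if __name__ == '__main__':
    np.random.seed(0)
    N, M = 100, 3                             # observations, features
    X_demo = np.random.randn(N, M)
    y_demo = X_demo @ np.array([1.0, -2.0, 0.5]) + 0.1*np.random.randn(N)

    hidden_units = [1, 2, 4]                  # candidate hidden-layer sizes
    opt_val_err, opt_units = ANN_validate(X_demo, y_demo, hidden_units, cvf=3)
    print('Lowest validation MSE {:.4f} with {} hidden units'.format(
        opt_val_err, opt_units))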