# main_3_inputs.py
from pinns_v2.model import MLP, ModifiedMLP
from pinns_v2.components import ComponentManager, ResidualComponent, ICComponent, SupervisedComponent, ResidualTimeCausalityComponent
from pinns_v2.rff import GaussianEncoding
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
#from pinns.train import train
from pinns_v2.train import train
from pinns_v2.gradient import _jacobian, _hessian
from pinns_v2.dataset import DomainDataset, ICDataset, DomainSupervisedDataset
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Optimal hyperparameters found for the MLP:
#   lr = 0.002203836177626117, num_dense_layers = 8, num_dense_nodes = 308, activation_function = torch.nn.SiLU
#   step_lr_epochs = 1721, step_lr_gamma = 0.15913059595003437
# With ModifiedMLP the search returned different hyperparameters (probably unreliable):
#   lr = 0.05, num_dense_layers = 10, num_dense_nodes = 5, activation_function = Sin
#   epochs = 1444, step_lr_epochs = 2000, step_lr_gamma = 0.01, period = 5, dataset_size = 10000
epochs = 2000
num_inputs = 3  # x, x_f, t (time is the last coordinate; see hard_constraint and pde_fn)
u_min = -0.21
u_max = 0.0
x_min = 0.0
x_max = 1.0
t_f = 10
f_min = -3.0
f_max = 0.0
delta_u = u_max - u_min
delta_x = x_max - x_min
delta_f = f_max - f_min
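# Note on conventions (derived from the functions below): all network inputs and
# outputs live in the normalized range [0, 1]; the physical quantities are recovered
# as u = U*delta_u + u_min, x = X*delta_x + x_min (same for x_f), and t = tau*t_f.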
params = {
    "u_min": u_min,
    "u_max": u_max,
    "x_min": x_min,
    "x_max": x_max,
    "t_f": t_f,
    "f_min": f_min,
    "f_max": f_max
}
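# Output ansatz applied on top of the raw network output y: the factor (X-1)*X*tau
# forces the denormalized displacement to be exactly zero at x = x_min, x = x_max
# and t = 0, so these conditions are imposed "hard" rather than through loss terms.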
def hard_constraint(x, y):
    X = x[0]
    tau = x[-1]
    U = ((X-1)*X*(delta_x**2)*t_f*tau)*(y + (u_min/delta_u)) - (u_min/delta_u)
    return U
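# Forcing term: a narrow Gaussian load centered at the normalized position x_f
# (sample[1]), with amplitude currently fixed to f_min; the commented-out line
# suggests the amplitude could instead be read from an additional input.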
def f(sample):
    x = sample[0]*(delta_x) + x_min
    x_f = sample[1]*(delta_x) + x_min
    #x_f = 0.2*(delta_x) + x_min
    #h = sample[2]*(delta_f) + f_min
    h = f_min
    z = h * torch.exp(-400*((x-x_f)**2))
    return z
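# PDE residual in normalized coordinates. Undoing the scalings alpha_2, beta and K,
# this appears to correspond to the damped, forced string equation
#   u_tt + k*u_t = (T/mu)*u_xx + f(x),
# with tension T, linear density mu and damping coefficient k.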
def pde_fn(model, sample):
    T = 1
    mu = 1
    k = 1
    alpha_2 = (T/mu)*(t_f**2)/(delta_x**2)
    beta = (t_f**2)/delta_u
    K = k * t_f
    J, d = _jacobian(model, sample)
    dX = J[0][0]
    dtau = J[0][-1]
    #H = _jacobian(d, sample)[0]
    #ddX = H[0][0, 0]
    #ddtau = H[0][-1, -1]
    ddX = _jacobian(d, sample, i=0, j=0)[0][0]
    ddtau = _jacobian(d, sample, i=2, j=2)[0][0]
    return ddtau - alpha_2*ddX - beta*f(sample) + K*dtau
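# Initial-condition residual: enforces zero initial velocity. The derivative with
# respect to normalized time (dtau) is rescaled by delta_u/t_f to the physical
# du/dt and matched against zeros.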
def ic_fn_vel(model, sample):
    J, d = _jacobian(model, sample)
    dtau = J[0][-1]
    dt = dtau*delta_u/t_f
    ics = torch.zeros_like(dt)
    return dt, ics
batchsize = 500
learning_rate = 0.002203836177626117
print("Building Domain Dataset")
domainDataset = DomainDataset([0.0]*num_inputs,[1.0]*num_inputs, 10000, period = 3)
print("Building IC Dataset")
icDataset = ICDataset([0.0]*(num_inputs-1),[1.0]*(num_inputs-1), 10000, period = 3)
print("Building Domain Supervised Dataset")
#dsdDataset = DomainSupervisedDataset("C:\\Users\\desan\\Documents\\Wolfram Mathematica\\file.csv", 1000)
#print("Building Validation Dataset")
validationDataset = DomainDataset([0.0]*num_inputs,[1.0]*num_inputs, batchsize, shuffle = False)
print("Building Validation IC Dataset")
validationicDataset = ICDataset([0.0]*(num_inputs-1),[1.0]*(num_inputs-1), batchsize, shuffle = False)
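# Random Fourier feature (Gaussian) encoding of the inputs; with encoded_size=154
# this presumably produces 2*154 = 308 sin/cos features, matching the width of the
# dense layers below.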
encoding = GaussianEncoding(sigma = 1.0, input_size=num_inputs, encoded_size=154)
model = MLP([num_inputs] + [308]*8 + [1], nn.SiLU, hard_constraint, p_dropout=0.0, encoding = encoding)
#model = ModifiedMLP([num_inputs] + [308]*8 + [1], nn.SiLU, hard_constraint, p_dropout = 0.0)
component_manager = ComponentManager()
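# Residual component with causal (time-ordered) weighting: the time axis is split
# into 100 buckets and, presumably, residuals in later buckets are down-weighted
# until earlier ones are resolved; the 0.001 argument is likely the causality
# tolerance/steepness parameter.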
r = ResidualTimeCausalityComponent(pde_fn, domainDataset, 0.001, number_of_buckets = 100)
#r = ResidualComponent(pde_fn, domainDataset)
component_manager.add_train_component(r)
ic = ICComponent([ic_fn_vel], icDataset)
component_manager.add_train_component(ic)
#d = SupervisedComponent(dsdDataset)
#component_manager.add_train_component(d)
r = ResidualComponent(pde_fn, validationDataset)
component_manager.add_validation_component(r)
ic = ICComponent([ic_fn_vel], validationicDataset)
component_manager.add_validation_component(ic)
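# Xavier-uniform initialization for the weights of every Linear layer.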
def init_normal(m):
    if type(m) == torch.nn.Linear:
        torch.nn.init.xavier_uniform_(m.weight)
model = model.apply(init_normal)
model = model.to(device)
# optimizer = optim.Adam(model.parameters(), lr=learning_rate, betas = (0.9,0.99),eps = 10**-15)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
#scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=20, factor=0.5)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1721, gamma=0.15913059595003437)
# optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
data = {
    "name": "string_2inputs_nostiffness_force_damping_ic0hard_icv0_causality_t10.0_optimized_modifiedMLP",
    #"name": "prova",
    "model": model,
    "epochs": epochs,
    "batchsize": batchsize,
    "optimizer": optimizer,
    "scheduler": scheduler,
    "component_manager": component_manager,
    "additional_data": params
}
train(data, output_to_file=False)