7)LossAndUpdateAuto.py
import torch
import torch.nn as nn
# 1) Design model (input size, output size, forward pass)
# 2) Construct loss and optimizer
# 3) Training loop:
#    - forward pass: compute prediction
#    - backward pass: compute gradients
#    - update weights

# f = w * x
# Here the target function is f = 2 * x
x = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)
y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)
test_tensor = torch.tensor([5], dtype=torch.float32)

n_samples, n_features = x.shape
print(n_samples, n_features)

input_size = n_features
output_size = n_features
model = nn.Linear(input_size, output_size)
"""class LinearRegression(nn.Module):
def __init__ (self, input_dim , output_dim):
super(LinearRegression,self).__init__()
#define layers
self.lin = nn.Linear(input_dim,output_dim)
def forward(self,x):
return self.lin(x)"""
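# A minimal usage sketch, assuming the class above is uncommented: for this
# one-feature case it is equivalent to nn.Linear.
# model = LinearRegression(input_size, output_size)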
print(f'Prediction before training: f(5) = {model(test_tensor).item():.3f}')
# Training
learning_rate = 0.01
# Using backward() requires more iterations but automates the gradient computation
n_iters = 1000

loss = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
for epoch in range(n_iters):
    # prediction (forward pass)
    y_pred = model(x)
    # loss (prediction first, target second)
    l = loss(y_pred, y)
    # backward pass: computes dl/dw automatically
    l.backward()
    # update weights
    optimizer.step()
    # reset gradients before the next iteration
    optimizer.zero_grad()
    if epoch % 100 == 0:
        w, b = model.parameters()
        print(f'epoch {epoch + 1}: w = {w[0][0].item():.3f}, loss = {l.item():.8f}')
print(f'Prediction after training: f(5) = {model(test_tensor).item():.3f}')
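# Optional sanity check: since the data follows f = 2*x, the learned
# parameters should end up with w close to 2.0 and b close to 0.0.
w, b = model.parameters()
print(f'Learned parameters: w = {w.item():.3f}, b = {b.item():.3f}')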