-
Notifications
You must be signed in to change notification settings - Fork 0
/
train_self.py
145 lines (118 loc) · 5.79 KB
/
train_self.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
import torch
from torch import nn
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import models
from torchvision import datasets, transforms
from datetime import datetime
import sys
from torchsummary import summary
import nuit,mymodule
import os
def main():
    """Train the Huochai 5-class classifier and log losses to TensorBoard.

    Builds train/val ``ImageFolder`` datasets from a sibling data directory,
    trains for 10 epochs with Adam + CrossEntropyLoss (batch_size=1),
    evaluates after every epoch, saves a state-dict checkpoint per epoch
    under ``./weight_self``, and writes per-batch and per-epoch losses
    under ``./logs_self``.

    Side effects: creates directories, writes checkpoint/.pth files and
    TensorBoard event files, prints progress to stdout.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # NOTE(review): std is identical to mean here; the usual CIFAR-style std is
    # (0.2470, 0.2435, 0.2616) — confirm these normalization stats are intended.
    transform = {
        "train": transforms.Compose([
            transforms.CenterCrop(224),
            transforms.Resize(size=(224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
                                 std=(0.4914, 0.4822, 0.4465))]),
        "val": transforms.Compose([
            transforms.CenterCrop(224),
            transforms.Resize(size=(224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
                                 std=(0.4914, 0.4822, 0.4465))
        ])
    }
    # Data lives in ../deep-learning-for-image-processing/MyDate/mydata relative to CWD.
    data_root = os.path.abspath(os.path.join(os.getcwd(), "../"))
    data_root = os.path.join(data_root, "deep-learning-for-image-processing",
                             "MyDate", "mydata")
    train_dataset = datasets.ImageFolder(root=os.path.join(data_root, "all_mydata", "train"),
                                         transform=transform["train"])
    train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                   shuffle=True,
                                                   batch_size=1)
    test_dataset = datasets.ImageFolder(root=os.path.join(data_root, "all_mydata", "val"),
                                        transform=transform["val"])
    test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,
                                                  shuffle=False,
                                                  batch_size=1)
    train_dataset_lens = len(train_dataset)
    test_dataset_lens = len(test_dataset)
    t = len(train_dataloader)
    print("batch的组数:{}".format(t))
    print(f'训练集的长度{train_dataset_lens},测试集的长度{test_dataset_lens}')
    net = mymodule.Huochai(num_classes=5).to(device)
    loss_function = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
    timestamp = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    epoch_avg_losses = []  # per-epoch average training loss, for the final summary writer
    for epoch in range(10):
        writer = SummaryWriter('./logs_self/time{}/result_{}'.format(timestamp, epoch))
        print(f'----------第{epoch}轮训练开始----------')
        total_train_step = 0
        total_test_step = 0
        now_allbatch_loss = 0.0
        now_allbatch_test_loss = 0.0
        acc = 0
        net.train()
        for batch_idx, data in enumerate(train_dataloader):
            images, labels = data
            images = images.to(device)
            labels = labels.to(device)
            outputs = net(images)  # already on `device`; the original extra .to() was a no-op
            per_batch_loss = loss_function(outputs, labels)
            optimizer.zero_grad()
            per_batch_loss.backward()
            optimizer.step()
            # BUG FIX: accumulate a Python float. Summing the loss tensor itself
            # retains the autograd graph for every batch in the epoch (memory leak).
            per_batch_loss = per_batch_loss.item()
            now_allbatch_loss += per_batch_loss
            total_train_step += 1
            now_avg_loss_1 = now_allbatch_loss / total_train_step
            if batch_idx % 5 == 4:  # print every 5th batch
                print(f'训练batch次数{total_train_step},当前训练batch的loss:{per_batch_loss:.4f},目前总loss:'
                      f'{now_allbatch_loss:.4f},当前平均loss:{now_avg_loss_1:.4f}'
                      )
            writer.add_scalar('train_loss', per_batch_loss, total_train_step)
        net.eval()
        with torch.no_grad():
            print('--------------------测试开始----------------测试开始---------------测试开始--------------------')
            for test_batch_idx, data in enumerate(test_dataloader):
                imgs, targets = data
                imgs = imgs.to(device)
                targets = targets.to(device)
                outputs = net(imgs)
                predict_sure = torch.max(outputs, dim=1)[1]  # argmax over class logits
                now_per_test_loss = loss_function(outputs, targets).item()
                now_allbatch_test_loss += now_per_test_loss
                total_test_step += 1
                now_avg_loss = now_allbatch_test_loss / total_test_step
                # Per-batch accuracy; with batch_size=1 this is 0.0 or 1.0.
                acc = torch.eq(predict_sure, targets).sum().item() / len(targets)
                print(f'测试batch次数:{total_test_step},当前测试batch的loss:{now_per_test_loss:.4f}'
                      f'目前的总loss:{now_allbatch_test_loss:.4f},当前平均loss:{now_avg_loss:.4f}'
                      f'准确率:{acc}')
                writer.add_scalar('test_loss', now_per_test_loss, total_test_step)
        epoch_avg_losses.append(now_avg_loss_1)
        # BUG FIX: the original checked './weight_self' but created './wright_self'
        # (typo), so torch.save below crashed whenever the directory was missing.
        os.makedirs('./weight_self', exist_ok=True)
        torch.save(net.state_dict(), "./weight_self/model-{}.pth".format(epoch))
        writer.close()  # close the per-epoch writer (was leaked in the original)
    writer = SummaryWriter('./logs_self/time{}/result_{}'.format(timestamp, 'totalloss'))
    # Iterate what was actually collected instead of a hard-coded range(10).
    for i, epoch_loss in enumerate(epoch_avg_losses):
        writer.add_scalar('epoch_loss', epoch_loss, i)
    print('----------训练结束--------')
    writer.close()


if __name__ == '__main__':
    main()