visualize.py
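"""Visualize RetinaNet detections on a COCO or CSV dataset with OpenCV.

Example invocations (the file paths below are placeholders, not files shipped with the repo):

    python visualize.py --dataset coco --coco_path /path/to/COCO --model model_final.pt
    python visualize.py --dataset csv --csv_classes classes.csv --csv_val val_annots.csv --model model_final.pt

Each image is shown in an OpenCV window with the detected boxes and class names drawn on it;
press any key to advance to the next image.
"""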
import argparse
import time

import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import transforms

from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, \
    UnNormalizer, Normalizer

# This script targets the 1.x PyTorch API used by the rest of the repo.
assert torch.__version__.split('.')[0] == '1'

print('CUDA available: {}'.format(torch.cuda.is_available()))
def main(args=None):
    parser = argparse.ArgumentParser(description='Simple script for visualizing RetinaNet detections.')

    parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')
    parser.add_argument('--csv_val', help='Path to file containing validation annotations (see readme)')
    parser.add_argument('--model', help='Path to model (.pt) file.')

    parser = parser.parse_args(args)

    # Build the dataset to visualize, based on the requested dataset type.
    if parser.dataset == 'coco':
        dataset_val = CocoDataset(parser.coco_path, set_name='train2017',
                                  transform=transforms.Compose([Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
                                 transform=transforms.Compose([Normalizer(), Resizer()]))
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')
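    # For the CSV path, the annotations file is expected to follow the repo's CSV format
    # (typically one "image_path,x1,y1,x2,y2,class_name" row per box, per the readme; this
    # convention comes from the pytorch-retinanet dataloader, not from this script), and
    # --csv_classes maps each class name to an integer id.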
    sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
    dataloader_val = DataLoader(dataset_val, num_workers=1, collate_fn=collater, batch_sampler=sampler_val)

    # torch.load here expects a whole serialized model, not a state_dict; when loading a
    # GPU-trained checkpoint on a CPU-only machine, passing map_location='cpu' may be needed.
    retinanet = torch.load(parser.model)

    use_gpu = True

    if use_gpu:
        if torch.cuda.is_available():
            retinanet = retinanet.cuda()

    if torch.cuda.is_available():
        retinanet = torch.nn.DataParallel(retinanet).cuda()
    else:
        retinanet = torch.nn.DataParallel(retinanet)

    retinanet.eval()

    unnormalize = UnNormalizer()
    def draw_caption(image, box, caption):
        # Draw the class name just above the box: white text over a heavier black stroke for readability.
        b = np.array(box).astype(int)
        cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
        cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
    for idx, data in enumerate(dataloader_val):

        with torch.no_grad():
            st = time.time()
            if torch.cuda.is_available():
                scores, classification, transformed_anchors = retinanet(data['img'].cuda().float())
            else:
                scores, classification, transformed_anchors = retinanet(data['img'].float())
            print('Elapsed time: {}'.format(time.time() - st))

            # Keep only detections with a confidence score above 0.5.
            idxs = np.where(scores.cpu() > 0.5)

            # Undo the normalization, clip to [0, 255] and convert CHW float -> HWC uint8 for OpenCV.
            img = np.array(255 * unnormalize(data['img'][0, :, :, :])).copy()
            img[img < 0] = 0
            img[img > 255] = 255
            img = np.transpose(img, (1, 2, 0))
            img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB)

            # Draw a box and label for every detection that survived the threshold.
            for j in range(idxs[0].shape[0]):
                bbox = transformed_anchors[idxs[0][j], :]
                x1 = int(bbox[0])
                y1 = int(bbox[1])
                x2 = int(bbox[2])
                y2 = int(bbox[3])
                label_name = dataset_val.labels[int(classification[idxs[0][j]])]
                draw_caption(img, (x1, y1, x2, y2), label_name)
                cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=2)
                print(label_name)

            cv2.imshow('img', img)
            cv2.waitKey(0)
if __name__ == '__main__':
    main()
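# Note: cv2.imshow / cv2.waitKey need a display; on a headless machine the calls at the end of
# the loop could be swapped for something like cv2.imwrite('detections_{}.png'.format(idx), img)
# (the filename pattern is only a suggestion, not part of this repo).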