# export_onnx_default_box.py (forked from qfgaohao/pytorch-ssd)
# Export a trained pytorch-ssd detector to ONNX and dump its default
# (prior) boxes to a binary .anchor file.

import sys

import torch
import torch.onnx
import onnx

from vision.ssd.vgg_ssd import create_vgg_ssd
from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd
from vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite
from vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite
from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite
# from caffe2.python.onnx.backend import Caffe2Backend as c2
if len(sys.argv) < 4:
    print('Usage: python export_onnx_default_box.py <net type: vgg16-ssd|mb1-ssd|mb1-ssd-lite|mb2-ssd-lite|sq-ssd-lite> <model path> <label path>')
    sys.exit(0)
net_type = sys.argv[1]
model_path = sys.argv[2]
label_path = sys.argv[3]
with open(label_path) as f:
    class_names = [name.strip() for name in f.readlines()]
num_classes = len(class_names)
if net_type == 'vgg16-ssd':
    net = create_vgg_ssd(num_classes, is_test=True)
elif net_type == 'mb1-ssd':
    net = create_mobilenetv1_ssd(num_classes, is_test=True)
elif net_type == 'mb1-ssd-lite':
    net = create_mobilenetv1_ssd_lite(num_classes, is_test=True)
elif net_type == 'mb2-ssd-lite':
    net = create_mobilenetv2_ssd_lite(num_classes, is_test=True)
elif net_type == 'sq-ssd-lite':
    net = create_squeezenet_ssd_lite(num_classes, is_test=True)
else:
    print("The net type is wrong. It should be one of vgg16-ssd, mb1-ssd, mb1-ssd-lite, mb2-ssd-lite and sq-ssd-lite.")
    sys.exit(1)
net.load(model_path)
net.eval()

onnx_model_path = f"models/{net_type}.onnx"
# The *_net paths below are used only by the commented-out Caffe2 conversion at the end of the file.
init_net_path = f"models/{net_type}_init_net.pb"
init_net_txt_path = f"models/{net_type}_init_net.pbtxt"
predict_net_path = f"models/{net_type}_predict_net.pb"
predict_net_txt_path = f"models/{net_type}_predict_net.pbtxt"

# ONNX file exporting: trace the network with a dummy 1x3x300x300 input (the SSD300 input size).
dummy_input = torch.randn(1, 3, 300, 300)
torch.onnx.export(net, dummy_input, onnx_model_path, verbose=False, output_names=['scores', 'boxes'])
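
# Optional sanity check (not part of the original script): a minimal sketch,
# assuming onnxruntime is installed, that reloads the exported graph and runs
# one forward pass to confirm the 'scores' and 'boxes' outputs come back.
# import numpy as np
# import onnxruntime as ort
# sess = ort.InferenceSession(onnx_model_path)
# input_name = sess.get_inputs()[0].name
# scores, boxes = sess.run(None, {input_name: np.random.randn(1, 3, 300, 300).astype(np.float32)})
# print(f"ONNX check: scores {scores.shape}, boxes {boxes.shape}")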
# Default box file exporting: each prior coordinate is quantized to a single
# byte as round(x * 256), clamped to [0, 255].
def_filename = f"models/{net_type}.anchor"

from ctypes import c_ubyte

def to_bytes(tensor):
    # Scale coordinates (nominally in [0, 1]) to [0, 256), then round and
    # clamp into the byte range.
    tensor = tensor * 256.
    tensor = tensor.round().clamp(0., 255.).view(-1).int().tolist()
    # Copy into a ctypes unsigned-byte array so it can be written verbatim.
    buf = (c_ubyte * len(tensor))()
    for k, v in enumerate(tensor):
        buf[k] = v
    return buf
# Preview of the first few priors, kept for debugging:
# for i, prior in enumerate(net.config.priors[0:10]):
#     print("{:4d}: {}".format(i, prior))
anchor = net.config.priors
with open(def_filename, 'wb') as f:
    f.write(to_bytes(anchor))
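
# Reading the .anchor file back (not part of the original script): a minimal
# sketch assuming the priors are center-form (cx, cy, w, h) rows, as in
# pytorch-ssd. Dividing each byte by 256 recovers the coordinates with a
# small (about 1/256) quantization error.
# import numpy as np
# with open(def_filename, 'rb') as f:
#     priors = np.frombuffer(f.read(), dtype=np.uint8).astype(np.float32) / 256.
# priors = priors.reshape(-1, 4)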
# The original Caffe2 conversion path, kept for reference:
# model = onnx.load(onnx_model_path)
# init_net, predict_net = c2.onnx_graph_to_caffe2_net(model)
#
# print(f"Save the model in binary format to the files {init_net_path} and {predict_net_path}.")
#
# with open(init_net_path, "wb") as fopen:
#     fopen.write(init_net.SerializeToString())
# with open(predict_net_path, "wb") as fopen:
#     fopen.write(predict_net.SerializeToString())
#
# print(f"Save the model in txt format to the files {init_net_txt_path} and {predict_net_txt_path}.")
# with open(init_net_txt_path, 'w') as f:
#     f.write(str(init_net))
#
# with open(predict_net_txt_path, 'w') as f:
#     f.write(str(predict_net))