# -*- coding: utf-8 -*-
"""
@File : scrfd
@Description: SCRFD face detection
@Author: Yang Jian
@Contact: [email protected]
@Time: 2022/2/25 10:31
@IDE: PYTHON
@REFERENCE: https://github.com/yangjian1218
"""
from __future__ import division
import datetime
import os.path as osp

import cv2
import numpy as np
import onnxruntime

import face_align
def softmax(z):
assert len(z.shape) == 2
s = np.max(z, axis=1)
s = s[:, np.newaxis] # necessary step to do broadcasting
e_x = np.exp(z - s)
div = np.sum(e_x, axis=1)
    div = div[:, np.newaxis]  # ditto, keep dims for broadcasting
return e_x / div
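# Illustrative check (values not from the model): softmax(np.array([[1., 2., 3.]]))
# ≈ [[0.0900, 0.2447, 0.6652]], and each row sums to 1.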
def distance2bbox(points, distance, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (n, 2), [x, y].
distance (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom).
max_shape (tuple): Shape of the image.
Returns:
Tensor: Decoded bboxes.
"""
x1 = points[:, 0] - distance[:, 0]
y1 = points[:, 1] - distance[:, 1]
x2 = points[:, 0] + distance[:, 2]
y2 = points[:, 1] + distance[:, 3]
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1])
y1 = y1.clamp(min=0, max=max_shape[0])
x2 = x2.clamp(min=0, max=max_shape[1])
y2 = y2.clamp(min=0, max=max_shape[0])
return np.stack([x1, y1, x2, y2], axis=-1)
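# Illustrative check (values not from the model): an anchor center at (8, 8) with predicted
# offsets [2, 3, 4, 5] decodes to the box [6, 5, 12, 13]:
# distance2bbox(np.array([[8., 8.]]), np.array([[2., 3., 4., 5.]])) -> [[ 6.  5. 12. 13.]]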
def distance2kps(points, distance, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (n, 2), [x, y].
distance (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom).
max_shape (tuple): Shape of the image.
Returns:
Tensor: Decoded bboxes.
"""
preds = []
for i in range(0, distance.shape[1], 2):
px = points[:, i % 2] + distance[:, i]
py = points[:, i % 2 + 1] + distance[:, i + 1]
if max_shape is not None:
px = px.clamp(min=0, max=max_shape[1])
py = py.clamp(min=0, max=max_shape[0])
preds.append(px)
preds.append(py)
return np.stack(preds, axis=-1)
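# Illustrative check (values not from the model): the same anchor at (8, 8) with two keypoint
# offsets [1, 2] and [3, 4] decodes to keypoints (9, 10) and (11, 12):
# distance2kps(np.array([[8., 8.]]), np.array([[1., 2., 3., 4.]])) -> [[ 9. 10. 11. 12.]]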
class SCRFD:
def __init__(self, model_file=None, session=None):
self.model_file = model_file
self.session = session
self.taskname = 'detection'
if self.session is None:
assert self.model_file is not None
assert osp.exists(self.model_file)
self.session = onnxruntime.InferenceSession(self.model_file, None)
self.center_cache = {}
self.nms_thresh = 0.4
self.det_thresh = 0.5
self._init_vars()
def _init_vars(self):
input_cfg = self.session.get_inputs()[0]
input_shape = input_cfg.shape
# print("input_shape:",input_shape)
if isinstance(input_shape[2], str):
self.input_size = None
else:
self.input_size = tuple(input_shape[2:4][::-1])
# print('image_size:', self.image_size)
input_name = input_cfg.name
self.input_shape = input_shape
outputs = self.session.get_outputs()
output_names = []
for o in outputs:
output_names.append(o.name)
self.input_name = input_name
self.output_names = output_names
# print("input_name:",self.input_name)
# print("output_name:",self.output_names)
self.input_mean = 127.5
self.input_std = 128.0
# assert len(outputs)==10 or len(outputs)==15
self.use_kps = False
self._anchor_ratio = 1.0
self._num_anchors = 1
if len(outputs) == 6:
self.fmc = 3
self._feat_stride_fpn = [8, 16, 32]
self._num_anchors = 2
elif len(outputs) == 9:
self.fmc = 3
            self._feat_stride_fpn = [8, 16, 32]  # the common case for the *_kps models
self._num_anchors = 2
self.use_kps = True
elif len(outputs) == 10:
self.fmc = 5
self._feat_stride_fpn = [8, 16, 32, 64, 128]
self._num_anchors = 1
elif len(outputs) == 15:
self.fmc = 5
self._feat_stride_fpn = [8, 16, 32, 64, 128]
self._num_anchors = 1
self.use_kps = True
def prepare(self, ctx_id, **kwargs):
if ctx_id < 0:
self.session.set_providers(['CPUExecutionProvider'])
# else:
# self.session.set_providers(['CUDAExecutionProvider'], [{'device_id': ctx_id}])
nms_thresh = kwargs.get('nms_thresh', None)
if nms_thresh is not None:
self.nms_thresh = nms_thresh
det_thresh = kwargs.get('det_thresh', None)
if det_thresh is not None:
self.det_thresh = det_thresh
input_size = kwargs.get('input_size', None)
if input_size is not None:
if self.input_size is not None:
print('warning: det_size is already set in scrfd model, ignore')
else:
self.input_size = input_size
            # warm up the session once with a dummy image so the first real inference is not slow
            img_tmp = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
            img_tmp = img_tmp.astype(np.float32)
            blob = cv2.dnn.blobFromImage(img_tmp, 1.0 / self.input_std, input_size,
                                         (self.input_mean, self.input_mean, self.input_mean))
            self.session.run(self.output_names, {self.input_name: blob})
def init_det_threshold(self, det_threshold):
"""
单独设置人脸检测阈值
:param det_threshold: 人脸检测阈值
:return:
"""
self.det_thresh = det_threshold
def forward(self, img, threshold=0.6, swap_rb=True):
scores_list = []
bboxes_list = []
kpss_list = []
input_size = tuple(img.shape[0:2][::-1])
# print('input_size:',input_size)
blob = cv2.dnn.blobFromImages([img], 1.0 / self.input_std, input_size,
(self.input_mean, self.input_mean, self.input_mean), swapRB=swap_rb)
net_outs = self.session.run(self.output_names, {self.input_name: blob})
# print("net_outs:::",net_outs[0])
input_height = blob.shape[2]
input_width = blob.shape[3]
fmc = self.fmc # 3
for idx, stride in enumerate(self._feat_stride_fpn):
scores = net_outs[idx]
# print("scores:",scores)
bbox_preds = net_outs[idx + fmc]
bbox_preds = bbox_preds * stride
if self.use_kps:
kps_preds = net_outs[idx + fmc * 2] * stride
height = input_height // stride
width = input_width // stride
K = height * width
key = (height, width, stride)
if key in self.center_cache:
anchor_centers = self.center_cache[key]
else:
# solution-1, c style:
# anchor_centers = np.zeros( (height, width, 2), dtype=np.float32 )
# for i in range(height):
# anchor_centers[i, :, 1] = i
# for i in range(width):
# anchor_centers[:, i, 0] = i
# solution-2:
# ax = np.arange(width, dtype=np.float32)
# ay = np.arange(height, dtype=np.float32)
# xv, yv = np.meshgrid(np.arange(width), np.arange(height))
# anchor_centers = np.stack([xv, yv], axis=-1).astype(np.float32)
# solution-3:
anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
# print(anchor_centers.shape)
anchor_centers = (anchor_centers * stride).reshape((-1, 2))
if self._num_anchors > 1:
anchor_centers = np.stack([anchor_centers] * self._num_anchors, axis=1).reshape((-1, 2))
if len(self.center_cache) < 100:
self.center_cache[key] = anchor_centers
# print(anchor_centers.shape,bbox_preds.shape,scores.shape,kps_preds.shape)
pos_inds = np.where(scores >= threshold)[0]
# print("pos_inds:",pos_inds)
bboxes = distance2bbox(anchor_centers, bbox_preds)
pos_scores = scores[pos_inds]
pos_bboxes = bboxes[pos_inds]
scores_list.append(pos_scores)
bboxes_list.append(pos_bboxes)
if self.use_kps:
kpss = distance2kps(anchor_centers, kps_preds)
# kpss = kps_preds
kpss = kpss.reshape((kpss.shape[0], -1, 2))
pos_kpss = kpss[pos_inds]
kpss_list.append(pos_kpss)
# print("....:",bboxes_list)
return scores_list, bboxes_list, kpss_list
def detect(self, img, input_size=None, max_num=0, det_thresh=None, metric='default', swap_rb=True):
"""
:param img: 原始图像
:param input_size: 输入尺寸,元组或者列表
:param max_num: 返回人脸数量, 如果为0,表示所有,
:param det_thresh: 人脸检测阈值,
:param metric: 排序方式,默认为面积+中心偏移, "max"为面积最大排序
:param swap_rb: 是否进行r b通道转换, 如果传入的是bgr格式图片,则需要为True
:return:
"""
assert input_size is not None or self.input_size is not None
input_size = self.input_size if input_size is None else input_size
        # choose resize interpolation: cv2.INTER_AREA when shrinking, cv2.INTER_LINEAR when enlarging
        resize_interpolation = cv2.INTER_AREA if img.shape[0] >= input_size[1] else cv2.INTER_LINEAR
im_ratio = float(img.shape[0]) / img.shape[1]
model_ratio = float(input_size[1]) / input_size[0]
if im_ratio > model_ratio:
new_height = input_size[1]
new_width = int(new_height / im_ratio)
else:
new_width = input_size[0]
new_height = int(new_width * im_ratio)
det_scale = float(new_height) / img.shape[0]
resized_img = cv2.resize(img, (new_width, new_height), interpolation=resize_interpolation)
det_img = np.zeros((input_size[1], input_size[0], 3), dtype=np.uint8)
det_img[:new_height, :new_width, :] = resized_img
        if det_thresh is None:
det_thresh = self.det_thresh
scores_list, bboxes_list, kpss_list = self.forward(det_img, det_thresh, swap_rb)
# print("====",len(scores_list),len(bboxes_list),len(kpss_list))
# print("scores_list:",scores_list)
scores = np.vstack(scores_list)
scores_ravel = scores.ravel()
order = scores_ravel.argsort()[::-1]
bboxes = np.vstack(bboxes_list) / det_scale
if self.use_kps:
kpss = np.vstack(kpss_list) / det_scale
pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
pre_det = pre_det[order, :]
keep = self.nms(pre_det)
det = pre_det[keep, :]
if self.use_kps:
kpss = kpss[order, :, :]
kpss = kpss[keep, :, :]
else:
kpss = None
if max_num > 0 and det.shape[0] > max_num:
            area = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
img_center = img.shape[0] // 2, img.shape[1] // 2
offsets = np.vstack([
(det[:, 0] + det[:, 2]) / 2 - img_center[1],
(det[:, 1] + det[:, 3]) / 2 - img_center[0]
])
offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
if metric == 'max':
values = area
else:
values = area - offset_dist_squared * 2.0 # some extra weight on the centering
            bindex = np.argsort(values)[::-1]
bindex = bindex[0:max_num]
det = det[bindex, :]
if kpss is not None:
kpss = kpss[bindex, :]
return det, kpss
def get_align(self, image, kpss):
"""
从图像中生成align后的人脸图像
:param image: nparray, 原始图
:param kpss: 人脸关键点坐标列表
:return: aligned 人脸 112x112
"""
aligns = []
for pts in kpss:
align = face_align.norm_crop(image, pts) # 得到112x112的对齐图像
aligns.append(align)
return aligns
def nms(self, dets):
thresh = self.nms_thresh
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
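# NMS intuition (hypothetical numbers): two boxes [0, 0, 10, 10] and [1, 1, 11, 11] with scores
# 0.9 and 0.8 overlap with IoU ≈ 0.70 > nms_thresh (0.4), so only the higher-scoring box is kept.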
if __name__ == '__main__':
detector = SCRFD(model_file="models/scrfd/scrfd_2.5g_kps.onnx")
detector.prepare(0)
img_path = r"data/test2.jpg"
img = cv2.imread(img_path)
ta = datetime.datetime.now()
cycle = 100
# for i in range(cycle):
    bboxes, kpss = detector.detect(img, input_size=(640, 640))  # get boxes and keypoints
# print("bboxes:",bboxes,"\nkpss:",kpss)
tb = datetime.datetime.now()
print('all cost:', (tb - ta).total_seconds() * 1000)
print(img_path, bboxes.shape)
if kpss is not None:
print(kpss.shape)
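        # Minimal sketch (not part of the original demo): crop aligned 112x112 faces from the
        # detected keypoints via get_align, assuming kpss rows correspond to bboxes rows.
        aligned_faces = detector.get_align(img, kpss)
        print('aligned faces:', len(aligned_faces))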
    # draw the results
    for i in range(bboxes.shape[0]):
        bbox = bboxes[i]
        x1, y1, x2, y2, score = bbox.astype(int)
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
        if kpss is not None:
            kps = kpss[i]
            for kp in kps:
                kp = kp.astype(int)
                cv2.circle(img, tuple(kp), 1, (0, 0, 255), 2)
    cv2.namedWindow("img", cv2.WINDOW_NORMAL)
cv2.imshow("img", img)
cv2.waitKey(0)