Skip to content
This repository has been archived by the owner on Nov 21, 2023. It is now read-only.

Commit

Permalink
Support exporting fpn (#372)
Browse files Browse the repository at this point in the history
Summary:
Based on orionr's work

 - [x] Solve [the problem about GenerateProposals](#334 (comment))
 - [x] Use the existing [ResizeNearest](https://github.com/caffe2/caffe2/blob/master/caffe2/operators/resize_op.cc#L57) layer instead of UpsampleNearest. ResizeNearest has cpu implementation and neon optimization
 - [x] Make it work (with pytorch/pytorch#7091)

With this PR, FPN is supported in cooperation with pytorch/pytorch#7091. I have verified that it works on `e2e_faster_rcnn_R-50-FPN_1x.yaml`
Pull Request resolved: #372

Reviewed By: newstzpz

Differential Revision: D9213242

Pulled By: rbgirshick

fbshipit-source-id: 8fc7b77e6cbf08adaafd760505dd760df59bfd79
  • Loading branch information
daquexian authored and facebook-github-bot committed Aug 8, 2018
1 parent 6a2b761 commit 9387594
Showing 1 changed file with 113 additions and 35 deletions.
148 changes: 113 additions & 35 deletions tools/convert_pkl_to_pb.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@
from detectron.utils.logging import setup_logging
from detectron.utils.model_convert_utils import convert_op_in_proto
from detectron.utils.model_convert_utils import op_filter
import detectron.utils.blob as blob_utils
import detectron.core.test_engine as test_engine
import detectron.utils.c2 as c2_utils
import detectron.utils.model_convert_utils as mutils
Expand Down Expand Up @@ -124,10 +125,41 @@ def unscope_name(name):


def reset_names(names):
    """Strip scope prefixes from every blob name in *names*, in place.

    Each entry is rewritten via unscope_name(); the list object itself is
    mutated and nothing is returned.
    """
    for i in range(len(names)):
        names[i] = unscope_name(names[i])


def convert_collect_and_distribute(
    op, blobs,
    roi_canonical_scale,
    roi_canonical_level,
    roi_max_level,
    roi_min_level,
    rpn_max_level,
    rpn_min_level,
    rpn_post_nms_topN,
):
    """Replace the Python CollectAndDistributeFpnRpnProposalsOp with the
    equivalent C++ operator.

    The FPN/RPN level and proposal-count settings are forwarded as operator
    arguments; inputs and outputs are carried over unchanged. Returns the
    new OperatorDef. (`blobs` is accepted for signature parity with the
    other converters but is not used here.)
    """
    print('Converting CollectAndDistributeFpnRpnProposals'
          ' Python -> C++:\n{}'.format(op))
    assert op.name.startswith('CollectAndDistributeFpnRpnProposalsOp'), \
        'Not valid CollectAndDistributeFpnRpnProposalsOp'

    return core.CreateOperator(
        'CollectAndDistributeFpnRpnProposals',
        list(op.input),
        list(op.output),
        roi_canonical_scale=roi_canonical_scale,
        roi_canonical_level=roi_canonical_level,
        roi_max_level=roi_max_level,
        roi_min_level=roi_min_level,
        rpn_max_level=rpn_max_level,
        rpn_min_level=rpn_min_level,
        rpn_post_nms_topN=rpn_post_nms_topN,
    )


def convert_gen_proposals(
op, blobs,
rpn_pre_nms_topN,
Expand All @@ -136,19 +168,22 @@ def convert_gen_proposals(
rpn_min_size,
):
print('Converting GenerateProposals Python -> C++:\n{}'.format(op))
assert op.name.startswith("GenerateProposalsOp"), "Not valid GenerateProposalsOp"
assert op.name.startswith('GenerateProposalsOp'), 'Not valid GenerateProposalsOp'

spatial_scale = mutils.get_op_arg_valf(op, "spatial_scale", None)
spatial_scale = mutils.get_op_arg_valf(op, 'spatial_scale', None)
assert spatial_scale is not None

lvl = int(op.input[0][-1]) if op.input[0][-1].isdigit() else None

inputs = [x for x in op.input]
anchor_name = "anchor"
anchor_name = 'anchor{}'.format(lvl) if lvl else 'anchor'
inputs.append(anchor_name)
blobs[anchor_name] = get_anchors(spatial_scale)
anchor_sizes = (cfg.FPN.RPN_ANCHOR_START_SIZE * 2.**(lvl - cfg.FPN.RPN_MIN_LEVEL),) if lvl else cfg.RPN.SIZES
blobs[anchor_name] = get_anchors(spatial_scale, anchor_sizes)
print('anchors {}'.format(blobs[anchor_name]))

ret = core.CreateOperator(
"GenerateProposals",
'GenerateProposals',
inputs,
list(op.output),
spatial_scale=spatial_scale,
Expand All @@ -158,14 +193,13 @@ def convert_gen_proposals(
min_size=rpn_min_size,
correct_transform_coords=True,
)

return ret, anchor_name


def get_anchors(spatial_scale, anchor_sizes):
    """Return the anchor array for one feature level as float32.

    The generation stride is the inverse of *spatial_scale*; *anchor_sizes*
    supplies the per-level sizes (a single size per FPN level, or
    cfg.RPN.SIZES for the non-FPN case). Aspect ratios come from
    cfg.RPN.ASPECT_RATIOS.
    """
    anchors = generate_anchors.generate_anchors(
        stride=1. / spatial_scale,
        sizes=anchor_sizes,
        aspect_ratios=cfg.RPN.ASPECT_RATIOS).astype(np.float32)
    return anchors

Expand All @@ -188,36 +222,78 @@ def convert_op_name(op):
reset_names(op.output)
return [op]

@op_filter(type="Python", inputs=['rpn_cls_probs', 'rpn_bbox_pred', 'im_info'])
def convert_gen_proposal(op_in):
gen_proposals_op, ext_input = convert_gen_proposals(
op_in, blobs,
rpn_min_size=float(cfg.TEST.RPN_MIN_SIZE),
rpn_post_nms_topN=cfg.TEST.RPN_POST_NMS_TOP_N,
rpn_pre_nms_topN=cfg.TEST.RPN_PRE_NMS_TOP_N,
rpn_nms_thresh=cfg.TEST.RPN_NMS_THRESH,
)
net.external_input.extend([ext_input])
return [gen_proposals_op]
@op_filter(type='Python')
def convert_python(op):
    """Swap a Python-layer op for its C++ counterpart.

    Handles GenerateProposalsOp (also registering the generated anchor blob
    as an external input of the net) and
    CollectAndDistributeFpnRpnProposalsOp. Any other Python op is a hard
    error, since the exported net could not run it.
    """
    if op.name.startswith('GenerateProposalsOp'):
        converted, anchor_blob = convert_gen_proposals(
            op, blobs,
            rpn_min_size=float(cfg.TEST.RPN_MIN_SIZE),
            rpn_post_nms_topN=cfg.TEST.RPN_POST_NMS_TOP_N,
            rpn_pre_nms_topN=cfg.TEST.RPN_PRE_NMS_TOP_N,
            rpn_nms_thresh=cfg.TEST.RPN_NMS_THRESH,
        )
        net.external_input.extend([anchor_blob])
        return [converted]
    if op.name.startswith('CollectAndDistributeFpnRpnProposalsOp'):
        converted = convert_collect_and_distribute(
            op, blobs,
            roi_canonical_scale=cfg.FPN.ROI_CANONICAL_SCALE,
            roi_canonical_level=cfg.FPN.ROI_CANONICAL_LEVEL,
            roi_max_level=cfg.FPN.ROI_MAX_LEVEL,
            roi_min_level=cfg.FPN.ROI_MIN_LEVEL,
            rpn_max_level=cfg.FPN.RPN_MAX_LEVEL,
            rpn_min_level=cfg.FPN.RPN_MIN_LEVEL,
            rpn_post_nms_topN=cfg.TEST.RPN_POST_NMS_TOP_N,
        )
        return [converted]
    raise ValueError('Failed to convert Python op {}'.format(
        op.name))

# Only convert UpsampleNearest to ResizeNearest when converting to pb, so
# that existing models are unchanged.
# https://github.com/facebookresearch/Detectron/pull/372#issuecomment-410248561
@op_filter(type='UpsampleNearest')
def convert_upsample_nearest(op):
    """Rewrite UpsampleNearest as the equivalent ResizeNearest op.

    ResizeNearest takes separate width/height scales, so the single
    'scale' attribute is duplicated into both. Raises KeyError if the op
    has no 'scale' attribute.
    """
    scale = next((a.i for a in op.arg if a.name == 'scale'), None)
    if scale is None:
        raise KeyError('No attribute "scale" in UpsampleNearest op')
    return core.CreateOperator(
        'ResizeNearest',
        list(op.input),
        list(op.output),
        name=op.name,
        width_scale=float(scale),
        height_scale=float(scale),
    )

@op_filter()
def convert_rpn_rois(op):
    """Rename the blob 'rois' to 'rpn_rois' in this op's inputs and outputs.

    Applied to every op (empty filter) so the RPN output name is consistent
    across the whole exported net. Returns the (mutated) op in a
    single-element list.
    """
    for j in range(len(op.input)):
        if op.input[j] == 'rois':
            print('Converting op {} input name: rois -> rpn_rois:\n{}'.format(
                op.type, op))
            op.input[j] = 'rpn_rois'
    for j in range(len(op.output)):
        if op.output[j] == 'rois':
            print('Converting op {} output name: rois -> rpn_rois:\n{}'.format(
                op.type, op))
            op.output[j] = 'rpn_rois'
    return [op]

@op_filter(type_in=['StopGradient', 'Alias'])
def convert_remove_op(op):
    """Delete the op: StopGradient and Alias are no-ops at inference time."""
    print('Removing op {}:\n{}'.format(op.type, op))
    return []

# We want to apply to all operators, including converted,
# so run each pass separately (order matters: ops must be converted to
# their C++ forms before names are normalized and rois is renamed).
convert_op_in_proto(net, convert_remove_op)
convert_op_in_proto(net, convert_upsample_nearest)
convert_op_in_proto(net, convert_python)
convert_op_in_proto(net, convert_op_name)
convert_op_in_proto(net, convert_rpn_rois)

reset_names(net.external_input)
reset_names(net.external_output)
Expand Down Expand Up @@ -272,6 +348,7 @@ def convert_model_gpu(args, net, init_net):
cdo_cpu = mutils.get_device_option_cpu()

CPU_OPS = [
["CollectAndDistributeFpnRpnProposals", None],
["GenerateProposals", None],
["BBoxTransform", None],
["BoxWithNMSLimit", None],
Expand Down Expand Up @@ -424,10 +501,8 @@ def _prepare_blobs(
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)

blob = np.zeros([1, im.shape[0], im.shape[1], 3], dtype=np.float32)
blob[0, :, :, :] = im
channel_swap = (0, 3, 1, 2) # swap channel to (k, c, h, w)
blob = blob.transpose(channel_swap)
# Reuse code in blob_utils and fit FPN
blob = blob_utils.im_list_to_blob([im])

blobs = {}
blobs['data'] = blob
Expand Down Expand Up @@ -462,7 +537,7 @@ def run_model_pb(args, net, init_net, im, check_blobs):
)

try:
workspace.RunNet(net.Proto().name)
workspace.RunNet(net)
scores = workspace.FetchBlob('score_nms')
classids = workspace.FetchBlob('class_nms')
boxes = workspace.FetchBlob('bbox_nms')
Expand Down Expand Up @@ -520,13 +595,16 @@ def main():
merge_cfg_from_list(args.opts)
cfg.NUM_GPUS = 1
assert_and_infer_cfg()
logger.info('Conerting model with config:')
logger.info('Converting model with config:')
logger.info(pprint.pformat(cfg))

assert not cfg.MODEL.KEYPOINTS_ON, "Keypoint model not supported."
assert not cfg.MODEL.MASK_ON, "Mask model not supported."
assert not cfg.FPN.FPN_ON, "FPN not supported."
assert not cfg.RETINANET.RETINANET_ON, "RetinaNet model not supported."
# script will stop when it can't find an operator rather
# than stopping based on these flags
#
# assert not cfg.MODEL.KEYPOINTS_ON, "Keypoint model not supported."
# assert not cfg.MODEL.MASK_ON, "Mask model not supported."
# assert not cfg.FPN.FPN_ON, "FPN not supported."
# assert not cfg.RETINANET.RETINANET_ON, "RetinaNet model not supported."

# load model from cfg
model, blobs = load_model(args)
Expand Down

0 comments on commit 9387594

Please sign in to comment.