diff --git a/utensor_cgen/backend/utensor/_graph_lower/_op_lower.py b/utensor_cgen/backend/utensor/_graph_lower/_op_lower.py
index 4d1cd6a6..e5255d4e 100644
--- a/utensor_cgen/backend/utensor/_graph_lower/_op_lower.py
+++ b/utensor_cgen/backend/utensor/_graph_lower/_op_lower.py
@@ -40,6 +40,8 @@ class uTensorRearchGraphLower(uTensorGraphLowerBase):
   class OptypeRenameManager(object):
     NAME_MAP = {
       'Add': 'AddOperator',
+      'Mul': 'MulOperator',
+      'Sub': 'SubOperator',
       'Conv2D': 'ConvOperator',
       'MatMul': 'MatrixMultOperator'
     }
@@ -89,6 +91,8 @@ def apply(cls, ugraph):
     for op_info in ugraph.get_ops_by_type('FullyConnectedOperator'):
       if cls._check_quantized(op_info):
         op_info.code_gen_attributes['namespaces'] = ('TflmSymQuantOps',)
+      else:
+        op_info.code_gen_attributes['namespaces'] = ('ReferenceOperators',)
 
   @classmethod
   def _check_quantized(cls, op_info):
diff --git a/utensor_cgen/backend/utensor/code_generator/rearch/_operators/_impls.py b/utensor_cgen/backend/utensor/code_generator/rearch/_operators/_impls.py
index b07552f7..ace26096 100644
--- a/utensor_cgen/backend/utensor/code_generator/rearch/_operators/_impls.py
+++ b/utensor_cgen/backend/utensor/code_generator/rearch/_operators/_impls.py
@@ -478,6 +478,69 @@ def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
       nested_namespaces=type(self).namespaces,
     )
 
+@OperatorFactory.register
+class _ConvOperator(_CommonParams):
+  op_type = "ConvOperator"
+
+  @classmethod
+  @must_return_type(Hashable)
+  def get_constructor_parameters(cls, op_info):
+
+    strides = [
+      1,
+      op_info.op_attr['StrideW'],
+      op_info.op_attr['StrideH'],
+      1,
+    ]
+    padding = cls._PADDING_MAP[op_info.op_attr['Padding']]
+    strides_str = ','.join(map(str, strides))
+    return ("{{ {} }}".format(strides_str), padding)
+
+  def get_declare_snippet(self, op_var_name, tensor_var_map):
+    return DeclareOpSnippet(
+      op=self,
+      templ_dtypes=[self.out_dtypes[0]],
+      op_var_name=op_var_name,
+    )
+
+  def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
+    return ConvOpEvalSnippet(
+      op_info=op_info,
+      templ_dtypes=[self.out_dtypes[0]],
+      op_name=op_var_name,
+      tensor_var_map=tensor_var_map,
+    )
+
+@OperatorFactory.register
+class _FullyConnectedOperator(_CommonParams):
+  namespaces = ('ReferenceOperators',)
+  op_type = "FullyConnectedOperator"
+
+  @classmethod
+  @must_return_type(Hashable)
+  def get_constructor_parameters(cls, op_info):
+    activation_idx = cls._ACTIVATION_STR_PATTERN.match(
+      op_info.op_attr['FusedActivationFunction']
+    ).group(1)
+    activation = cls._ACTIVATION_MAP[activation_idx]
+    return (activation,)
+
+  def get_declare_snippet(self, op_var_name, tensor_var_map):
+    return DeclareOpSnippet(
+      op=self,
+      templ_dtypes=[self.out_dtypes[0]],
+      op_var_name=op_var_name,
+      nested_namespaces=type(self).namespaces,
+    )
+
+  def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
+    return FullyConnectedSnippet(
+      op_info=op_info,
+      templ_dtypes=[self.out_dtypes[0]],
+      op_name=op_var_name,
+      tensor_var_map=tensor_var_map,
+      nested_namespaces=type(self).namespaces,
+    )
 
 
 @OperatorFactory.register
 class _QuantizedFullyConnectedOperator(_CommonParams):
@@ -521,3 +584,158 @@ def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
     return MissingOpEvalSnippet(op_info, tensor_var_map)
 
 OperatorFactory._operators[_MissingOperator.op_type] = _MissingOperator
+
+@OperatorFactory.register
+class _BatchNormOperator(_CommonParams):
+  namespaces = ('ReferenceOperators',)
+  op_type = "BatchNormOperator"
+
+  @classmethod
+  @must_return_type(Hashable)
+  def get_constructor_parameters(cls, op_info):
+    strides = [
+      1,
+      op_info.op_attr['StrideW'],
+      op_info.op_attr['StrideH'],
+      1,
+    ]
+    padding = cls._PADDING_MAP[op_info.op_attr['Padding']]
+    strides_str = ','.join(map(str, strides))
+    return ("{{ {} }}".format(strides_str), padding)
+
+  def get_declare_snippet(self, op_var_name, tensor_var_map):
+    return DeclareOpSnippet(
+      op=self,
+      templ_dtypes=[self.out_dtypes[0]],
+      op_var_name=op_var_name,
+      nested_namespaces=type(self).namespaces,
+    )
+
+  def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
+    return BatchNormSnippet(
+      op_info=op_info,
+      templ_dtypes=[self.out_dtypes[0]],
+      op_name=op_var_name,
+      tensor_var_map=tensor_var_map,
+      nested_namespaces=type(self).namespaces,
+    )
+
+@OperatorFactory.register
+class _MeanOperator(_Operator):
+  namespaces = ('ReferenceOperators',)
+  op_type = "MeanOperator"
+
+  @classmethod
+  @must_return_type(Hashable)
+  def get_constructor_parameters(cls, op_info):
+    keep_dims = str(op_info.op_attr["keep_dims"])
+    return (" {} ".format(keep_dims),)
+
+  def get_declare_snippet(self, op_var_name, tensor_var_map):
+    return DeclareOpSnippet(
+      op=self,
+      templ_dtypes=[self.out_dtypes[0]],
+      op_var_name=op_var_name,
+      nested_namespaces=type(self).namespaces,
+    )
+
+  def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
+    return MeanOpEvalSnippet(
+      op_info=op_info,
+      templ_dtypes=[self.out_dtypes[0]],
+      op_name=op_var_name,
+      tensor_var_map=tensor_var_map,
+      nested_namespaces=type(self).namespaces,
+    )
+
+@OperatorFactory.register
+class _SoftmaxOperator(_CommonParams):
+  namespaces = ('ReferenceOperators',)
+  op_type = "SoftmaxOperator"
+
+  @classmethod
+  @must_return_type(Hashable)
+  def get_constructor_parameters(cls, op_info):
+    beta = op_info.op_attr["Beta"]
+    return (" %f " % beta,)
+
+  def get_declare_snippet(self, op_var_name, tensor_var_map):
+    return DeclareOpSnippet(
+      op=self,
+      templ_dtypes=[self.out_dtypes[0]],
+      op_var_name=op_var_name,
+      nested_namespaces=type(self).namespaces,
+    )
+
+  def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
+    return SoftmaxOpEvalSnippet(
+      op_info=op_info,
+      templ_dtypes=[self.out_dtypes[0]],
+      op_name=op_var_name,
+      tensor_var_map=tensor_var_map,
+      nested_namespaces=type(self).namespaces,
+    )
+
+@OperatorFactory.register
+class _MulOperator(_Operator):
+  namespaces = ('ReferenceOperators',)
+  op_type = 'MulOperator'
+
+  def get_declare_snippet(self, op_var_name, tensor_var_map):
+    return DeclareOpSnippet(
+      op=self,
+      templ_dtypes=[self.in_dtypes[0]],
+      op_var_name=op_var_name,
+      nested_namespaces=type(self).namespaces,
+    )
+
+  def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
+    return MulOpEvalSnippet(
+      op_info=op_info,
+      templ_dtypes=[self.in_dtypes[0]],
+      op_name=op_var_name,
+      tensor_var_map=tensor_var_map,
+      nested_namespaces=type(self).namespaces,
+    )
+
+@OperatorFactory.register
+class _SubOperator(_Operator):
+  namespaces = ('ReferenceOperators',)
+  op_type = 'SubOperator'
+
+  def get_declare_snippet(self, op_var_name, tensor_var_map):
+    return DeclareOpSnippet(
+      op=self,
+      templ_dtypes=[self.in_dtypes[0]],
+      op_var_name=op_var_name,
+      nested_namespaces=type(self).namespaces,
+    )
+
+  def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
+    return SubOpEvalSnippet(
+      op_info=op_info,
+      templ_dtypes=[self.in_dtypes[0]],
+      op_name=op_var_name,
+      tensor_var_map=tensor_var_map,
+      nested_namespaces=type(self).namespaces,
+    )
+
+@OperatorFactory.register
+class _SigmoidOperator(_Operator):
+  namespaces = ('ReferenceOperators',)
+  op_type = 'SigmoidOperator'
+
+  def get_declare_snippet(self, op_var_name, tensor_var_map):
+    return DeclareOpSnippet(
+      op=self,
+      templ_dtypes=[self.in_dtypes[0]],
+      op_var_name=op_var_name,
+    )
+
+  def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
+    return SigmoidOpEvalSnippet(
+      op_info=op_info,
+      templ_dtypes=[self.in_dtypes[0]],
+      op_name=op_var_name,
+      tensor_var_map=tensor_var_map,
+    )
diff --git a/utensor_cgen/backend/utensor/snippets/rearch/_snippets.py b/utensor_cgen/backend/utensor/snippets/rearch/_snippets.py
index a184420d..4ded93d4 100644
--- a/utensor_cgen/backend/utensor/snippets/rearch/_snippets.py
+++ b/utensor_cgen/backend/utensor/snippets/rearch/_snippets.py
@@ -28,8 +28,16 @@
   "MinPoolEvalSnippet",
   "MaxPoolEvalSnippet",
   "QuantizedFullyConnectedSnippet",
+  "FullyConnectedSnippet",
   "MissingOpEvalSnippet",
+  "BatchNormSnippet",
   "TimeSlotContainer",
+  "MulOpEvalSnippet",
+  "SubOpEvalSnippet",
+  "ConvOpEvalSnippet",
+  "MeanOpEvalSnippet",
+  "SoftmaxOpEvalSnippet",
+  "SigmoidOpEvalSnippet",
   "SimpleContainer",
 ]
 
@@ -156,6 +164,9 @@ class DepthwiseSeperateConvOpEvalSnippet(OpEvalSnippet):
   __inputs__ = ["in", "depthwise_filter", "pointwise_filter"]
   __outputs__ = ["out"]
 
+class ConvOpEvalSnippet(OpEvalSnippet):
+  __inputs__ = ["in", "filter"]
+  __outputs__ = ["out"]
 
 class QuantDepthwiseSeperateConvOpEvalSnippet(OpEvalSnippet):
   __inputs__ = ["in", "filter", "bias"]
@@ -226,11 +237,31 @@ class MaxPoolEvalSnippet(OpEvalSnippet):
   __inputs__ = ["in"]
   __outputs__ = ["out"]
 
+class FullyConnectedSnippet(OpEvalSnippet):
+  __inputs__ = ["input", "filter", "bias"]
+  __outputs__ = ["output"]
 
 class QuantizedFullyConnectedSnippet(OpEvalSnippet):
   __inputs__ = ["input", "filter", "bias"]
   __outputs__ = ["output"]
 
+class BatchNormSnippet(OpEvalSnippet):
+  __inputs__ = ["x", "mean", "variance", "offset", "scale"]
+  __outputs__ = ["output"]
+
+class MulOpEvalSnippet(OpEvalSnippet):
+  __inputs__ = ['a', 'b']
+  __outputs__ = ['c']
+class SubOpEvalSnippet(OpEvalSnippet):
+  __inputs__ = ['a', 'b']
+  __outputs__ = ['c']
+class MeanOpEvalSnippet(OpEvalSnippet):
+  __inputs__ = ['input', 'axis']
+  __outputs__ = ['output']
+class SoftmaxOpEvalSnippet(OpEvalSnippet):
+  __inputs__ = ['input']
+  __outputs__ = ['output']
+
 
 class MissingOpEvalSnippet(OpEvalSnippet):
   __template_name__ = "snippets/rearch/op_missing.cpp"
@@ -252,6 +283,9 @@ def __init__(self, op_info, tensor_var_map):
     ]
     self.template_vars['output_tensors'] = op_info.output_tensors[:]
     self.template_vars['quant_params_map'] = quant_params_map
+class SigmoidOpEvalSnippet(OpEvalSnippet):
+  __inputs__ = ['in']
+  __outputs__ = ['out']
 
 
 class TimeSlotContainer(SnippetBase):
diff --git a/utensor_cgen/frontend/tflite.py b/utensor_cgen/frontend/tflite.py
index d7229a35..9897af2f 100644
--- a/utensor_cgen/frontend/tflite.py
+++ b/utensor_cgen/frontend/tflite.py
@@ -82,10 +82,18 @@ def _build_tensor_map(self, fb_model, ugraph):
       tensor_name = tensor.Name().decode('utf8')
       if tensor_name is "" or None:
         tensor_name = "tensor_" + str(idx)
-
+      dtype = self._TENSOR_NP_TYPE[tensor.Type()]
       attributes = dict()
       quant_params = tensor.Quantization()
+# if quant_params is not None and quant_params.ZeroPointAsNumpy() and quant_params.ScaleAsNumpy():
+# zp = quant_params.ZeroPointAsNumpy()
+# if zp.dtype == np.dtype('
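
Review note: all six new reference-kernel operators in `_impls.py` follow one registration pattern: a class decorated with `@OperatorFactory.register`, a `namespaces`/`op_type` pair, an optional `get_constructor_parameters`, and `get_declare_snippet`/`get_eval_snippet` methods whose snippet class fixes the template-variable names via `__inputs__`/`__outputs__`. For future additions, here is a minimal sketch of the pattern. It assumes the same module context as `_impls.py` and `_snippets.py` above (`OperatorFactory`, `_Operator`, `DeclareOpSnippet`, `OpEvalSnippet` in scope); everything named `Div*` is hypothetical, for illustration only.

```python
# Sketch only, not part of this diff. Mirrors the _MulOperator/_SubOperator
# pattern above; _DivOperator and DivOpEvalSnippet are made-up names.

class DivOpEvalSnippet(OpEvalSnippet):
  # template-variable names bound to the op's input/output tensors,
  # mirroring MulOpEvalSnippet / SubOpEvalSnippet (would live in _snippets.py)
  __inputs__ = ['a', 'b']
  __outputs__ = ['c']


@OperatorFactory.register                 # registered under `op_type`
class _DivOperator(_Operator):            # would live in _impls.py
  namespaces = ('ReferenceOperators',)    # emitted as nested C++ namespaces
  op_type = 'DivOperator'                 # must match the lowered op type

  def get_declare_snippet(self, op_var_name, tensor_var_map):
    # declares the operator variable in the generated model source
    return DeclareOpSnippet(
      op=self,
      templ_dtypes=[self.in_dtypes[0]],
      op_var_name=op_var_name,
      nested_namespaces=type(self).namespaces,
    )

  def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
    # binds input/output tensor variables and emits the eval call
    return DivOpEvalSnippet(
      op_info=op_info,
      templ_dtypes=[self.in_dtypes[0]],
      op_name=op_var_name,
      tensor_var_map=tensor_var_map,
      nested_namespaces=type(self).namespaces,
    )
```

Wiring such an op end to end would also need the frontend rename (e.g. a hypothetical `'Div': 'DivOperator'` entry in `OptypeRenameManager.NAME_MAP`) and the snippet's name in `__all__`, mirroring the Mul/Sub changes in this diff.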
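
Review note: the untouched context line `if tensor_name is "" or None:` in `tflite.py` carries a pre-existing bug worth folding into this PR while the function is being edited. The expression parses as `(tensor_name is "") or None`, so the `or None` arm is always falsy and never affects the branch, and `is` against a string literal is an identity check that CPython only happens to satisfy through interning (newer Pythons emit a SyntaxWarning for it). A plain truth test covers both the empty and missing cases:

```python
if not tensor_name:  # handles both "" and None
  tensor_name = "tensor_" + str(idx)
```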