From 0711bc2e4ff8ce50b473fa280b69e77e844ee6f7 Mon Sep 17 00:00:00 2001
From: MyungJoo Ham
Date: Wed, 10 Jan 2024 17:12:59 +0900
Subject: [PATCH] Remove dependencies on FP16 support status.

Most modules should NOT depend on the existence of FP16 support.
Let a single (or minimal) entity (a single class?) know and handle it.

This is a suggestion commit that shows an example of how to refactor it.
Most modules are not yet touched by this commit.

Signed-off-by: MyungJoo Ham
---
 Applications/YOLO/jni/yolo_v2_loss.cpp | 48 +++++++++-----------------
 api/ccapi/include/layer.h              |  3 +-
 api/ccapi/include/tensor_dim.h         | 15 ++++++++
 nntrainer/layers/layer_node.h          |  8 +++--
 4 files changed, 38 insertions(+), 36 deletions(-)

diff --git a/Applications/YOLO/jni/yolo_v2_loss.cpp b/Applications/YOLO/jni/yolo_v2_loss.cpp
index 8421dd24e..35dd9c508 100644
--- a/Applications/YOLO/jni/yolo_v2_loss.cpp
+++ b/Applications/YOLO/jni/yolo_v2_loss.cpp
@@ -178,11 +178,9 @@ calc_iou(nntrainer::Tensor &bbox1_x1, nntrainer::Tensor &bbox1_y1,
   if (type_intersection_width == ml::train::TensorDim::DataType::FP32) {
     intersection_width.apply_i(nntrainer::ActiFunc::relu);
   } else if (type_intersection_width == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     intersection_width.apply_i<_FP16>(nntrainer::ActiFunc::relu<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   intersection_y2.subtract(intersection_y1, intersection_height);
@@ -191,11 +189,9 @@ calc_iou(nntrainer::Tensor &bbox1_x1, nntrainer::Tensor &bbox1_y1,
   if (type_intersection_height == ml::train::TensorDim::DataType::FP32) {
     intersection_height.apply_i(nntrainer::ActiFunc::relu);
   } else if (type_intersection_height == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     intersection_height.apply_i<_FP16>(nntrainer::ActiFunc::relu<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   nntrainer::Tensor intersection =
@@ -238,12 +234,10 @@ std::vector<nntrainer::Tensor> calc_iou_grad(
     intersection_width_relu_prime =
       intersection_width.apply(nntrainer::ActiFunc::reluPrime);
   } else if (type_intersection_width == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     intersection_height_relu_prime =
       intersection_height.apply<_FP16>(nntrainer::ActiFunc::reluPrime<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   nntrainer::Tensor intersection_x2_local_grad =
@@ -539,22 +533,18 @@ void YoloV2LossLayer::forwarding(nntrainer::RunLayerContext &context,
   if (type_bbox_w_pred == ml::train::TensorDim::DataType::FP32) {
     bbox_w_pred.apply_i(nntrainer::exp_util);
   } else if (type_bbox_w_pred == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     bbox_w_pred.apply_i<_FP16>(nntrainer::exp_util<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   auto type_bbox_h_pred = bbox_h_pred.getDataType();
   if (type_bbox_h_pred == ml::train::TensorDim::DataType::FP32) {
     bbox_h_pred.apply_i(nntrainer::exp_util);
   } else if (type_bbox_h_pred == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     bbox_h_pred.apply_i<_FP16>(nntrainer::exp_util<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   sigmoid.run_fn(confidence_pred, confidence_pred);
@@ -569,11 +559,9 @@ void YoloV2LossLayer::forwarding(nntrainer::RunLayerContext &context,
   if (type_bbox_w_pred_anchor == ml::train::TensorDim::DataType::FP32) {
     bbox_w_pred_anchor.apply_i(nntrainer::sqrtFloat);
   } else if (type_bbox_w_pred_anchor == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     bbox_w_pred_anchor.apply_i<_FP16>(nntrainer::sqrtFloat<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   bbox_h_pred_anchor.multiply_i(anchors_h);
@@ -581,11 +569,9 @@ void YoloV2LossLayer::forwarding(nntrainer::RunLayerContext &context,
   auto type_bbox_h_pred_anchor = bbox_h_pred_anchor.getDataType();
   if (type_bbox_h_pred_anchor == ml::train::TensorDim::DataType::FP32) {
     bbox_h_pred_anchor.apply_i(nntrainer::sqrtFloat);
   } else if (type_bbox_h_pred_anchor == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     bbox_h_pred_anchor.apply_i<_FP16>(nntrainer::sqrtFloat<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   generate_ground_truth(context);
@@ -810,11 +796,9 @@ unsigned int YoloV2LossLayer::find_responsible_anchors(float bbox_ratio) {
   if (data_type == ml::train::TensorDim::DataType::FP32) {
     similarity.apply_i(nntrainer::absFloat);
   } else if (data_type == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     similarity.apply_i<_FP16>(nntrainer::absFloat<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   auto data = similarity.getData();
diff --git a/api/ccapi/include/layer.h b/api/ccapi/include/layer.h
index 9d1fd1f6e..77f848721 100644
--- a/api/ccapi/include/layer.h
+++ b/api/ccapi/include/layer.h
@@ -204,7 +204,6 @@ class Layer {
   virtual void getWeights(std::vector<float *> &weights,
                           std::vector<ml::train::TensorDim> &weights_dim) = 0;
 
-#ifdef ENABLE_FP16
   /**
    * @brief Get weight data of the layer
   * @retval weight data of the layer
@@ -225,7 +224,7 @@ class Layer {
    */
   virtual void getFP16Weights(std::vector<_FP16 *> &weights,
                               std::vector<ml::train::TensorDim> &weights_dim) = 0;
-#endif
+
   /**
    * @brief Set weight data of the layer
    * @note Size of vector must be the same with number of weights.
diff --git a/api/ccapi/include/tensor_dim.h b/api/ccapi/include/tensor_dim.h
index 64523618c..89c65387c 100644
--- a/api/ccapi/include/tensor_dim.h
+++ b/api/ccapi/include/tensor_dim.h
@@ -28,8 +28,23 @@
 #else
 #define _FP16 _Float16
 #endif
+#else /* !ENABLE_FP16 */
+#define _FP16 void /* Keep the FP16 programming interface, but don't allow using it at run time */
 #endif
 
+/**
+ * @brief Check if fp16 is enabled.
+ *        Let's not use #if/#endif for FP16 elsewhere.
+ * @todo Move to a proper header file!
+ */
+static bool is_fp16_enabled() {
+#ifdef ENABLE_FP16
+  return true;
+#else
+  return false;
+#endif
+}
+
 namespace ml {
 namespace train {
diff --git a/nntrainer/layers/layer_node.h b/nntrainer/layers/layer_node.h
index 74eb04ad2..6b57bf407 100644
--- a/nntrainer/layers/layer_node.h
+++ b/nntrainer/layers/layer_node.h
@@ -597,7 +597,6 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
     }
     return;
   }
-#ifdef ENABLE_FP16
   /**
    * @brief Get weight data of the layer
    * @retval weight data of the layer
@@ -606,6 +605,9 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
    * @note layer needs to be finalized before called.
    */
   const std::vector<_FP16 *> getFP16Weights() override {
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
+
     NNTR_THROW_IF(!run_context, std::runtime_error)
       << __func__ << " layer needs to be finalized first!";
 
@@ -626,6 +628,9 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
    */
   void getFP16Weights(std::vector<_FP16 *> &weights,
                       std::vector<ml::train::TensorDim> &weight_dim) override {
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
+
     NNTR_THROW_IF(!run_context, std::runtime_error)
       << __func__ << " layer needs to be finalized first!";
 
@@ -637,7 +642,6 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
     }
     return;
   }
-#endif
 
   /**
    * @brief Set weight data of the layer
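
Note for illustration (not part of the patch): every hunk above repeats the same
two-line guard before an <_FP16> instantiation. Below is a minimal sketch of how
that guard could be hoisted into one place, assuming only the is_fp16_enabled()
helper introduced in tensor_dim.h; the require_fp16() name is hypothetical.

#include <stdexcept>

/* Stand-in for the helper this patch adds to api/ccapi/include/tensor_dim.h. */
static bool is_fp16_enabled() {
#ifdef ENABLE_FP16
  return true;
#else
  return false;
#endif
}

/* Hypothetical helper: keep the runtime FP16 check in a single spot so
   call sites stop repeating the if/throw pair. */
static void require_fp16() {
  if (!is_fp16_enabled())
    throw std::runtime_error("Not supported data type: fp16");
}

/* A call site from yolo_v2_loss.cpp would then shrink to:
     require_fp16();
     bbox_w_pred.apply_i<_FP16>(nntrainer::exp_util<_FP16>);
 */

This keeps the single-entity ownership the commit message asks for while leaving
each call site one line of boilerplate instead of two.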