
Remove dependencies on FP16 support status.
Most modules should NOT depend on the existence of FP16 support.
Let a single (or minimal) entity (a single class?) know and handle it.

This is a suggestion commit that shows an example of how to
refactor it. Most modules are not yet touched in this commit.

Signed-off-by: MyungJoo Ham <[email protected]>
myungjoo committed Jan 10, 2024
1 parent faadfbc commit 0711bc2
Showing 4 changed files with 38 additions and 36 deletions.
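The gist of the refactor, as a minimal editor's sketch (call_site_before(), call_site_after() and do_fp16_work() are illustrative placeholders, not code from this commit): each FP16 call site used to carry its own compile-time guard, and after the change only tensor_dim.h looks at ENABLE_FP16 while call sites query a run-time helper.

#include <stdexcept>

void do_fp16_work();    /* placeholder for any operation on _FP16 data */
bool is_fp16_enabled(); /* provided by api/ccapi/include/tensor_dim.h after this commit */

/* Before: every module repeats the compile-time guard. */
void call_site_before() {
#ifdef ENABLE_FP16
  do_fp16_work();
#else
  throw std::runtime_error("Not supported data type");
#endif
}

/* After: the build flag is hidden behind one helper; missing FP16 support
 * becomes a run-time error instead of a compile-time exclusion. */
void call_site_after() {
  if (!is_fp16_enabled())
    throw std::runtime_error("Not supported data type: fp16");
  do_fp16_work();
}

The interface stays identical across builds; only the helper and the fallback definition of _FP16 know whether the build actually provides FP16.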
Applications/YOLO/jni/yolo_v2_loss.cpp (48 changes: 16 additions & 32 deletions)
@@ -178,11 +178,9 @@ calc_iou(nntrainer::Tensor &bbox1_x1, nntrainer::Tensor &bbox1_y1,
   if (type_intersection_width == ml::train::TensorDim::DataType::FP32) {
     intersection_width.apply_i<float>(nntrainer::ActiFunc::relu<float>);
   } else if (type_intersection_width == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     intersection_width.apply_i<_FP16>(nntrainer::ActiFunc::relu<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }

   intersection_y2.subtract(intersection_y1, intersection_height);
@@ -191,11 +189,9 @@ calc_iou(nntrainer::Tensor &bbox1_x1, nntrainer::Tensor &bbox1_y1,
   if (type_intersection_height == ml::train::TensorDim::DataType::FP32) {
     intersection_height.apply_i<float>(nntrainer::ActiFunc::relu<float>);
   } else if (type_intersection_height == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     intersection_height.apply_i<_FP16>(nntrainer::ActiFunc::relu<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }

   nntrainer::Tensor intersection =
@@ -238,12 +234,10 @@ std::vector<nntrainer::Tensor> calc_iou_grad(
     intersection_width_relu_prime =
       intersection_width.apply<float>(nntrainer::ActiFunc::reluPrime<float>);
   } else if (type_intersection_width == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     intersection_height_relu_prime =
       intersection_height.apply<_FP16>(nntrainer::ActiFunc::reluPrime<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }

   nntrainer::Tensor intersection_x2_local_grad =
@@ -539,22 +533,18 @@ void YoloV2LossLayer::forwarding(nntrainer::RunLayerContext &context,
   if (type_bbox_w_pred == ml::train::TensorDim::DataType::FP32) {
     bbox_w_pred.apply_i<float>(nntrainer::exp_util<float>);
   } else if (type_bbox_w_pred == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     bbox_w_pred.apply_i<_FP16>(nntrainer::exp_util<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }

   auto type_bbox_h_pred = bbox_h_pred.getDataType();
   if (type_bbox_h_pred == ml::train::TensorDim::DataType::FP32) {
     bbox_h_pred.apply_i<float>(nntrainer::exp_util<float>);
   } else if (type_bbox_h_pred == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     bbox_h_pred.apply_i<_FP16>(nntrainer::exp_util<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }

   sigmoid.run_fn(confidence_pred, confidence_pred);
@@ -569,23 +559,19 @@ void YoloV2LossLayer::forwarding(nntrainer::RunLayerContext &context,
   if (type_bbox_w_pred_anchor == ml::train::TensorDim::DataType::FP32) {
     bbox_w_pred_anchor.apply_i<float>(nntrainer::sqrtFloat<float>);
   } else if (type_bbox_w_pred_anchor == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     bbox_w_pred_anchor.apply_i<_FP16>(nntrainer::sqrtFloat<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }

   bbox_h_pred_anchor.multiply_i(anchors_h);
   auto type_bbox_h_pred_anchor = bbox_h_pred_anchor.getDataType();
   if (type_bbox_h_pred_anchor == ml::train::TensorDim::DataType::FP32) {
     bbox_h_pred_anchor.apply_i<float>(nntrainer::sqrtFloat<float>);
   } else if (type_bbox_h_pred_anchor == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     bbox_h_pred_anchor.apply_i<_FP16>(nntrainer::sqrtFloat<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }

   generate_ground_truth(context);
@@ -810,11 +796,9 @@ unsigned int YoloV2LossLayer::find_responsible_anchors(float bbox_ratio) {
   if (data_type == ml::train::TensorDim::DataType::FP32) {
     similarity.apply_i<float>(nntrainer::absFloat<float>);
   } else if (data_type == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
     similarity.apply_i<_FP16>(nntrainer::absFloat<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
   auto data = similarity.getData();
api/ccapi/include/layer.h (3 changes: 1 addition & 2 deletions)
@@ -204,7 +204,6 @@ class Layer {
   virtual void getWeights(std::vector<float *> &weights,
                           std::vector<ml::train::TensorDim> &weights_dim) = 0;

-#ifdef ENABLE_FP16
   /**
    * @brief Get weight data of the layer
    * @retval weight data of the layer
@@ -225,7 +224,7 @@
   virtual void
   getFP16Weights(std::vector<_FP16 *> &weights,
                  std::vector<ml::train::TensorDim> &weights_dim) = 0;
-#endif
+
   /**
    * @brief Set weight data of the layer
    * @note Size of vector must be the same with number of weights.
api/ccapi/include/tensor_dim.h (15 changes: 15 additions & 0 deletions)
@@ -28,8 +28,23 @@
 #else
 #define _FP16 _Float16
 #endif
+#else /* !ENABLE_FP16 */
+#define _FP16 void /* Keep the FP16 programming interface, but don't allow using it in run-time */
 #endif

+/**
+ * @brief Check if fp16 is enabled. Let's not use #if/#endif for FP16 elsewhere
+ * @todo Move to a proper header file!
+ */
+static bool is_fp16_enabled ()
+{
+#ifdef ENABLE_FP16
+  return true;
+#else
+  return false;
+#endif
+}
+
 namespace ml {
 namespace train {
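With the helper in tensor_dim.h, code that needs to choose a data type can branch at run time instead of behind the preprocessor. A hypothetical caller (pick_compute_type() is not part of this diff; only the enum values are) might look like:

ml::train::TensorDim::DataType pick_compute_type() {
  /* Prefer FP16 only when the build actually provides it; no #ifdef here. */
  return is_fp16_enabled() ? ml::train::TensorDim::DataType::FP16
                           : ml::train::TensorDim::DataType::FP32;
}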
nntrainer/layers/layer_node.h (8 changes: 6 additions & 2 deletions)
@@ -597,7 +597,6 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
     }
     return;
   }
-#ifdef ENABLE_FP16
   /**
    * @brief Get weight data of the layer
    * @retval weight data of the layer
@@ -606,6 +605,9 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
    * @note layer needs to be finalized before called.
    */
   const std::vector<_FP16 *> getFP16Weights() override {
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
+
     NNTR_THROW_IF(!run_context, std::runtime_error)
       << __func__ << " layer needs to be finalized first!";

@@ -626,6 +628,9 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
    */
   void getFP16Weights(std::vector<_FP16 *> &weights,
                       std::vector<TensorDim> &weight_dim) override {
+    if (!is_fp16_enabled())
+      throw std::runtime_error("Not supported data type: fp16");
+
     NNTR_THROW_IF(!run_context, std::runtime_error)
       << __func__ << " layer needs to be finalized first!";

@@ -637,7 +642,6 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
     }
     return;
   }
-#endif

   /**
    * @brief Set weight data of the layer
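Because getFP16Weights() is now declared on every build and only fails at run time, a caller can branch on the same helper rather than wrap the call in #ifdef. A hypothetical sketch (node stands for any finalized layer; getWeights() is the FP32 accessor declared in layer.h):

if (is_fp16_enabled()) {
  const std::vector<_FP16 *> w16 = node->getFP16Weights();
  /* ... consume FP16 weights ... */
} else {
  std::vector<float *> weights;
  std::vector<ml::train::TensorDim> dims;
  node->getWeights(weights, dims); /* FP32 path, available in every build */
}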
