Remove dependencies on FP16 support status.
Most modules should NOT depend on the existence of FP16 support.
Let a single (or minimal) entity (a single class?) know and handle it.

This is a suggestion commit that shows an example of how to
refactor it. Most modules are not yet touched by this commit.

Signed-off-by: MyungJoo Ham <[email protected]>
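
A minimal sketch of the pattern being suggested (hypothetical fp16_op_before()/fp16_op_after(), not code from this commit): the build-time branches scattered across modules collapse into a single runtime query owned by one place.

#include <stdexcept>

// Before: every module repeats the build-time check.
void fp16_op_before() {
#ifdef ENABLE_FP16
  // ... fp16 path ...
#else
  throw std::runtime_error("Not supported data type");
#endif
}

// After: one entity knows the build configuration; modules ask it at
// run time and carry no #ifdef of their own.
static bool is_fp16_enabled() {
#ifdef ENABLE_FP16
  return true;
#else
  return false;
#endif
}

void fp16_op_after() {
  if (!is_fp16_enabled())
    throw std::runtime_error("The data type 'fp16' is not supported.");
  // ... fp16 path ...
}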
myungjoo committed Jan 11, 2024
1 parent faadfbc commit cf20b63
Showing 4 changed files with 35 additions and 40 deletions.
40 changes: 8 additions & 32 deletions Applications/YOLO/jni/yolo_v2_loss.cpp
@@ -178,11 +178,8 @@ calc_iou(nntrainer::Tensor &bbox1_x1, nntrainer::Tensor &bbox1_y1,
   if (type_intersection_width == ml::train::TensorDim::DataType::FP32) {
     intersection_width.apply_i<float>(nntrainer::ActiFunc::relu<float>);
   } else if (type_intersection_width == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    throw_unless_fp16_enabled;
     intersection_width.apply_i<_FP16>(nntrainer::ActiFunc::relu<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   intersection_y2.subtract(intersection_y1, intersection_height);
@@ -191,11 +188,8 @@ calc_iou(nntrainer::Tensor &bbox1_x1, nntrainer::Tensor &bbox1_y1,
   if (type_intersection_height == ml::train::TensorDim::DataType::FP32) {
     intersection_height.apply_i<float>(nntrainer::ActiFunc::relu<float>);
   } else if (type_intersection_height == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    throw_unless_fp16_enabled;
     intersection_height.apply_i<_FP16>(nntrainer::ActiFunc::relu<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   nntrainer::Tensor intersection =
@@ -238,12 +232,9 @@ std::vector<nntrainer::Tensor> calc_iou_grad(
     intersection_width_relu_prime =
       intersection_width.apply<float>(nntrainer::ActiFunc::reluPrime<float>);
   } else if (type_intersection_width == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    throw_unless_fp16_enabled;
     intersection_height_relu_prime =
       intersection_height.apply<_FP16>(nntrainer::ActiFunc::reluPrime<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   nntrainer::Tensor intersection_x2_local_grad =
@@ -539,22 +530,16 @@ void YoloV2LossLayer::forwarding(nntrainer::RunLayerContext &context,
   if (type_bbox_w_pred == ml::train::TensorDim::DataType::FP32) {
     bbox_w_pred.apply_i<float>(nntrainer::exp_util<float>);
   } else if (type_bbox_w_pred == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    throw_unless_fp16_enabled;
     bbox_w_pred.apply_i<_FP16>(nntrainer::exp_util<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   auto type_bbox_h_pred = bbox_h_pred.getDataType();
   if (type_bbox_h_pred == ml::train::TensorDim::DataType::FP32) {
     bbox_h_pred.apply_i<float>(nntrainer::exp_util<float>);
   } else if (type_bbox_h_pred == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    throw_unless_fp16_enabled;
     bbox_h_pred.apply_i<_FP16>(nntrainer::exp_util<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   sigmoid.run_fn(confidence_pred, confidence_pred);
@@ -569,23 +554,17 @@ void YoloV2LossLayer::forwarding(nntrainer::RunLayerContext &context,
   if (type_bbox_w_pred_anchor == ml::train::TensorDim::DataType::FP32) {
     bbox_w_pred_anchor.apply_i<float>(nntrainer::sqrtFloat<float>);
   } else if (type_bbox_w_pred_anchor == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    throw_unless_fp16_enabled;
     bbox_w_pred_anchor.apply_i<_FP16>(nntrainer::sqrtFloat<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   bbox_h_pred_anchor.multiply_i(anchors_h);
   auto type_bbox_h_pred_anchor = bbox_h_pred_anchor.getDataType();
   if (type_bbox_h_pred_anchor == ml::train::TensorDim::DataType::FP32) {
     bbox_h_pred_anchor.apply_i<float>(nntrainer::sqrtFloat<float>);
   } else if (type_bbox_h_pred_anchor == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    throw_unless_fp16_enabled;
     bbox_h_pred_anchor.apply_i<_FP16>(nntrainer::sqrtFloat<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
 
   generate_ground_truth(context);
@@ -810,11 +789,8 @@ unsigned int YoloV2LossLayer::find_responsible_anchors(float bbox_ratio) {
   if (data_type == ml::train::TensorDim::DataType::FP32) {
     similarity.apply_i<float>(nntrainer::absFloat<float>);
   } else if (data_type == ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+    throw_unless_fp16_enabled;
     similarity.apply_i<_FP16>(nntrainer::absFloat<_FP16>);
-#else
-    throw std::runtime_error("Not supported data type");
-#endif
   }
   auto data = similarity.getData();

3 changes: 1 addition & 2 deletions api/ccapi/include/layer.h
@@ -204,7 +204,6 @@ class Layer {
   virtual void getWeights(std::vector<float *> &weights,
                           std::vector<ml::train::TensorDim> &weights_dim) = 0;
 
-#ifdef ENABLE_FP16
   /**
    * @brief Get weight data of the layer
    * @retval weight data of the layer
@@ -225,7 +224,7 @@ virtual void
   getFP16Weights(std::vector<_FP16 *> &weights,
                  std::vector<ml::train::TensorDim> &weights_dim) = 0;
-#endif
+
 
   /**
    * @brief Set weight data of the layer
    * @note Size of vector must be the same with number of weights.
21 changes: 21 additions & 0 deletions api/ccapi/include/tensor_dim.h
@@ -28,8 +28,29 @@
 #else
 #define _FP16 _Float16
 #endif
+#else /* !ENABLE_FP16 */
+#define _FP16 void /* Keep the FP16 programming interface, but don't allow using it in run-time */
 #endif
 
+/**
+ * @brief Check if fp16 is enabled. Let's not use #if/#endif for FP16 elsewhere
+ * @todo Move to a proper header file!
+ */
+static bool is_fp16_enabled ()
+{
+#ifdef ENABLE_FP16
+  return true;
+#else
+  return false;
+#endif
+}
+
+#define throw_unless_fp16_enabled \
+  do { \
+    if (!is_fp16_enabled()) \
+      throw std::runtime_error("The data type 'fp16' is not supported."); \
+  } while (0)
+
 namespace ml {
 namespace train {

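As a side note on the shape of the new macro (an editorial, self-contained sketch, not part of the diff): wrapping the body in do { ... } while (0) makes the expansion parse as a single statement, so it stays safe in un-braced if/else chains like the ones in yolo_v2_loss.cpp.

#include <stdexcept>

static bool is_fp16_enabled() {
#ifdef ENABLE_FP16
  return true;
#else
  return false;
#endif
}

// Same shape as the commit's macro: the do/while(0) body is one
// statement and forces a semicolon at the call site.
#define throw_unless_fp16_enabled                                         \
  do {                                                                    \
    if (!is_fp16_enabled())                                               \
      throw std::runtime_error("The data type 'fp16' is not supported."); \
  } while (0)

void run(bool use_fp16) {
  if (use_fp16)
    throw_unless_fp16_enabled; // expands to a single statement, so the
  else                         // else still binds to this if
    ; // fp32 path
}
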
11 changes: 5 additions & 6 deletions nntrainer/layers/layer_node.h
@@ -559,16 +559,13 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
 
       if (getWeight(idx).getDataType() ==
           ml::train::TensorDim::DataType::FP16) {
-#ifdef ENABLE_FP16
+        throw_unless_fp16_enabled;
         _FP16 *data = getWeight(idx).getData<_FP16>();
         float *d = new float[getWeight(idx).size()]();
         weights.emplace_back(d);
         for (unsigned int i = 0; i < getWeight(idx).size(); ++i) {
           weights[idx][i] = static_cast<float>(data[i]);
         }
-#else
-        throw std::runtime_error("enable-fp16 is not set");
-#endif
       } else {
         weights.emplace_back(getWeight(idx).getData());
       }
@@ -597,7 +594,6 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
     }
     return;
   }
-#ifdef ENABLE_FP16
   /**
    * @brief Get weight data of the layer
    * @retval weight data of the layer
@@ -606,6 +602,8 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
    * @note layer needs to be finalized before called.
    */
   const std::vector<_FP16 *> getFP16Weights() override {
+    throw_unless_fp16_enabled;
+
     NNTR_THROW_IF(!run_context, std::runtime_error)
       << __func__ << " layer needs to be finalized first!";

@@ -626,6 +624,8 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
    */
   void getFP16Weights(std::vector<_FP16 *> &weights,
                       std::vector<TensorDim> &weight_dim) override {
+    throw_unless_fp16_enabled;
+
     NNTR_THROW_IF(!run_context, std::runtime_error)
       << __func__ << " layer needs to be finalized first!";

@@ -637,7 +637,6 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
     }
     return;
   }
-#endif
 
   /**
    * @brief Set weight data of the layer
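One caller-side consequence worth noting (an editorial sketch; layer and dump_weights() are hypothetical, while the getWeights/getFP16Weights signatures are the ones declared above): since the declarations are no longer fenced by #ifdef, and _FP16 degrades to void in non-FP16 builds, code like this compiles identically in both configurations and the decision moves to run time.

#include <vector>

void dump_weights(ml::train::Layer *layer) {
  if (is_fp16_enabled()) {
    std::vector<_FP16 *> w16; // std::vector<void *> when fp16 is off
    std::vector<ml::train::TensorDim> dims;
    // Always declared now; would throw if reached in a non-FP16 build,
    // but the is_fp16_enabled() guard keeps this branch unreached there.
    layer->getFP16Weights(w16, dims);
    // ... consume fp16 weights ...
  } else {
    std::vector<float *> w32;
    std::vector<ml::train::TensorDim> dims;
    layer->getWeights(w32, dims); // unchanged fp32 API
    // ... consume fp32 weights ...
  }
}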
