diff --git a/nntrainer/layers/pooling2d_layer.cpp b/nntrainer/layers/pooling2d_layer.cpp
index a68e42e8d0..7f06a61765 100644
--- a/nntrainer/layers/pooling2d_layer.cpp
+++ b/nntrainer/layers/pooling2d_layer.cpp
@@ -6,6 +6,8 @@
  * @date   12 June 2020
  * @see    https://github.com/nnstreamer/nntrainer
  * @author Jijoong Moon <jijoong.moon@samsung.com>
+ * @author Donghak Park <donghak.park@samsung.com>
+ * @author Jiho Chu <jiho.chu@samsung.com>
  * @bug    No known bugs except for NYI items
  * @brief  This is 2 Dimensional Pooling Layer Class for Neural Network
  *
@@ -26,6 +28,13 @@ namespace nntrainer {
 
 static constexpr size_t SINGLE_INOUT_IDX = 0;
 
+/**
+ * @brief Helper defining the pooling handler function type
+ */
+template <typename T> struct PoolFunc {
+  typedef std::function<T(const T *, int, int, int)> Type;
+};
+
 Pooling2DLayer::Pooling2DLayer(
   const std::array<unsigned int, POOLING2D_DIM * 2> &padding_) :
   Layer(),
@@ -73,7 +82,9 @@ void Pooling2DLayer::finalize(InitLayerContext &context) {
     NNTR_THROW_IF(pt + pb + pl + pr != 0, std::invalid_argument)
       << "[Pooling2D] global_max, global_average does not accept padding";
 
-    NNTR_THROW_IF(stride[0] != 1 || stride[1] != 1, std::invalid_argument)
+    NNTR_THROW_IF(static_cast<int>(stride[0]) != 1 ||
+                    static_cast<int>(stride[1]) != 1,
+                  std::invalid_argument)
       << "[Pooling2D] global_max, global_average does not accept stride";
   }
 
@@ -96,6 +107,7 @@ void Pooling2DLayer::finalize(InitLayerContext &context) {
   out_dim.channel(in_dim.channel());
   out_dim.height((eff_in_height - pool_size[0]) / stride[0] + 1);
   out_dim.width((eff_in_width - pool_size[1]) / stride[1] + 1);
+  out_dim.setDataType(in_dim.getDataType());
   context.setOutputDimensions({out_dim});
 
   /**
@@ -111,13 +123,18 @@ void Pooling2DLayer::finalize(InitLayerContext &context) {
    * // clang-format on
    */
   if (pooling_type == props::PoolingTypeInfo::Enum::global_max) {
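+    // the helper holds int indices (read via getData<int>()), so request it
+    // as FP32 to keep 4-byte elements even when activations are FP16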
+    auto helper_dim = in_dim;
+    helper_dim.setDataType(ml::train::TensorDim::DataType::FP32);
     pool_helper_idx =
-      context.requestTensor(in_dim, "helper_idx", Tensor::Initializer::NONE,
+      context.requestTensor(helper_dim, "helper_idx", Tensor::Initializer::NONE,
                             false, TensorLifespan::ITERATION_LIFESPAN);
-    pool_helper_size.resize(in_dim.batch() * in_dim.channel());
+    pool_helper_size.resize(helper_dim.batch() * helper_dim.channel());
   } else {
+    auto helper_dim = out_dim;
+    helper_dim.setDataType(ml::train::TensorDim::DataType::FP32);
     pool_helper_idx =
-      context.requestTensor(out_dim, "helper_idx", Tensor::Initializer::NONE,
+      context.requestTensor(helper_dim, "helper_idx", Tensor::Initializer::NONE,
                             false, TensorLifespan::ITERATION_LIFESPAN);
   }
 }
@@ -172,15 +188,12 @@ void Pooling2DLayer::calcDerivative(RunLayerContext &context) {
   unsigned int J, K;
 
   result.setZero();
-  float *result_data = result.getData();
 
   unsigned int out_map_size = deriv.height() * deriv.width();
   unsigned int in_map_size = height * width;
-
-  switch (pooling_type) {
-  case props::PoolingTypeInfo::Enum::max: {
+  auto apply_max = [&]<typename T>(T *result_data) {
     const int *iter = pool_helper.getData<int>();
-    const float *deriv_data = deriv.getData();
+    const T *deriv_data = deriv.getData<T>();
     for (unsigned int b = 0; b < batch; ++b) {
       for (unsigned int c = 0; c < channel; ++c) {
         for (unsigned int i = 0; i < out_map_size; ++i) {
@@ -195,9 +208,9 @@ void Pooling2DLayer::calcDerivative(RunLayerContext &context) {
         result_data += in_map_size;
       }
     }
-  } break;
-  case props::PoolingTypeInfo::Enum::global_average:
-  case props::PoolingTypeInfo::Enum::average: {
+  };
+
+  auto apply_average = [&]<typename T>(T *result_data) {
     int height_stride_end = height - p_height + pt;
     int width_stride_end = width - p_width + pl;
     const int *iter = pool_helper.getData<int>();
@@ -207,7 +220,7 @@ void Pooling2DLayer::calcDerivative(RunLayerContext &context) {
         for (int j = -pt; j <= height_stride_end; j += stride[0]) {
           K = 0;
           for (int k = -pl; k <= width_stride_end; k += stride[1]) {
-            float del = deriv.getValue<float>(b, i, J, K) / *iter;
+            T del = deriv.getValue<T>(b, i, J, K) / *iter;
             int patch_height_end =
               std::min(static_cast<int>(j + p_height), height);
             int patch_width_end =
@@ -217,7 +230,7 @@ void Pooling2DLayer::calcDerivative(RunLayerContext &context) {
             for (int h = start_h; h < patch_height_end; ++h) {
               for (int w = start_w; w < patch_width_end; ++w) {
                 result.setValue(b, i, h, w,
-                                result.getValue<float>(b, i, h, w) + del);
+                                result.getValue<T>(b, i, h, w) + del);
               }
             }
             iter++;
@@ -227,26 +240,65 @@ void Pooling2DLayer::calcDerivative(RunLayerContext &context) {
         }
       }
     }
-  } break;
-  case props::PoolingTypeInfo::Enum::global_max: {
-    const float *deriv_data = deriv.getData();
+  };
+
+  auto apply_global_max = [&]<typename T>(T *result_data) {
+    const T *deriv_data = deriv.getData<T>();
     for (unsigned int b = 0; b < batch; b++) {
       for (unsigned int c = 0; c < channel; c++) {
         const int *iter =
           pool_helper.getData<int>() + pool_helper.getIndex(b, c, 0, 0);
         unsigned int helper_size = pool_helper_size[b * channel + c];
-        float der = *deriv_data / helper_size;
+        T der = *deriv_data / static_cast<T>(helper_size);
 
         for (unsigned int idx = 0; idx < helper_size; idx++)
           result_data[iter[idx]] += der;
-
         deriv_data++;
         result_data += in_map_size;
       }
     }
-  } break;
-  default:
-    throw std::runtime_error("Error: Unknown Pooling Type");
+  };
+
+  auto in_data_type = in_dim.getDataType();
+
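+  // dispatch on the input data type; the FP16 path is compiled only with ENABLE_FP16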
+  if (in_data_type == ml::train::TensorDim::DataType::FP32) {
+    switch (pooling_type) {
+    case props::PoolingTypeInfo::Enum::max:
+      apply_max(result.getData<float>());
+      break;
+    case props::PoolingTypeInfo::Enum::global_average:
+    case props::PoolingTypeInfo::Enum::average:
+      apply_average(result.getData<float>());
+      break;
+    case props::PoolingTypeInfo::Enum::global_max:
+      apply_global_max(result.getData<float>());
+      break;
+    default:
+      throw std::runtime_error("Error: Unknown Pooling Type");
+    }
+  }
+#ifdef ENABLE_FP16
+  else if (in_data_type == ml::train::TensorDim::DataType::FP16) {
+
+    switch (pooling_type) {
+    case props::PoolingTypeInfo::Enum::max:
+      apply_max(result.getData<_FP16>());
+      break;
+    case props::PoolingTypeInfo::Enum::global_average:
+    case props::PoolingTypeInfo::Enum::average:
+      apply_average(result.getData<_FP16>());
+      break;
+    case props::PoolingTypeInfo::Enum::global_max:
+      apply_global_max(result.getData<_FP16>());
+      break;
+    default:
+      throw std::runtime_error("Error: Unknown Pooling Type");
+    }
+  }
+#endif
+  else {
+    throw std::runtime_error("Unsupported datatype");
   }
 }
 
@@ -290,124 +338,168 @@ void Pooling2DLayer::pooling2d(Tensor &in, bool training, Tensor &output,
    * @param start_w (width index pointing the start of the patch)
    * @return result value of pooling
    */
-  std::function<float(const float *, int, int, int)> pool_fn;
+  PoolFunc<float>::Type pool_fn_fp32;
+#ifdef ENABLE_FP16
+  PoolFunc<_FP16>::Type pool_fn_fp16;
+#endif
 
   unsigned int max_idx_count = 0;
-  switch (pooling_type) {
-  case props::PoolingTypeInfo::Enum::max: {
-    pool_fn = [&](const float *in_data, int channel_idx, int start_h,
-                  int start_w) {
-      int end_h = start_h + patch_height;
-      int end_w = start_w + patch_width;
-
-      float max_val = std::numeric_limits<float>::lowest();
-
-      int cur_max_idx = -1;
-      int eff_end_h = std::min(end_h, in_height);
-      int eff_end_w = std::min(end_w, in_width);
-      start_w = std::max(0, start_w);
-      for (int h = std::max(0, start_h); h < eff_end_h; ++h) {
-        for (int w = start_w; w < eff_end_w; ++w) {
-          int cur_idx = h * in_width + w;
-          float val = in_data[cur_idx];
-          if (max_val < val) {
-            max_val = val;
-            if (training) {
-              cur_max_idx = cur_idx;
-            }
+
+  auto pool_fn_max = [&]<typename T>(const T *in_data, int channel_idx,
+                                     int start_h, int start_w) {
+    int end_h = start_h + patch_height;
+    int end_w = start_w + patch_width;
+
+    T max_val = std::numeric_limits<T>::lowest();
+
+    int cur_max_idx = -1;
+    int eff_end_h = std::min(end_h, in_height);
+    int eff_end_w = std::min(end_w, in_width);
+    start_w = std::max(0, start_w);
+    for (int h = std::max(0, start_h); h < eff_end_h; ++h) {
+      for (int w = start_w; w < eff_end_w; ++w) {
+        int cur_idx = h * in_width + w;
+        T val = in_data[cur_idx];
+        if (max_val < val) {
+          max_val = val;
+          if (training) {
+            cur_max_idx = cur_idx;
           }
         }
       }
+    }
 
-      if (training) {
-        pool_helper.setValueInt(max_idx_count++, cur_max_idx);
-      }
+    if (training) {
+      pool_helper.setValueInt(max_idx_count++, cur_max_idx);
+    }
 
-      return max_val;
-    };
-    break;
-  }
-  case props::PoolingTypeInfo::Enum::global_max: {
-    pool_fn = [&, this](const float *in_data, int channel_idx, int start_h,
-                        int start_w) {
-      int end_h = start_h + patch_height;
-      int end_w = start_w + patch_width;
-
-      float max_val = std::numeric_limits<float>::lowest();
-      int *helper_data = pool_helper.getData<int>();
-      helper_data += channel_idx * in_height * in_width;
-
-      for (int h = start_h; h < end_h; ++h) {
-        for (int w = start_w; w < end_w; ++w) {
-          int cur_idx = h * in_width + w;
-          float val = in_data[cur_idx];
-          if (max_val < val) {
-            max_val = val;
-            max_idx_count = 0;
-          }
+    return max_val;
+  };
 
-          if (training && max_val == val) {
-            *(helper_data + max_idx_count++) = cur_idx;
-          }
+  auto pool_fn_global_max = [&, this]<typename T>(const T *in_data,
+                                                  int channel_idx, int start_h,
+                                                  int start_w) {
+    int end_h = start_h + patch_height;
+    int end_w = start_w + patch_width;
+
+    T max_val = std::numeric_limits<T>::lowest();
+    int *helper_data = pool_helper.getData<int>();
+    helper_data += channel_idx * in_height * in_width;
+
+    for (int h = start_h; h < end_h; ++h) {
+      for (int w = start_w; w < end_w; ++w) {
+        int cur_idx = h * in_width + w;
+        T val = in_data[cur_idx];
+        if (max_val < val) {
+          max_val = val;
+          max_idx_count = 0;
         }
-      }
 
-      pool_helper_size[batch_idx * in.channel() + channel_idx] = max_idx_count;
-      return max_val;
-    };
-    break;
-  }
-  case props::PoolingTypeInfo::Enum::global_average:
-  case props::PoolingTypeInfo::Enum::average: {
-    pool_fn = [&](const float *in_data, int channel_idx, int start_h,
-                  int start_w) {
-      int end_h = start_h + patch_height;
-      int end_w = start_w + patch_width;
-      float total = 0.0f;
-
-      int eff_end_h = std::min(end_h, in_height);
-      int eff_end_w = std::min(end_w, in_width);
-      int eff_start_h = std::max(0, start_h);
-      int eff_start_w = std::max(0, start_w);
-
-      int cnt = (eff_end_h - eff_start_h) * (eff_end_w - eff_start_w);
-      for (int h = eff_start_h; h < eff_end_h; ++h) {
-        for (int w = eff_start_w; w < eff_end_w; ++w) {
-          float val = in_data[h * in_width + w];
-          total += val;
+        if (training && max_val == val) {
+          *(helper_data + max_idx_count++) = cur_idx;
         }
       }
+    }
 
-      if (training) {
-        pool_helper.setValueInt(max_idx_count++, cnt);
+    pool_helper_size[batch_idx * in.channel() + channel_idx] = max_idx_count;
+    return max_val;
+  };
+
+  auto pool_fn_average = [&]<typename T>(const T *in_data, int channel_idx,
+                                         int start_h, int start_w) {
+    int end_h = start_h + patch_height;
+    int end_w = start_w + patch_width;
+    T total = static_cast<T>(0.0f);
+
+    int eff_end_h = std::min(end_h, in_height);
+    int eff_end_w = std::min(end_w, in_width);
+    int eff_start_h = std::max(0, start_h);
+    int eff_start_w = std::max(0, start_w);
+
+    int cnt = (eff_end_h - eff_start_h) * (eff_end_w - eff_start_w);
+    for (int h = eff_start_h; h < eff_end_h; ++h) {
+      for (int w = eff_start_w; w < eff_end_w; ++w) {
+        T val = in_data[h * in_width + w];
+        total += val;
       }
-      return total / cnt;
-    };
+    }
+
+    if (training) {
+      pool_helper.setValueInt(max_idx_count++, cnt);
+    }
+    return total / cnt;
+  };
+
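+  // bind the generic lambda for the selected pooling type to the typed handler(s)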
+  switch (pooling_type) {
+  case props::PoolingTypeInfo::Enum::max:
+    pool_fn_fp32 = pool_fn_max;
+#ifdef ENABLE_FP16
+    pool_fn_fp16 = pool_fn_max;
+#endif
+    break;
+  case props::PoolingTypeInfo::Enum::global_max:
+    pool_fn_fp32 = pool_fn_global_max;
+#ifdef ENABLE_FP16
+    pool_fn_fp16 = pool_fn_global_max;
+#endif
+    break;
+  case props::PoolingTypeInfo::Enum::global_average:
+  case props::PoolingTypeInfo::Enum::average:
+    pool_fn_fp32 = pool_fn_average;
+#ifdef ENABLE_FP16
+    pool_fn_fp16 = pool_fn_average;
+#endif
     break;
-  }
   case props::PoolingTypeInfo::Enum::unknown:
   default:
     throw std::invalid_argument("unknown pooling type given");
     break;
   }
 
-  const float *in_data = in.getData();
-  float *out_data = output.getData();
-
-  unsigned int map_size = in_height * in_width;
-
-  int height_stride_end = height - patch_height - pt;
-  int width_stride_end = width - patch_width - pl;
-  for (unsigned int i = 0; i < channel; ++i) {
-    const float *in_data_channel_sliced = in_data + i * map_size;
-    for (int j = -pt; j <= height_stride_end; j += stride[0]) {
-      for (int k = -pl; k <= width_stride_end; k += stride[1]) {
-        float pool_value = pool_fn(in_data_channel_sliced, i, j, k);
-        *out_data = pool_value;
-        out_data++;
+  if (in.getDataType() == ml::train::TensorDim::DataType::FP32) {
+    const float *in_data = in.getData<float>();
+    float *out_data = output.getData<float>();
+
+    unsigned int map_size = in_height * in_width;
+
+    int height_stride_end = height - patch_height - pt;
+    int width_stride_end = width - patch_width - pl;
+    for (unsigned int i = 0; i < channel; ++i) {
+      const float *in_data_channel_sliced = in_data + i * map_size;
+      for (int j = -pt; j <= height_stride_end; j += stride[0]) {
+        for (int k = -pl; k <= width_stride_end; k += stride[1]) {
+          float pool_value = pool_fn_fp32(in_data_channel_sliced, i, j, k);
+          *out_data = pool_value;
+          out_data++;
+        }
+      }
+    }
+  }
+#ifdef ENABLE_FP16
+  else if (in.getDataType() == ml::train::TensorDim::DataType::FP16) {
+    const _FP16 *in_data = in.getData<_FP16>();
+    _FP16 *out_data = output.getData<_FP16>();
+
+    unsigned int map_size = in_height * in_width;
+
+    int height_stride_end = height - patch_height - pt;
+    int width_stride_end = width - patch_width - pl;
+    for (unsigned int i = 0; i < channel; ++i) {
+      const _FP16 *in_data_channel_sliced = in_data + i * map_size;
+      for (int j = -pt; j <= height_stride_end; j += stride[0]) {
+        for (int k = -pl; k <= width_stride_end; k += stride[1]) {
+          _FP16 pool_value = pool_fn_fp16(in_data_channel_sliced, i, j, k);
+          *out_data = pool_value;
+          out_data++;
+        }
       }
     }
   }
+#endif
+  else {
+    throw std::runtime_error("Not supported datatype");
+  }
 }
 
 void Pooling2DLayer::setBatch(RunLayerContext &context, unsigned int batch) {
diff --git a/nntrainer/models/neuralnet.cpp b/nntrainer/models/neuralnet.cpp
index ceaa5b6778..de3a0953a4 100644
--- a/nntrainer/models/neuralnet.cpp
+++ b/nntrainer/models/neuralnet.cpp
@@ -1031,7 +1031,7 @@ int NeuralNetwork::train_run(
         break;
       }
       auto &iteration = iter_view.get();
-      if (iteration.batch() != batch_size) {
+      if (iteration.batch() != static_cast<unsigned int>(batch_size)) {
         /// @todo support partial batch
         continue;
       }