Skip to content

Commit

Permalink
Enable more portable and optimized kernel tests (pytorch#3900)
Browse files Browse the repository at this point in the history
Summary: Pull Request resolved: pytorch#3900

Test Plan: CI

Reviewed By: shoumikhin

Differential Revision: D58307445

Pulled By: kirklandsign

fbshipit-source-id: 10b98f21ee1253040377bc739e2debea7bf4a3af
  • Loading branch information
kirklandsign authored and facebook-github-bot committed Jun 7, 2024
1 parent 84a11ca commit 6cac9c1
Show file tree
Hide file tree
Showing 14 changed files with 437 additions and 401 deletions.
38 changes: 38 additions & 0 deletions kernels/portable/test/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# This file should be formatted with
# ~~~
# cmake-format -i CMakeLists.txt
# ~~~
# It should also be cmake-lint clean.
#
# This file builds portable-specific tests

cmake_minimum_required(VERSION 3.19)
project(kernels_portable_test)

# Use C++17 for test.
set(CMAKE_CXX_STANDARD 17)

# Root of the executorch tree: three levels up from kernels/portable/test.
set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../..)

# Test helper module; expected to define et_cxx_test() used below — confirm
# against build/Test.cmake.
include(${EXECUTORCH_ROOT}/build/Test.cmake)

set(_kernels_portable_test_sources
    # So far we can't generate custom_ops.yaml in OSS so we can't build op
    # library with op_allclose. We disable the test for now.
    # "op_allclose_test.cpp"
    "op_div_test.cpp" "op_gelu_test.cpp" "op_mul_test.cpp"
)

# One test binary for all portable-specific kernel tests, linked against the
# portable kernel implementations and their op-registration library.
et_cxx_test(
  kernels_portable_test SOURCES ${_kernels_portable_test_sources} EXTRA_LIBS
  portable_kernels portable_ops_lib
)
# Headers are consumed from the install prefix rather than the source tree.
target_include_directories(
  kernels_portable_test PRIVATE "${CMAKE_INSTALL_PREFIX}/include"
)
42 changes: 19 additions & 23 deletions kernels/test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -217,28 +217,6 @@ set(_portable_kernels_test_sources
${CMAKE_CURRENT_BINARY_DIR}/include/portable/executorch/kernels/test/supported_features.cpp
)

list(
REMOVE_ITEM
_portable_kernels_test_sources
"op_amax_test.cpp"
"op_as_strided_copy_test.cpp"
"op_amin_test.cpp"
"op_bitwise_not_test.cpp"
"op_detach_copy_test.cpp"
"op_div_test.cpp"
"op_full_like_test.cpp"
"op_logit_test.cpp"
"op_max_test.cpp"
"op_mean_test.cpp"
"op_min_test.cpp"
"op_slice_copy_test.cpp"
)

set(_optimized_kernels_test_sources
op_neg_test.cpp
${CMAKE_CURRENT_BINARY_DIR}/include/portable/executorch/kernels/test/supported_features.cpp
)

et_cxx_test(
portable_kernels_test SOURCES ${_portable_kernels_test_sources} EXTRA_LIBS
portable_kernels portable_ops_lib
Expand All @@ -249,9 +227,27 @@ target_include_directories(
"${CMAKE_INSTALL_PREFIX}/include"
)

set(_optimized_kernels_test_sources
"op_add_test.cpp"
"op_bmm_test.cpp"
"op_div_test.cpp"
"op_exp_test.cpp"
"op_gelu_test.cpp"
"op_le_test.cpp"
"op_log_softmax_test.cpp"
"op_mul_test.cpp"
"op_native_layer_norm_test.cpp"
"op_neg_test.cpp"
"op_sub_test.cpp"
${CMAKE_CURRENT_BINARY_DIR}/include/portable/executorch/kernels/test/supported_features.cpp
)

# We don't have sleef on OSS so we don't have gelu and log_softmax
list(REMOVE_ITEM _optimized_kernels_test_sources "op_gelu_test.cpp" "op_log_softmax_test.cpp")

et_cxx_test(
optimized_kernels_test SOURCES ${_optimized_kernels_test_sources} EXTRA_LIBS
optimized_kernels optimized_ops_lib
optimized_kernels optimized_ops_lib portable_kernels eigen_blas
)
add_dependencies(optimized_kernels_test generate_wrapper)
target_include_directories(
Expand Down
68 changes: 34 additions & 34 deletions kernels/test/op_amax_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -189,44 +189,44 @@ class OpAmaxOutTest : public OperatorTest {
op_amax_out(in, empty_dim_list, /*keepdim=*/false, out);
EXPECT_TENSOR_CLOSE(out, tf.make({}, {9}));
}
};

template <>
void test_amax_out_dtype<ScalarType::Bool>() {
TensorFactory<ScalarType::Bool> tf_bool;
// clang-format off
Tensor in = tf_bool.make(
{2, 3, 4},
{
true, false, true, false,
false, false, false, false,
false, true, true, false,
template <>
void OpAmaxOutTest::test_amax_out_dtype<ScalarType::Bool>() {
TensorFactory<ScalarType::Bool> tf_bool;
// clang-format off
Tensor in = tf_bool.make(
{2, 3, 4},
{
true, false, true, false,
false, false, false, false,
false, true, true, false,

false, false, true, false,
false, false, false, true,
true, true, true, true,
});
// clang-format on
false, false, true, false,
false, false, false, true,
true, true, true, true,
});
// clang-format on

Tensor out = tf_bool.zeros({2, 3, 1});
Tensor out = tf_bool.zeros({2, 3, 1});

// +/-inf and nan should work
op_amax_out(in, /*dim=*/-1, /*keepdim=*/true, out);
// clang-format off
EXPECT_TENSOR_CLOSE(
out, tf_bool.make(
{2, 3, 1},
{
true,
false,
true,

true,
true,
true
}));
// clang-format on
}
};
// +/-inf and nan should work
op_amax_out(in, /*dim=*/-1, /*keepdim=*/true, out);
// clang-format off
EXPECT_TENSOR_CLOSE(
out, tf_bool.make(
{2, 3, 1},
{
true,
false,
true,

true,
true,
true
}));
// clang-format on
}

TEST_F(OpAmaxOutTest, InvalidDimensionListDies) {
if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
Expand Down
68 changes: 34 additions & 34 deletions kernels/test/op_amin_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -189,44 +189,44 @@ class OpAminOutTest : public OperatorTest {
op_amin_out(in, empty_dim_list, /*keepdim=*/false, out);
EXPECT_TENSOR_CLOSE(out, tf.make({}, {2}));
}
};

template <>
void test_amin_out_dtype<ScalarType::Bool>() {
TensorFactory<ScalarType::Bool> tf_bool;
// clang-format off
Tensor in = tf_bool.make(
{2, 3, 4},
{
true, false, true, false,
false, false, false, false,
false, true, true, false,
template <>
void OpAminOutTest::test_amin_out_dtype<ScalarType::Bool>() {
TensorFactory<ScalarType::Bool> tf_bool;
// clang-format off
Tensor in = tf_bool.make(
{2, 3, 4},
{
true, false, true, false,
false, false, false, false,
false, true, true, false,

false, false, true, false,
false, false, false, true,
true, true, true, true,
});
// clang-format on
false, false, true, false,
false, false, false, true,
true, true, true, true,
});
// clang-format on

Tensor out = tf_bool.zeros({2, 3, 1});
Tensor out = tf_bool.zeros({2, 3, 1});

// +/-inf and nan should work
op_amin_out(in, /*dim=*/-1, /*keepdim=*/true, out);
// clang-format off
EXPECT_TENSOR_CLOSE(
out, tf_bool.make(
{2, 3, 1},
{
false,
false,
false,

false,
false,
true
}));
// clang-format on
}
};
// +/-inf and nan should work
op_amin_out(in, /*dim=*/-1, /*keepdim=*/true, out);
// clang-format off
EXPECT_TENSOR_CLOSE(
out, tf_bool.make(
{2, 3, 1},
{
false,
false,
false,

false,
false,
true
}));
// clang-format on
}

TEST_F(OpAminOutTest, InvalidDimensionListDies) {
if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
Expand Down
104 changes: 51 additions & 53 deletions kernels/test/op_as_strided_copy_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -69,59 +69,6 @@ class OpAsStridedCopyOutTest : public OperatorTest {
EXPECT_TENSOR_EQ(out, tf.make(out_sizes, {3, 6, 5, 8, 4, 7, 6, 9}));
}

template <>
void test_detach_copy_out<ScalarType::Bool>() {
TensorFactory<ScalarType::Bool> tf;
const std::vector<int32_t> in_sizes = {3, 3};
const std::vector<int32_t> out_sizes = {2, 2, 2};
Tensor in = tf.make(
in_sizes, {false, true, false, true, false, true, false, true, false});
Tensor out = tf.zeros(out_sizes);

// Valid input should give the expected output
optional<int64_t> storage_offset = 2;
int64_t sizes[3] = {2, 2, 2};
int64_t stride[3] = {1, 2, 3};
op_as_strided_copy_out(
/*self=*/in,
/*size=*/ArrayRef<int64_t>{sizes, 3},
/*stride=*/ArrayRef<int64_t>{stride, 3},
storage_offset,
out);
EXPECT_TENSOR_EQ(
out,
tf.make(
out_sizes, {false, true, false, true, true, false, true, false}));
}

template <>
void test_detach_copy_out<ScalarType::Float>() {
TensorFactory<ScalarType::Float> tf;
const std::vector<int32_t> in_sizes = {3, 3};
const std::vector<int32_t> out_sizes = {2, 2, 2};

Tensor in = tf.make(
in_sizes,
{3.14, 2.33, 42, INFINITY, -INFINITY, NAN, -3.14, -2.33, -42});
Tensor out = tf.zeros(out_sizes);

// Valid input should give the expected output
optional<int64_t> storage_offset = 2;
int64_t sizes[3] = {2, 2, 2};
int64_t stride[3] = {1, 2, 3};
op_as_strided_copy_out(
/*self=*/in,
/*size=*/ArrayRef<int64_t>{sizes, 3},
/*stride=*/ArrayRef<int64_t>{stride, 3},
storage_offset,
out);
EXPECT_TENSOR_CLOSE(
out,
tf.make(
out_sizes,
{42.0, NAN, -INFINITY, 2.33, INFINITY, -3.14, NAN, -42.0}));
}

template <ScalarType DTYPE>
void test_as_strided_copy_out_invalid_parameters() {
TensorFactory<DTYPE> tf;
Expand Down Expand Up @@ -192,6 +139,57 @@ class OpAsStridedCopyOutTest : public OperatorTest {
}
};

// Explicit specialization for Bool: as_strided_copy of a 3x3 bool input into
// a 2x2x2 output, reading from storage offset 2 with strides {1, 2, 3}.
template <>
void OpAsStridedCopyOutTest::test_detach_copy_out<ScalarType::Bool>() {
  TensorFactory<ScalarType::Bool> tf;
  const std::vector<int32_t> in_sizes = {3, 3};
  const std::vector<int32_t> out_sizes = {2, 2, 2};
  Tensor in = tf.make(
      in_sizes, {false, true, false, true, false, true, false, true, false});
  Tensor out = tf.zeros(out_sizes);

  // Valid input should give the expected output
  optional<int64_t> storage_offset = 2;
  int64_t sizes[3] = {2, 2, 2};
  int64_t stride[3] = {1, 2, 3};
  op_as_strided_copy_out(
      /*self=*/in,
      /*size=*/ArrayRef<int64_t>{sizes, 3},
      /*stride=*/ArrayRef<int64_t>{stride, 3},
      storage_offset,
      out);
  EXPECT_TENSOR_EQ(
      out,
      tf.make(out_sizes, {false, true, false, true, true, false, true, false}));
}

// Explicit specialization for Float: same gather pattern as the Bool case,
// but the input includes +/-INFINITY and NAN to check that non-finite values
// are copied through unchanged. Uses EXPECT_TENSOR_CLOSE — presumably
// NaN-tolerant for the NAN slots; confirm against the test macro definition.
template <>
void OpAsStridedCopyOutTest::test_detach_copy_out<ScalarType::Float>() {
  TensorFactory<ScalarType::Float> tf;
  const std::vector<int32_t> in_sizes = {3, 3};
  const std::vector<int32_t> out_sizes = {2, 2, 2};

  Tensor in = tf.make(
      in_sizes, {3.14, 2.33, 42, INFINITY, -INFINITY, NAN, -3.14, -2.33, -42});
  Tensor out = tf.zeros(out_sizes);

  // Valid input should give the expected output
  optional<int64_t> storage_offset = 2;
  int64_t sizes[3] = {2, 2, 2};
  int64_t stride[3] = {1, 2, 3};
  op_as_strided_copy_out(
      /*self=*/in,
      /*size=*/ArrayRef<int64_t>{sizes, 3},
      /*stride=*/ArrayRef<int64_t>{stride, 3},
      storage_offset,
      out);
  EXPECT_TENSOR_CLOSE(
      out,
      tf.make(
          out_sizes,
          {42.0, NAN, -INFINITY, 2.33, INFINITY, -3.14, NAN, -42.0}));
}

TEST_F(OpAsStridedCopyOutTest, AllScalarInputOutputSupport) {
#define TEST_ENTRY(ctype, dtype) test_detach_copy_out<ScalarType::dtype>();
ET_FORALL_INT_TYPES(TEST_ENTRY);
Expand Down
Loading

0 comments on commit 6cac9c1

Please sign in to comment.