Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[XPU] fix several XPU unit tests #70973

Open
wants to merge 3 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 48 additions & 0 deletions paddle/phi/backends/xpu/xpu3_op_list.cc
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,38 @@ XPUOpMap& get_kl3_ops() {
phi::DataType::INT64,
phi::DataType::FLOAT16,
phi::DataType::FLOAT32})},
{"array_pop",
XPUKernelSet({phi::DataType::FLOAT32,
phi::DataType::FLOAT64,
phi::DataType::INT32,
phi::DataType::FLOAT16,
phi::DataType::BFLOAT16,
phi::DataType::INT64,
phi::DataType::BOOL})},
{"array_read",
XPUKernelSet({phi::DataType::FLOAT32,
phi::DataType::FLOAT64,
phi::DataType::INT32,
phi::DataType::FLOAT16,
phi::DataType::BFLOAT16,
phi::DataType::INT64,
phi::DataType::BOOL})},
{"array_to_tensor",
XPUKernelSet({phi::DataType::FLOAT32,
phi::DataType::FLOAT64,
phi::DataType::INT32,
phi::DataType::FLOAT16,
phi::DataType::BFLOAT16,
phi::DataType::INT64,
phi::DataType::BOOL})},
{"array_write",
XPUKernelSet({phi::DataType::FLOAT32,
phi::DataType::FLOAT64,
phi::DataType::INT32,
phi::DataType::FLOAT16,
phi::DataType::BFLOAT16,
phi::DataType::INT64,
phi::DataType::BOOL})},
{"assign",
XPUKernelSet({phi::DataType::FLOAT32,
phi::DataType::FLOAT64,
Expand Down Expand Up @@ -282,6 +314,22 @@ XPUOpMap& get_kl3_ops() {
XPUKernelSet({phi::DataType::FLOAT32, phi::DataType::FLOAT16})},
{"conv2d_transpose_xpu",
XPUKernelSet({phi::DataType::FLOAT32, phi::DataType::FLOAT16})},
{"create_array",
XPUKernelSet({phi::DataType::FLOAT32,
phi::DataType::FLOAT64,
phi::DataType::INT32,
phi::DataType::FLOAT16,
phi::DataType::BFLOAT16,
phi::DataType::INT64,
phi::DataType::BOOL})},
{"create_array_like",
XPUKernelSet({phi::DataType::FLOAT32,
phi::DataType::FLOAT64,
phi::DataType::INT32,
phi::DataType::FLOAT16,
phi::DataType::BFLOAT16,
phi::DataType::INT64,
phi::DataType::BOOL})},
{"cumsum",
XPUKernelSet({phi::DataType::FLOAT32,
phi::DataType::FLOAT16,
Expand Down
84 changes: 84 additions & 0 deletions paddle/phi/kernels/array_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -186,6 +186,20 @@ PD_REGISTER_KERNEL(create_array,
phi::dtype::complex<double>) {}
#endif

#if defined(PADDLE_WITH_XPU)
// XPU registration for create_array. Mirrors the CPU/GPU registrations above
// but omits complex64/complex128 — complex is not supported on XPU (see the
// matching unittest.skipIf messages in this PR's test changes).
PD_REGISTER_KERNEL(create_array,
XPU,
ALL_LAYOUT,
phi::CreateArrayKernel,
bool,
int,
int64_t,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#endif

PD_REGISTER_KERNEL(create_array_like,
CPU,
ALL_LAYOUT,
Expand Down Expand Up @@ -216,6 +230,20 @@ PD_REGISTER_KERNEL(create_array_like,
phi::dtype::complex<double>) {}
#endif

#if defined(PADDLE_WITH_XPU)
// XPU registration for create_array_like. Same dtype set as the XPU
// create_array registration; complex dtypes are omitted because complex is
// not supported on XPU.
PD_REGISTER_KERNEL(create_array_like,
XPU,
ALL_LAYOUT,
phi::CreateArrayLikeKernel,
bool,
int,
int64_t,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#endif

PD_REGISTER_KERNEL(array_length,
CPU,
ALL_LAYOUT,
Expand Down Expand Up @@ -260,6 +288,20 @@ PD_REGISTER_KERNEL(array_read,
phi::dtype::complex<double>) {}
#endif

#if defined(PADDLE_WITH_XPU)
// XPU registration for array_read. Dtype list matches the other XPU array
// kernel registrations in this file (no complex — unsupported on XPU).
PD_REGISTER_KERNEL(array_read,
XPU,
ALL_LAYOUT,
phi::ArrayReadKernel,
bool,
int,
int64_t,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#endif

PD_REGISTER_KERNEL(array_write,
CPU,
ALL_LAYOUT,
Expand Down Expand Up @@ -290,6 +332,20 @@ PD_REGISTER_KERNEL(array_write,
phi::dtype::complex<double>) {}
#endif

#if defined(PADDLE_WITH_XPU)
// XPU registration for array_write. Dtype list matches the other XPU array
// kernel registrations in this file (no complex — unsupported on XPU).
PD_REGISTER_KERNEL(array_write,
XPU,
ALL_LAYOUT,
phi::ArrayWriteKernel,
bool,
int,
int64_t,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#endif

PD_REGISTER_KERNEL(array_to_tensor,
CPU,
ALL_LAYOUT,
Expand Down Expand Up @@ -320,6 +376,20 @@ PD_REGISTER_KERNEL(array_to_tensor,
phi::dtype::complex<double>) {}
#endif

#if defined(PADDLE_WITH_XPU)
// XPU registration for array_to_tensor. Dtype list matches the other XPU
// array kernel registrations in this file (no complex — unsupported on XPU).
PD_REGISTER_KERNEL(array_to_tensor,
XPU,
ALL_LAYOUT,
phi::ArrayToTensorKernel,
bool,
int,
int64_t,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#endif

PD_REGISTER_KERNEL(array_pop,
CPU,
ALL_LAYOUT,
Expand Down Expand Up @@ -349,3 +419,17 @@ PD_REGISTER_KERNEL(array_pop,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
#endif

#if defined(PADDLE_WITH_XPU)
// XPU registration for array_pop. Dtype list matches the other XPU array
// kernel registrations in this file (no complex — unsupported on XPU).
PD_REGISTER_KERNEL(array_pop,
XPU,
ALL_LAYOUT,
phi::ArrayPopKernel,
bool,
int,
int64_t,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#endif
10 changes: 5 additions & 5 deletions test/dygraph_to_static/test_list.py
Original file line number Diff line number Diff line change
Expand Up @@ -208,11 +208,11 @@ def test_list_pop_in_while_loop(x, iter_num):

class TestListWithoutControlFlowConfig(Dy2StTestBase):
def setUp(self):
self.place = (
base.CUDAPlace(0)
if base.is_compiled_with_cuda()
else base.CPUPlace()
)
self.place = base.CPUPlace()
if base.is_compiled_with_cuda():
self.place = base.CUDAPlace(0)
if base.is_compiled_with_xpu():
self.place = base.XPUPlace(0)

self.init_data()
self.init_dygraph_func()
Expand Down
2 changes: 2 additions & 0 deletions test/legacy_test/test_index_sample_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,7 @@ def config(self):
self.index_type = "int64"


@unittest.skipIf(core.is_compiled_with_xpu(), "complex is not supported on XPU")
class TestIndexSampleComplex64(TestIndexSampleOp):
def config(self):
"""
Expand All @@ -140,6 +141,7 @@ def config(self):
self.index_type = "int64"


@unittest.skipIf(core.is_compiled_with_xpu(), "complex is not supported on XPU")
class TestIndexSampleComplex128(TestIndexSampleOp):
def config(self):
"""
Expand Down
5 changes: 4 additions & 1 deletion test/legacy_test/test_manual_seed.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,10 @@ def test_seed(self):
x2_np = x2.numpy()
x3_np = x3.numpy()

if not base.core.is_compiled_with_cuda():
if (
not base.core.is_compiled_with_cuda()
and not base.core.is_compiled_with_xpu()
):
np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05)
np.testing.assert_allclose(x_np, x3_np, rtol=1e-05)

Expand Down
5 changes: 4 additions & 1 deletion test/legacy_test/test_scatter_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -903,7 +903,10 @@ def executed_api(self):
self.scatter = paddle.scatter_


@unittest.skipIf(core.is_compiled_with_cuda(), "CUDA will not throw exception")
@unittest.skipIf(
core.is_compiled_with_cuda() or core.is_compiled_with_xpu(),
"CUDA and XPU will not throw exception",
)
class TestScatterError(unittest.TestCase):
def test_scatter_index(self):
paddle.disable_static()
Expand Down
6 changes: 0 additions & 6 deletions tools/xpu/disable_ut_xpu_kl3.local
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,6 @@ test_imperative_se_resnext
test_imperative_transformer_sorted_gradient
test_imperative_triple_grad
test_index_put_op
test_index_sample_op
test_index_select_strided
test_inference_api_deprecated
test_inplace
Expand All @@ -155,9 +154,7 @@ test_layer_astype
test_linalg_cond
test_linalg_matrix_exp
test_linalg_pinv_op
test_list
test_lstm
test_manual_seed
test_masked_select_op
test_math_op_patch_var_base
test_matmul_weight_trans_pass
Expand All @@ -168,7 +165,6 @@ test_mnist_amp
test_modelaverage
test_normal
test_normal_inplace
test_optimizer_for_varbase
test_ormqr
test_pad3d_op
test_parallel_dygraph_dataparallel
Expand All @@ -190,12 +186,10 @@ test_quant_amp_deprecated
test_quantile_and_nanquantile
test_reduce_op
test_reduce_op_static_build
test_reshape_op
test_resnet_amp
test_rmsprop_op
test_rnn_cells
test_run
test_scatter_op
test_setitem
test_sink_decomp
test_slice
Expand Down
Loading