From 3614247a285e10f54062ce9e8a6fa279bd684f8e Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Thu, 23 May 2024 05:53:09 +0000 Subject: [PATCH 01/22] add adaptive avg pool input check --- python/oneflow/nn/modules/pooling.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/python/oneflow/nn/modules/pooling.py b/python/oneflow/nn/modules/pooling.py index d764e69e581..c0d0f7dd2b1 100644 --- a/python/oneflow/nn/modules/pooling.py +++ b/python/oneflow/nn/modules/pooling.py @@ -674,6 +674,10 @@ def __init__(self, output_size: _size_1_t) -> None: super().__init__() assert output_size is not None, "'output_size' cannot be NoneType" self.output_size = _single(output_size) + assert len(self.output_size) == 1, "'output_size' should contain one int" + assert ( + self.output_size[0] >= 0 + ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" def forward(self, x): assert ( @@ -741,6 +745,10 @@ def __init__(self, output_size, data_format=None) -> None: super().__init__() assert output_size is not None, "'output_size' cannot be NoneType" self.output_size = _pair(output_size) + assert len(self.output_size) == 2, "'output_size' must be 2" + assert ( + self.output_size[0] >= 0 and self.output_size[1] >= 0 + ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" if data_format: if not data_format in ["channels_first", "channels_last"]: raise ValueError( @@ -824,6 +832,10 @@ def __init__(self, output_size) -> None: super().__init__() assert output_size is not None, "'output_size' cannot be NoneType" self.output_size = _triple(output_size) + assert len(self.output_size) == 3, "'output_size' must be 3" + assert ( + self.output_size[0] >= 0 and self.output_size[1] >= 0 and self.output_size[2] >= 0 + ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" def forward(self, x): assert ( From 148146fee64059f09e3d35fac79bd051bd40fa80 Mon Sep 17 
00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Thu, 23 May 2024 09:08:47 +0000 Subject: [PATCH 02/22] add max unpool input check --- python/oneflow/nn/modules/pooling.py | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/python/oneflow/nn/modules/pooling.py b/python/oneflow/nn/modules/pooling.py index c0d0f7dd2b1..17702b7117e 100644 --- a/python/oneflow/nn/modules/pooling.py +++ b/python/oneflow/nn/modules/pooling.py @@ -1037,6 +1037,35 @@ def forward(self, input): ) +def _unpool_output_size_check( + input, kernel_size: List[int], stride: List[int], padding: List[int], output_size: Optional[List[int]] +) -> List[int]: + input_size = input.size() + default_size = [] + for d in range(len(kernel_size)): + default_size.append((input_size[-len(kernel_size) + d] - 1) * stride[d] + kernel_size[d] - 2 * padding[d]) + if output_size is None: + ret = default_size + else: + if len(output_size) == len(kernel_size) + 2: + output_size = output_size[2:] + if len(output_size) != len(kernel_size): + raise ValueError( + "output_size should be a sequence containing " + f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'" + ) + for d in range(len(kernel_size)): + min_size = default_size[d] - stride[d] + max_size = default_size[d] + stride[d] + if not (min_size < output_size[d] < max_size): + raise ValueError( + f'invalid output_size "{output_size}" (dim {d} must be between {min_size} and {max_size})' + ) + + ret = output_size + return ret + + class MaxUnpool1d(Module): r"""Computes a partial inverse of :class:`MaxPool1d`. 
@@ -1112,6 +1141,25 @@ def __init__( self.padding = padding def forward(self, x, indices, output_size=None): + kernel_size = _single(self.kernel_size) + if self.stride is not None: + _stride = _single(self.stride) + else: + _stride = kernel_size + padding = _single(self.padding) + check_output_size = _unpool_output_size_check(x, kernel_size, _stride, padding, output_size) + assert ( + len(check_output_size) == 1 + ), f"There should be exactly one element in output_size, but got {len(check_output_size)}" + assert ( + indices.dtype == flow.int64 + ), f"elements in indices should be type int64 but got: {indices.dtype}" + assert ( + len(x.size()) == 2 or len(x.size()) == 3 + ), f"Input to max_unpooling1d should be a 2d or 3d Tensor, but got {len(x.size())} dimensions" + assert ( + x.size() == indices.size() + ), f"Expected shape of indices to be same as that of the input tensor" return flow._C.max_unpool1d( x, indices, self.kernel_size, self.stride, self.padding, output_size ) @@ -1200,6 +1248,25 @@ def __init__( self.padding = padding def forward(self, x, indices, output_size=None): + kernel_size = _pair(self.kernel_size) + if self.stride is not None: + _stride = _pair(self.stride) + else: + _stride = kernel_size + padding = _pair(self.padding) + check_output_size = _unpool_output_size_check(x, kernel_size, _stride, padding, output_size) + assert ( + len(check_output_size) == 2 + ), f"There should be exactly two elements in output_size, but got {len(check_output_size)}" + assert ( + indices.dtype == flow.int64 + ), f"elements in indices should be type int64 but got: {indices.dtype}" + assert ( + len(x.size()) == 3 or len(x.size()) == 4 + ), f"Input to max_unpooling1d should be a 3d or 4d Tensor, but got {len(x.size())} dimensions" + assert ( + x.size() == indices.size() + ), f"Expected shape of indices to be same as that of the input tensor" return flow._C.max_unpool2d( x, indices, self.kernel_size, self.stride, self.padding, output_size ) @@ -1278,6 +1345,25 @@ def 
__init__( self.padding = padding def forward(self, x, indices, output_size=None): + kernel_size = _triple(self.kernel_size) + if self.stride is not None: + _stride = _triple(self.stride) + else: + _stride = kernel_size + padding = _triple(self.padding) + check_output_size = _unpool_output_size_check(x, kernel_size, _stride, padding, output_size) + assert ( + len(check_output_size) == 3 + ), f"There should be exactly three elements in output_size, but got {len(check_output_size)}" + assert ( + indices.dtype == flow.int64 + ), f"elements in indices should be type int64 but got: {indices.dtype}" + assert ( + len(x.size()) == 4 or len(x.size()) == 5 + ), f"Input to max_unpooling1d should be a 4d or 5d Tensor, but got {len(x.size())} dimensions" + assert ( + x.size() == indices.size() + ), f"Expected shape of indices to be same as that of the input tensor" return flow._C.max_unpool3d( x, indices, self.kernel_size, self.stride, self.padding, output_size ) From e8784e367c7762c64ab7feeaad3bfd5bebfe3913 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Thu, 23 May 2024 09:47:50 +0000 Subject: [PATCH 03/22] add adaptive max pool input check --- python/oneflow/nn/modules/pooling.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/python/oneflow/nn/modules/pooling.py b/python/oneflow/nn/modules/pooling.py index 17702b7117e..e7cf88dec3a 100644 --- a/python/oneflow/nn/modules/pooling.py +++ b/python/oneflow/nn/modules/pooling.py @@ -904,6 +904,9 @@ def forward(self, input): assert ( len(input.shape) == 3 and len(self.output_size) == 1 ), "the length of 'output_size' does not match the input size, 1 expected" + assert ( + self.output_size[0] >= 0 + ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" new_output_size = _generate_output_size(input.shape, self.output_size) return flow.nn.functional.adaptive_max_pool1d( input, self.output_size, self.return_indices @@ -976,6 +979,10 @@ def forward(self, input): assert 
( len(input.shape) == 4 ), f"expected 4-dimensional tensor, but got {len(input.shape)}-dimensional tensor" + assert len(self.output_size) == 2, "'output_size' must be 2" + assert ( + self.output_size[0] >= 0 and self.output_size[1] + ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" new_output_size = _generate_output_size(input.shape, self.output_size) return flow.nn.functional.adaptive_max_pool2d( input, self.output_size, self.return_indices, self.channel_pos @@ -1031,6 +1038,10 @@ def forward(self, input): assert ( len(input.shape) == 5 ), f"expected 5-dimensional tensor, but got {len(input.shape)}-dimensional tensor" + assert len(self.output_size) == 3, "'output_size' must be 3" + assert ( + self.output_size[0] >= 0 and self.output_size[1] >= 0 and self.output_size[2] >= 0 + ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" new_output_size = _generate_output_size(input.shape, self.output_size) return flow.nn.functional.adaptive_max_pool3d( input, self.output_size, self.return_indices From 64deba5659e567081054ef94c5ca8f0dc71a7c20 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Thu, 23 May 2024 09:50:15 +0000 Subject: [PATCH 04/22] fix max pool2d --- python/oneflow/nn/modules/pooling.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/oneflow/nn/modules/pooling.py b/python/oneflow/nn/modules/pooling.py index e7cf88dec3a..5066e5a9752 100644 --- a/python/oneflow/nn/modules/pooling.py +++ b/python/oneflow/nn/modules/pooling.py @@ -981,7 +981,7 @@ def forward(self, input): ), f"expected 4-dimensional tensor, but got {len(input.shape)}-dimensional tensor" assert len(self.output_size) == 2, "'output_size' must be 2" assert ( - self.output_size[0] >= 0 and self.output_size[1] + self.output_size[0] >= 0 and self.output_size[1] >= 0 ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" new_output_size = 
_generate_output_size(input.shape, self.output_size) return flow.nn.functional.adaptive_max_pool2d( From 2a24cd782d061016cec2f3457575454e3f93a620 Mon Sep 17 00:00:00 2001 From: oneflow-ci-bot Date: Fri, 24 May 2024 02:44:09 +0000 Subject: [PATCH 05/22] auto format by CI --- python/oneflow/nn/modules/pooling.py | 32 ++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/python/oneflow/nn/modules/pooling.py b/python/oneflow/nn/modules/pooling.py index 5066e5a9752..4a0fc547b3b 100644 --- a/python/oneflow/nn/modules/pooling.py +++ b/python/oneflow/nn/modules/pooling.py @@ -834,7 +834,9 @@ def __init__(self, output_size) -> None: self.output_size = _triple(output_size) assert len(self.output_size) == 3, "'output_size' must be 3" assert ( - self.output_size[0] >= 0 and self.output_size[1] >= 0 and self.output_size[2] >= 0 + self.output_size[0] >= 0 + and self.output_size[1] >= 0 + and self.output_size[2] >= 0 ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" def forward(self, x): @@ -1040,7 +1042,9 @@ def forward(self, input): ), f"expected 5-dimensional tensor, but got {len(input.shape)}-dimensional tensor" assert len(self.output_size) == 3, "'output_size' must be 3" assert ( - self.output_size[0] >= 0 and self.output_size[1] >= 0 and self.output_size[2] >= 0 + self.output_size[0] >= 0 + and self.output_size[1] >= 0 + and self.output_size[2] >= 0 ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" new_output_size = _generate_output_size(input.shape, self.output_size) return flow.nn.functional.adaptive_max_pool3d( @@ -1049,12 +1053,20 @@ def forward(self, input): def _unpool_output_size_check( - input, kernel_size: List[int], stride: List[int], padding: List[int], output_size: Optional[List[int]] + input, + kernel_size: List[int], + stride: List[int], + padding: List[int], + output_size: Optional[List[int]], ) -> List[int]: input_size = input.size() 
default_size = [] for d in range(len(kernel_size)): - default_size.append((input_size[-len(kernel_size) + d] - 1) * stride[d] + kernel_size[d] - 2 * padding[d]) + default_size.append( + (input_size[-len(kernel_size) + d] - 1) * stride[d] + + kernel_size[d] + - 2 * padding[d] + ) if output_size is None: ret = default_size else: @@ -1158,7 +1170,9 @@ def forward(self, x, indices, output_size=None): else: _stride = kernel_size padding = _single(self.padding) - check_output_size = _unpool_output_size_check(x, kernel_size, _stride, padding, output_size) + check_output_size = _unpool_output_size_check( + x, kernel_size, _stride, padding, output_size + ) assert ( len(check_output_size) == 1 ), f"There should be exactly one element in output_size, but got {len(check_output_size)}" @@ -1265,7 +1279,9 @@ def forward(self, x, indices, output_size=None): else: _stride = kernel_size padding = _pair(self.padding) - check_output_size = _unpool_output_size_check(x, kernel_size, _stride, padding, output_size) + check_output_size = _unpool_output_size_check( + x, kernel_size, _stride, padding, output_size + ) assert ( len(check_output_size) == 2 ), f"There should be exactly two elements in output_size, but got {len(check_output_size)}" @@ -1362,7 +1378,9 @@ def forward(self, x, indices, output_size=None): else: _stride = kernel_size padding = _triple(self.padding) - check_output_size = _unpool_output_size_check(x, kernel_size, _stride, padding, output_size) + check_output_size = _unpool_output_size_check( + x, kernel_size, _stride, padding, output_size + ) assert ( len(check_output_size) == 3 ), f"There should be exactly three elements in output_size, but got {len(check_output_size)}" From ef5005c625311972c97a7f626b057e69a22cee0c Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Mon, 27 May 2024 11:11:59 +0000 Subject: [PATCH 06/22] add Tensor shape check --- oneflow/api/python/functional/tensor_api.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git 
a/oneflow/api/python/functional/tensor_api.cpp b/oneflow/api/python/functional/tensor_api.cpp index 32e12312cd9..dd996d3bdb1 100644 --- a/oneflow/api/python/functional/tensor_api.cpp +++ b/oneflow/api/python/functional/tensor_api.cpp @@ -203,6 +203,9 @@ class TensorWithShapeGenericCtorFunctor { Maybe operator()(const Shape& shape, const Symbol& dtype, const Optional>& device) const { // NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now. + for (const auto& s : shape) { + CHECK_OR_THROW(s > 0) << "Trying to create tensor with negative dimension " << s << ": " << shape; + } LazyMode::Guard lazy_mode_disabled_guard(/*is_enabled*/ false); Symbol device_; if (device) { From 33766ca5881822bfc789f865e0a57184ab5984a0 Mon Sep 17 00:00:00 2001 From: oneflow-ci-bot Date: Mon, 27 May 2024 11:13:34 +0000 Subject: [PATCH 07/22] auto format by CI --- oneflow/api/python/functional/tensor_api.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/oneflow/api/python/functional/tensor_api.cpp b/oneflow/api/python/functional/tensor_api.cpp index dd996d3bdb1..902fa064fcd 100644 --- a/oneflow/api/python/functional/tensor_api.cpp +++ b/oneflow/api/python/functional/tensor_api.cpp @@ -204,7 +204,8 @@ class TensorWithShapeGenericCtorFunctor { const Optional>& device) const { // NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now. 
for (const auto& s : shape) { - CHECK_OR_THROW(s > 0) << "Trying to create tensor with negative dimension " << s << ": " << shape; + CHECK_OR_THROW(s > 0) << "Trying to create tensor with negative dimension " << s << ": " + << shape; } LazyMode::Guard lazy_mode_disabled_guard(/*is_enabled*/ false); Symbol device_; From a1daa406eb09e0b1eee0de2c63922c4305cf2809 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Tue, 28 May 2024 07:39:38 +0000 Subject: [PATCH 08/22] add rand shape check --- oneflow/core/functional/impl/random_functor.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/oneflow/core/functional/impl/random_functor.cpp b/oneflow/core/functional/impl/random_functor.cpp index 5a97e29ab1d..0b042b9d5cb 100644 --- a/oneflow/core/functional/impl/random_functor.cpp +++ b/oneflow/core/functional/impl/random_functor.cpp @@ -203,6 +203,11 @@ class RandFunctor { } } + for (const auto& s : shape) { + CHECK_OR_THROW(s > 0) << "Trying to create tensor with negative dimension " << s << ": " + << shape; + } + auto gen = generator.value_or(JUST(one::DefaultAutoGenerator())); gen = JUST(GetGeneratorForLazyOrGlobal(gen, LazyMode::is_enabled(), NullOpt, NullOpt)); From 2481e92cf55ededf4dc6b3f18ecef50d4f68e7c0 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Tue, 28 May 2024 07:52:04 +0000 Subject: [PATCH 09/22] fix non negative --- oneflow/api/python/functional/tensor_api.cpp | 2 +- oneflow/core/functional/impl/random_functor.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/oneflow/api/python/functional/tensor_api.cpp b/oneflow/api/python/functional/tensor_api.cpp index 902fa064fcd..b7e2fc1b0f4 100644 --- a/oneflow/api/python/functional/tensor_api.cpp +++ b/oneflow/api/python/functional/tensor_api.cpp @@ -204,7 +204,7 @@ class TensorWithShapeGenericCtorFunctor { const Optional>& device) const { // NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now. 
for (const auto& s : shape) { - CHECK_OR_THROW(s > 0) << "Trying to create tensor with negative dimension " << s << ": " + CHECK_OR_THROW(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " << shape; } LazyMode::Guard lazy_mode_disabled_guard(/*is_enabled*/ false); diff --git a/oneflow/core/functional/impl/random_functor.cpp b/oneflow/core/functional/impl/random_functor.cpp index 0b042b9d5cb..19aa56b6728 100644 --- a/oneflow/core/functional/impl/random_functor.cpp +++ b/oneflow/core/functional/impl/random_functor.cpp @@ -204,7 +204,7 @@ class RandFunctor { } for (const auto& s : shape) { - CHECK_OR_THROW(s > 0) << "Trying to create tensor with negative dimension " << s << ": " + CHECK_OR_THROW(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " << shape; } From 65dceb2ec1d587ca69659a519509df0504cd67f3 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Tue, 28 May 2024 08:11:27 +0000 Subject: [PATCH 10/22] add zeros ones shape check --- python/oneflow/nn/modules/constant.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/oneflow/nn/modules/constant.py b/python/oneflow/nn/modules/constant.py index 9a8ca57da1a..cd135894cbd 100644 --- a/python/oneflow/nn/modules/constant.py +++ b/python/oneflow/nn/modules/constant.py @@ -44,6 +44,9 @@ def __init__( self.device = flow.device(self.device) self.requires_grad = requires_grad size = _single(size) + assert ( + all(s >= 0 for s in size) + ), f"Trying to create tensor with negative dimension: {size}" if dtype is None: dtype = flow.get_default_dtype() if placement is None: From ad0986a345267291426c79660f2ffc32c77caa35 Mon Sep 17 00:00:00 2001 From: oneflow-ci-bot Date: Tue, 28 May 2024 08:16:24 +0000 Subject: [PATCH 11/22] auto format by CI --- oneflow/api/python/functional/tensor_api.cpp | 2 +- oneflow/core/functional/impl/random_functor.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/oneflow/api/python/functional/tensor_api.cpp 
b/oneflow/api/python/functional/tensor_api.cpp index b7e2fc1b0f4..2e056d09295 100644 --- a/oneflow/api/python/functional/tensor_api.cpp +++ b/oneflow/api/python/functional/tensor_api.cpp @@ -205,7 +205,7 @@ class TensorWithShapeGenericCtorFunctor { // NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now. for (const auto& s : shape) { CHECK_OR_THROW(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " - << shape; + << shape; } LazyMode::Guard lazy_mode_disabled_guard(/*is_enabled*/ false); Symbol device_; diff --git a/oneflow/core/functional/impl/random_functor.cpp b/oneflow/core/functional/impl/random_functor.cpp index 19aa56b6728..26df526e929 100644 --- a/oneflow/core/functional/impl/random_functor.cpp +++ b/oneflow/core/functional/impl/random_functor.cpp @@ -205,7 +205,7 @@ class RandFunctor { for (const auto& s : shape) { CHECK_OR_THROW(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " - << shape; + << shape; } auto gen = generator.value_or(JUST(one::DefaultAutoGenerator())); From 170a197bab0eb6e8400d58f4f76b422ed48ad766 Mon Sep 17 00:00:00 2001 From: oneflow-ci-bot Date: Tue, 28 May 2024 08:32:40 +0000 Subject: [PATCH 12/22] auto format by CI --- python/oneflow/nn/modules/constant.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/oneflow/nn/modules/constant.py b/python/oneflow/nn/modules/constant.py index cd135894cbd..c2ac7762344 100644 --- a/python/oneflow/nn/modules/constant.py +++ b/python/oneflow/nn/modules/constant.py @@ -44,8 +44,8 @@ def __init__( self.device = flow.device(self.device) self.requires_grad = requires_grad size = _single(size) - assert ( - all(s >= 0 for s in size) + assert all( + s >= 0 for s in size ), f"Trying to create tensor with negative dimension: {size}" if dtype is None: dtype = flow.get_default_dtype() From 656879eb810a46cd806d36584411fc5f2cd20949 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Wed, 29 May 2024 
08:04:17 +0000 Subject: [PATCH 13/22] add check non negative function --- oneflow/api/python/functional/tensor_api.cpp | 5 +---- oneflow/core/functional/impl/common.cpp | 9 +++++++++ oneflow/core/functional/impl/common.h | 1 + oneflow/core/functional/impl/random_functor.cpp | 6 +----- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/oneflow/api/python/functional/tensor_api.cpp b/oneflow/api/python/functional/tensor_api.cpp index 2e056d09295..86b9466e689 100644 --- a/oneflow/api/python/functional/tensor_api.cpp +++ b/oneflow/api/python/functional/tensor_api.cpp @@ -203,10 +203,7 @@ class TensorWithShapeGenericCtorFunctor { Maybe operator()(const Shape& shape, const Symbol& dtype, const Optional>& device) const { // NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now. - for (const auto& s : shape) { - CHECK_OR_THROW(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " - << shape; - } + JUST(CheckSizeNonNegative(shape)); LazyMode::Guard lazy_mode_disabled_guard(/*is_enabled*/ false); Symbol device_; if (device) { diff --git a/oneflow/core/functional/impl/common.cpp b/oneflow/core/functional/impl/common.cpp index af27a73c0ed..24b123aee87 100644 --- a/oneflow/core/functional/impl/common.cpp +++ b/oneflow/core/functional/impl/common.cpp @@ -204,6 +204,15 @@ Maybe CheckInplaceShapeCanExpandTo(const Shape& shape, const Shape& expand return Maybe::Ok(); } +Maybe CheckSizeNonNegative(const Shape& shape) { + for (const auto& s : shape) { + CHECK_OR_THROW(s >= 0) + << "Trying to create tensor with negative dimension " << s << ": " + << shape; + } + return Maybe::Ok(); +} + Optional ComputeStride(const Shape& shape, const Stride& stride, const Shape& target_shape) { /************************************************* diff --git a/oneflow/core/functional/impl/common.h b/oneflow/core/functional/impl/common.h index cb97f4f616b..334edad1418 100644 --- a/oneflow/core/functional/impl/common.h +++ 
b/oneflow/core/functional/impl/common.h @@ -39,6 +39,7 @@ Maybe CheckInplaceValid(const std::shared_ptr& x); Maybe CheckInplaceCastValid(const std::shared_ptr& x, const std::shared_ptr& x_cast); Maybe CheckInplaceShapeCanExpandTo(const Shape& shape, const Shape& expand_shape); +Maybe CheckSizeNonNegative(const Shape& shape); Optional ComputeStride(const Shape& shape, const Stride& stride, const Shape& target_shape); Maybe InferShapeUnspecifiedDim(const int64_t& elem_count, const Shape& shape); diff --git a/oneflow/core/functional/impl/random_functor.cpp b/oneflow/core/functional/impl/random_functor.cpp index 26df526e929..03565d51434 100644 --- a/oneflow/core/functional/impl/random_functor.cpp +++ b/oneflow/core/functional/impl/random_functor.cpp @@ -202,11 +202,7 @@ class RandFunctor { OF_UNIMPLEMENTED() << "Only support floating dtype in rand()."; } } - - for (const auto& s : shape) { - CHECK_OR_THROW(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " - << shape; - } + JUST(CheckSizeNonNegative(shape)); auto gen = generator.value_or(JUST(one::DefaultAutoGenerator())); gen = JUST(GetGeneratorForLazyOrGlobal(gen, LazyMode::is_enabled(), NullOpt, NullOpt)); From b0881a82396350c5fb8bdc96e87fd655654b6da7 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Wed, 29 May 2024 08:51:56 +0000 Subject: [PATCH 14/22] update check non negative --- oneflow/core/functional/impl/common.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/oneflow/core/functional/impl/common.cpp b/oneflow/core/functional/impl/common.cpp index 24b123aee87..a175873c06a 100644 --- a/oneflow/core/functional/impl/common.cpp +++ b/oneflow/core/functional/impl/common.cpp @@ -206,9 +206,8 @@ Maybe CheckInplaceShapeCanExpandTo(const Shape& shape, const Shape& expand Maybe CheckSizeNonNegative(const Shape& shape) { for (const auto& s : shape) { - CHECK_OR_THROW(s >= 0) - << "Trying to create tensor with negative dimension " << s << ": " - << shape; + 
CHECK_OR_RETURN(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " + << shape; } return Maybe::Ok(); } From 0bc50c9dd89630300fb82751072abdcb13e82164 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Wed, 29 May 2024 09:01:03 +0000 Subject: [PATCH 15/22] update check negative --- oneflow/core/functional/impl/common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/oneflow/core/functional/impl/common.cpp b/oneflow/core/functional/impl/common.cpp index a175873c06a..4c261b37f01 100644 --- a/oneflow/core/functional/impl/common.cpp +++ b/oneflow/core/functional/impl/common.cpp @@ -204,7 +204,7 @@ Maybe CheckInplaceShapeCanExpandTo(const Shape& shape, const Shape& expand return Maybe::Ok(); } -Maybe CheckSizeNonNegative(const Shape& shape) { +inline Maybe CheckSizeNonNegative(const Shape& shape) { for (const auto& s : shape) { CHECK_OR_RETURN(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " << shape; From 4548aaccd14f2e01d08d6da4af75396ce96ee054 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Wed, 29 May 2024 09:24:51 +0000 Subject: [PATCH 16/22] inline check non negative --- oneflow/core/functional/impl/common.cpp | 8 -------- oneflow/core/functional/impl/common.h | 10 +++++++++- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/oneflow/core/functional/impl/common.cpp b/oneflow/core/functional/impl/common.cpp index 4c261b37f01..af27a73c0ed 100644 --- a/oneflow/core/functional/impl/common.cpp +++ b/oneflow/core/functional/impl/common.cpp @@ -204,14 +204,6 @@ Maybe CheckInplaceShapeCanExpandTo(const Shape& shape, const Shape& expand return Maybe::Ok(); } -inline Maybe CheckSizeNonNegative(const Shape& shape) { - for (const auto& s : shape) { - CHECK_OR_RETURN(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " - << shape; - } - return Maybe::Ok(); -} - Optional ComputeStride(const Shape& shape, const Stride& stride, const Shape& target_shape) 
{ /************************************************* diff --git a/oneflow/core/functional/impl/common.h b/oneflow/core/functional/impl/common.h index 334edad1418..9a099afc799 100644 --- a/oneflow/core/functional/impl/common.h +++ b/oneflow/core/functional/impl/common.h @@ -39,7 +39,15 @@ Maybe CheckInplaceValid(const std::shared_ptr& x); Maybe CheckInplaceCastValid(const std::shared_ptr& x, const std::shared_ptr& x_cast); Maybe CheckInplaceShapeCanExpandTo(const Shape& shape, const Shape& expand_shape); -Maybe CheckSizeNonNegative(const Shape& shape); + +inline Maybe CheckSizeNonNegative(const Shape& shape) { + for (const auto& s : shape) { + CHECK_OR_RETURN(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " + << shape; + } + return Maybe::Ok(); +} + Optional ComputeStride(const Shape& shape, const Stride& stride, const Shape& target_shape); Maybe InferShapeUnspecifiedDim(const int64_t& elem_count, const Shape& shape); From 314a3ccb0bed678b77dacf1da45db7aa104087ea Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Wed, 29 May 2024 09:43:01 +0000 Subject: [PATCH 17/22] add empty shape check --- python/oneflow/nn/modules/empty.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/python/oneflow/nn/modules/empty.py b/python/oneflow/nn/modules/empty.py index 7050a1c4eb9..0e25547dafa 100644 --- a/python/oneflow/nn/modules/empty.py +++ b/python/oneflow/nn/modules/empty.py @@ -36,6 +36,10 @@ def empty_op( shape = _single(_handle_size_arg(size)) + assert all( + s >= 0 for s in shape + ), f"Trying to create tensor with negative dimension: {shape}" + if dtype is None: dtype = flow.get_default_dtype() if placement is not None: From df929ec337bfc81ebfbedb17d27fd2ad2bcce524 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Thu, 30 May 2024 02:57:24 +0000 Subject: [PATCH 18/22] update adaptive none --- python/oneflow/nn/modules/pooling.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git 
a/python/oneflow/nn/modules/pooling.py b/python/oneflow/nn/modules/pooling.py index 4a0fc547b3b..81942b386c0 100644 --- a/python/oneflow/nn/modules/pooling.py +++ b/python/oneflow/nn/modules/pooling.py @@ -676,7 +676,7 @@ def __init__(self, output_size: _size_1_t) -> None: self.output_size = _single(output_size) assert len(self.output_size) == 1, "'output_size' should contain one int" assert ( - self.output_size[0] >= 0 + self.output_size[0] is None or self.output_size[0] >= 0 ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" def forward(self, x): @@ -747,7 +747,8 @@ def __init__(self, output_size, data_format=None) -> None: self.output_size = _pair(output_size) assert len(self.output_size) == 2, "'output_size' must be 2" assert ( - self.output_size[0] >= 0 and self.output_size[1] >= 0 + (self.output_size[0] is None or self.output_size[0] >= 0) + and (self.output_size[1] is None or self.output_size[1] >= 0) ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" if data_format: if not data_format in ["channels_first", "channels_last"]: @@ -834,9 +835,9 @@ def __init__(self, output_size) -> None: self.output_size = _triple(output_size) assert len(self.output_size) == 3, "'output_size' must be 3" assert ( - self.output_size[0] >= 0 - and self.output_size[1] >= 0 - and self.output_size[2] >= 0 + (self.output_size[0] is None or self.output_size[0] >= 0) + and (self.output_size[1] is None or self.output_size[1] >= 0) + and (self.output_size[2] is None or self.output_size[2] >= 0) ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" def forward(self, x): @@ -907,7 +908,7 @@ def forward(self, input): len(input.shape) == 3 and len(self.output_size) == 1 ), "the length of 'output_size' does not match the input size, 1 expected" assert ( - self.output_size[0] >= 0 + self.output_size[0] is None or self.output_size[0] >= 0 ), f"elements of output_size must be 
greater than or equal to 0, but got {self.output_size}" new_output_size = _generate_output_size(input.shape, self.output_size) return flow.nn.functional.adaptive_max_pool1d( @@ -983,7 +984,8 @@ def forward(self, input): ), f"expected 4-dimensional tensor, but got {len(input.shape)}-dimensional tensor" assert len(self.output_size) == 2, "'output_size' must be 2" assert ( - self.output_size[0] >= 0 and self.output_size[1] >= 0 + (self.output_size[0] is None or self.output_size[0] >= 0) + and (self.output_size[1] is None or self.output_size[1] >= 0) ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" new_output_size = _generate_output_size(input.shape, self.output_size) return flow.nn.functional.adaptive_max_pool2d( @@ -1042,9 +1044,9 @@ def forward(self, input): ), f"expected 5-dimensional tensor, but got {len(input.shape)}-dimensional tensor" assert len(self.output_size) == 3, "'output_size' must be 3" assert ( - self.output_size[0] >= 0 - and self.output_size[1] >= 0 - and self.output_size[2] >= 0 + (self.output_size[0] is None or self.output_size[0] >= 0) + and (self.output_size[1] is None or self.output_size[1] >= 0) + and (self.output_size[2] is None or self.output_size[2] >= 0) ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" new_output_size = _generate_output_size(input.shape, self.output_size) return flow.nn.functional.adaptive_max_pool3d( From 3438b2f954341e68f4e0a716e01cf297ba958829 Mon Sep 17 00:00:00 2001 From: oneflow-ci-bot Date: Thu, 30 May 2024 03:00:32 +0000 Subject: [PATCH 19/22] auto format by CI --- python/oneflow/nn/modules/pooling.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/python/oneflow/nn/modules/pooling.py b/python/oneflow/nn/modules/pooling.py index 81942b386c0..992caef81ce 100644 --- a/python/oneflow/nn/modules/pooling.py +++ b/python/oneflow/nn/modules/pooling.py @@ -746,9 +746,8 @@ def __init__(self, output_size, 
data_format=None) -> None: assert output_size is not None, "'output_size' cannot be NoneType" self.output_size = _pair(output_size) assert len(self.output_size) == 2, "'output_size' must be 2" - assert ( - (self.output_size[0] is None or self.output_size[0] >= 0) - and (self.output_size[1] is None or self.output_size[1] >= 0) + assert (self.output_size[0] is None or self.output_size[0] >= 0) and ( + self.output_size[1] is None or self.output_size[1] >= 0 ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" if data_format: if not data_format in ["channels_first", "channels_last"]: @@ -983,9 +982,8 @@ def forward(self, input): len(input.shape) == 4 ), f"expected 4-dimensional tensor, but got {len(input.shape)}-dimensional tensor" assert len(self.output_size) == 2, "'output_size' must be 2" - assert ( - (self.output_size[0] is None or self.output_size[0] >= 0) - and (self.output_size[1] is None or self.output_size[1] >= 0) + assert (self.output_size[0] is None or self.output_size[0] >= 0) and ( + self.output_size[1] is None or self.output_size[1] >= 0 ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}" new_output_size = _generate_output_size(input.shape, self.output_size) return flow.nn.functional.adaptive_max_pool2d( From 4ce4bab3653028cffdf524faead7fa100a30a30b Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Thu, 30 May 2024 06:21:25 +0000 Subject: [PATCH 20/22] add randn shape check --- oneflow/core/functional/impl/random_functor.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/oneflow/core/functional/impl/random_functor.cpp b/oneflow/core/functional/impl/random_functor.cpp index 03565d51434..6f2170f9e27 100644 --- a/oneflow/core/functional/impl/random_functor.cpp +++ b/oneflow/core/functional/impl/random_functor.cpp @@ -276,6 +276,7 @@ class RandNFunctor { if (dtype.has_value() && !JUST(dtype)->is_floating_point()) { OF_UNIMPLEMENTED() << "Only support floating 
dtype in randn()."; } + JUST(CheckSizeNonNegative(shape)); const auto& out = Optional(); return Normal(static_cast(0), static_cast(1), shape, out, dtype, device, generator, requires_grad); From 4dc75dc5858e88bcf8a5119528c04e122d0dde28 Mon Sep 17 00:00:00 2001 From: Dmovic <944388576@qq.com> Date: Fri, 31 May 2024 06:16:57 +0000 Subject: [PATCH 21/22] update check function --- oneflow/api/python/functional/tensor_api.cpp | 2 +- oneflow/core/functional/impl/common.h | 2 +- .../core/functional/impl/random_functor.cpp | 4 +- python/oneflow/nn/modules/pooling.py | 163 ++++++++---------- 4 files changed, 73 insertions(+), 98 deletions(-) diff --git a/oneflow/api/python/functional/tensor_api.cpp b/oneflow/api/python/functional/tensor_api.cpp index 86b9466e689..b1273f5c083 100644 --- a/oneflow/api/python/functional/tensor_api.cpp +++ b/oneflow/api/python/functional/tensor_api.cpp @@ -203,7 +203,7 @@ class TensorWithShapeGenericCtorFunctor { Maybe operator()(const Shape& shape, const Symbol& dtype, const Optional>& device) const { // NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now. 
- JUST(CheckSizeNonNegative(shape)); + JUST(CheckShapeNonNegative(shape)); LazyMode::Guard lazy_mode_disabled_guard(/*is_enabled*/ false); Symbol device_; if (device) { diff --git a/oneflow/core/functional/impl/common.h b/oneflow/core/functional/impl/common.h index 9a099afc799..c548ddea178 100644 --- a/oneflow/core/functional/impl/common.h +++ b/oneflow/core/functional/impl/common.h @@ -40,7 +40,7 @@ Maybe CheckInplaceCastValid(const std::shared_ptr& x, const std::shared_ptr& x_cast); Maybe CheckInplaceShapeCanExpandTo(const Shape& shape, const Shape& expand_shape); -inline Maybe CheckSizeNonNegative(const Shape& shape) { +inline Maybe CheckShapeNonNegative(const Shape& shape) { for (const auto& s : shape) { CHECK_OR_RETURN(s >= 0) << "Trying to create tensor with negative dimension " << s << ": " << shape; diff --git a/oneflow/core/functional/impl/random_functor.cpp b/oneflow/core/functional/impl/random_functor.cpp index 6f2170f9e27..94751591958 100644 --- a/oneflow/core/functional/impl/random_functor.cpp +++ b/oneflow/core/functional/impl/random_functor.cpp @@ -202,7 +202,7 @@ class RandFunctor { OF_UNIMPLEMENTED() << "Only support floating dtype in rand()."; } } - JUST(CheckSizeNonNegative(shape)); + JUST(CheckShapeNonNegative(shape)); auto gen = generator.value_or(JUST(one::DefaultAutoGenerator())); gen = JUST(GetGeneratorForLazyOrGlobal(gen, LazyMode::is_enabled(), NullOpt, NullOpt)); @@ -276,7 +276,7 @@ class RandNFunctor { if (dtype.has_value() && !JUST(dtype)->is_floating_point()) { OF_UNIMPLEMENTED() << "Only support floating dtype in randn()."; } - JUST(CheckSizeNonNegative(shape)); + JUST(CheckShapeNonNegative(shape)); const auto& out = Optional(); return Normal(static_cast(0), static_cast(1), shape, out, dtype, device, generator, requires_grad); diff --git a/python/oneflow/nn/modules/pooling.py b/python/oneflow/nn/modules/pooling.py index 992caef81ce..ae648f435b2 100644 --- a/python/oneflow/nn/modules/pooling.py +++ 
b/python/oneflow/nn/modules/pooling.py @@ -1052,41 +1052,76 @@ def forward(self, input): ) -def _unpool_output_size_check( - input, - kernel_size: List[int], - stride: List[int], - padding: List[int], - output_size: Optional[List[int]], -) -> List[int]: - input_size = input.size() - default_size = [] - for d in range(len(kernel_size)): - default_size.append( - (input_size[-len(kernel_size) + d] - 1) * stride[d] - + kernel_size[d] - - 2 * padding[d] - ) - if output_size is None: - ret = default_size - else: - if len(output_size) == len(kernel_size) + 2: - output_size = output_size[2:] - if len(output_size) != len(kernel_size): - raise ValueError( - "output_size should be a sequence containing " - f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'" - ) +def _unpool_input_check(module, x, indices, output_size): + def _unpool_output_size_check( + input, + kernel_size: List[int], + stride: List[int], + padding: List[int], + output_size: Optional[List[int]], + ) -> List[int]: + input_size = input.size() + default_size = [] for d in range(len(kernel_size)): - min_size = default_size[d] - stride[d] - max_size = default_size[d] + stride[d] - if not (min_size < output_size[d] < max_size): + default_size.append( + (input_size[-len(kernel_size) + d] - 1) * stride[d] + + kernel_size[d] + - 2 * padding[d] + ) + if output_size is None: + ret = default_size + else: + if len(output_size) == len(kernel_size) + 2: + output_size = output_size[2:] + if len(output_size) != len(kernel_size): raise ValueError( - f'invalid output_size "{output_size}" (dim {d} must be between {min_size} and {max_size})' + "output_size should be a sequence containing " + f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'" ) + for d in range(len(kernel_size)): + min_size = default_size[d] - stride[d] + max_size = default_size[d] + stride[d] + if not (min_size < output_size[d] < max_size): + raise ValueError( + 
f'invalid output_size "{output_size}" (dim {d} must be between {min_size} and {max_size})' + ) + + ret = output_size + return ret + + if isinstance(module, MaxUnpool1d): + functor = _single + expected_out_size = 1 + elif isinstance(module, MaxUnpool2d): + functor = _pair + expected_out_size = 2 + elif isinstance(module, MaxUnpool3d): + functor = _triple + expected_out_size = 3 + else: + raise NotImplementedError("Not implemented") - ret = output_size - return ret + kernel_size = functor(module.kernel_size) + if module.stride is not None: + _stride = functor(module.stride) + else: + _stride = kernel_size + padding = functor(module.padding) + check_output_size = _unpool_output_size_check( + x, kernel_size, _stride, padding, output_size + ) + assert ( + len(check_output_size) == expected_out_size + ), f"There should be exactly {expected_out_size} element in output_size, but got {len(check_output_size)}" + assert ( + indices.dtype == flow.int64 + ), f"elements in indices should be type int64 but got: {indices.dtype}" + assert ( + (len(x.size()) == (expected_out_size + 1)) or (len(x.size()) == expected_out_size + 2) + ), f"Input to max_unpooling1d should be a {expected_out_size + 1}d or {expected_out_size + 2}d Tensor, but got {len(x.size())} dimensions" + assert ( + x.size() == indices.size() + ), f"Expected shape of indices to be same as that of the input tensor" class MaxUnpool1d(Module): @@ -1164,27 +1199,7 @@ def __init__( self.padding = padding def forward(self, x, indices, output_size=None): - kernel_size = _single(self.kernel_size) - if self.stride is not None: - _stride = _single(self.stride) - else: - _stride = kernel_size - padding = _single(self.padding) - check_output_size = _unpool_output_size_check( - x, kernel_size, _stride, padding, output_size - ) - assert ( - len(check_output_size) == 1 - ), f"There should be exactly one element in output_size, but got {len(check_output_size)}" - assert ( - indices.dtype == flow.int64 - ), f"elements in indices should be 
type int64 but got: {indices.dtype}" - assert ( - len(x.size()) == 2 or len(x.size()) == 3 - ), f"Input to max_unpooling1d should be a 2d or 3d Tensor, but got {len(x.size())} dimensions" - assert ( - x.size() == indices.size() - ), f"Expected shape of indices to be same as that of the input tensor" + _unpool_input_check(self, x, indices, output_size) return flow._C.max_unpool1d( x, indices, self.kernel_size, self.stride, self.padding, output_size ) @@ -1273,27 +1288,7 @@ def __init__( self.padding = padding def forward(self, x, indices, output_size=None): - kernel_size = _pair(self.kernel_size) - if self.stride is not None: - _stride = _pair(self.stride) - else: - _stride = kernel_size - padding = _pair(self.padding) - check_output_size = _unpool_output_size_check( - x, kernel_size, _stride, padding, output_size - ) - assert ( - len(check_output_size) == 2 - ), f"There should be exactly two elements in output_size, but got {len(check_output_size)}" - assert ( - indices.dtype == flow.int64 - ), f"elements in indices should be type int64 but got: {indices.dtype}" - assert ( - len(x.size()) == 3 or len(x.size()) == 4 - ), f"Input to max_unpooling1d should be a 3d or 4d Tensor, but got {len(x.size())} dimensions" - assert ( - x.size() == indices.size() - ), f"Expected shape of indices to be same as that of the input tensor" + _unpool_input_check(self, x, indices, output_size) return flow._C.max_unpool2d( x, indices, self.kernel_size, self.stride, self.padding, output_size ) @@ -1372,27 +1367,7 @@ def __init__( self.padding = padding def forward(self, x, indices, output_size=None): - kernel_size = _triple(self.kernel_size) - if self.stride is not None: - _stride = _triple(self.stride) - else: - _stride = kernel_size - padding = _triple(self.padding) - check_output_size = _unpool_output_size_check( - x, kernel_size, _stride, padding, output_size - ) - assert ( - len(check_output_size) == 3 - ), f"There should be exactly three elements in output_size, but got 
{len(check_output_size)}" - assert ( - indices.dtype == flow.int64 - ), f"elements in indices should be type int64 but got: {indices.dtype}" - assert ( - len(x.size()) == 4 or len(x.size()) == 5 - ), f"Input to max_unpooling1d should be a 4d or 5d Tensor, but got {len(x.size())} dimensions" - assert ( - x.size() == indices.size() - ), f"Expected shape of indices to be same as that of the input tensor" + _unpool_input_check(self, x, indices, output_size) return flow._C.max_unpool3d( x, indices, self.kernel_size, self.stride, self.padding, output_size ) From f0e5e24c87e7dd8768c6646edae9d3bd90f25819 Mon Sep 17 00:00:00 2001 From: oneflow-ci-bot Date: Fri, 31 May 2024 06:18:53 +0000 Subject: [PATCH 22/22] auto format by CI --- python/oneflow/nn/modules/pooling.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/oneflow/nn/modules/pooling.py b/python/oneflow/nn/modules/pooling.py index ae648f435b2..81a166928c4 100644 --- a/python/oneflow/nn/modules/pooling.py +++ b/python/oneflow/nn/modules/pooling.py @@ -1116,8 +1116,8 @@ def _unpool_output_size_check( assert ( indices.dtype == flow.int64 ), f"elements in indices should be type int64 but got: {indices.dtype}" - assert ( - (len(x.size()) == (expected_out_size + 1)) or (len(x.size()) == expected_out_size + 2) + assert (len(x.size()) == (expected_out_size + 1)) or ( + len(x.size()) == expected_out_size + 2 ), f"Input to max_unpooling1d should be a {expected_out_size + 1}d or {expected_out_size + 2}d Tensor, but got {len(x.size())} dimensions" assert ( x.size() == indices.size()