diff --git a/oneflow/api/python/functional/tensor_api.cpp b/oneflow/api/python/functional/tensor_api.cpp
index 32e12312cd9..b1273f5c083 100644
--- a/oneflow/api/python/functional/tensor_api.cpp
+++ b/oneflow/api/python/functional/tensor_api.cpp
@@ -203,6 +203,7 @@ class TensorWithShapeGenericCtorFunctor {
   Maybe<Tensor> operator()(const Shape& shape, const Symbol<DType>& dtype,
                            const Optional<Symbol<Device>>& device) const {
     // NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now.
+    JUST(CheckShapeNonNegative(shape));
    LazyMode::Guard lazy_mode_disabled_guard(/*is_enabled*/ false);
     Symbol<Device> device_;
     if (device) {
diff --git a/oneflow/core/functional/impl/common.h b/oneflow/core/functional/impl/common.h
index cb97f4f616b..c548ddea178 100644
--- a/oneflow/core/functional/impl/common.h
+++ b/oneflow/core/functional/impl/common.h
@@ -39,6 +39,15 @@
 Maybe<void> CheckInplaceValid(const std::shared_ptr<Tensor>& x);
 Maybe<void> CheckInplaceCastValid(const std::shared_ptr<Tensor>& x,
                                   const std::shared_ptr<Tensor>& x_cast);
 Maybe<void> CheckInplaceShapeCanExpandTo(const Shape& shape, const Shape& expand_shape);
+
+inline Maybe<void> CheckShapeNonNegative(const Shape& shape) {
+  for (const auto& s : shape) {
+    CHECK_OR_RETURN(s >= 0) << "Trying to create tensor with negative dimension " << s << ": "
+                            << shape;
+  }
+  return Maybe<void>::Ok();
+}
+
 Optional<Stride> ComputeStride(const Shape& shape, const Stride& stride, const Shape& target_shape);
 Maybe<Shape> InferShapeUnspecifiedDim(const int64_t& elem_count, const Shape& shape);
diff --git a/oneflow/core/functional/impl/random_functor.cpp b/oneflow/core/functional/impl/random_functor.cpp
index 5a97e29ab1d..94751591958 100644
--- a/oneflow/core/functional/impl/random_functor.cpp
+++ b/oneflow/core/functional/impl/random_functor.cpp
@@ -202,6 +202,7 @@ class RandFunctor {
         OF_UNIMPLEMENTED() << "Only support floating dtype in rand().";
       }
     }
+    JUST(CheckShapeNonNegative(shape));
     auto gen = generator.value_or(JUST(one::DefaultAutoGenerator()));
     gen = JUST(GetGeneratorForLazyOrGlobal(gen, LazyMode::is_enabled(), NullOpt, NullOpt));
 
@@ -275,6 +276,7 @@ class RandNFunctor {
     if (dtype.has_value() && !JUST(dtype)->is_floating_point()) {
       OF_UNIMPLEMENTED() << "Only support floating dtype in randn().";
     }
+    JUST(CheckShapeNonNegative(shape));
     const auto& out = Optional<one::Tensor>();
     return Normal(static_cast<double>(0), static_cast<double>(1), shape, out, dtype, device,
                   generator, requires_grad);
diff --git a/python/oneflow/nn/modules/constant.py b/python/oneflow/nn/modules/constant.py
index 9a8ca57da1a..c2ac7762344 100644
--- a/python/oneflow/nn/modules/constant.py
+++ b/python/oneflow/nn/modules/constant.py
@@ -44,6 +44,9 @@ def __init__(
             self.device = flow.device(self.device)
         self.requires_grad = requires_grad
         size = _single(size)
+        assert all(
+            s >= 0 for s in size
+        ), f"Trying to create tensor with negative dimension: {size}"
         if dtype is None:
             dtype = flow.get_default_dtype()
         if placement is None:
diff --git a/python/oneflow/nn/modules/empty.py b/python/oneflow/nn/modules/empty.py
index 7050a1c4eb9..0e25547dafa 100644
--- a/python/oneflow/nn/modules/empty.py
+++ b/python/oneflow/nn/modules/empty.py
@@ -36,6 +36,10 @@ def empty_op(
 
     shape = _single(_handle_size_arg(size))
 
+    assert all(
+        s >= 0 for s in shape
+    ), f"Trying to create tensor with negative dimension: {shape}"
+
     if dtype is None:
         dtype = flow.get_default_dtype()
     if placement is not None:
diff --git a/python/oneflow/nn/modules/pooling.py b/python/oneflow/nn/modules/pooling.py
index d764e69e581..81a166928c4 100644
--- a/python/oneflow/nn/modules/pooling.py
+++ b/python/oneflow/nn/modules/pooling.py
@@ -674,6 +674,10 @@ def __init__(self, output_size: _size_1_t) -> None:
         super().__init__()
         assert output_size is not None, "'output_size' cannot be NoneType"
         self.output_size = _single(output_size)
+        assert len(self.output_size) == 1, "'output_size' should contain one int"
+        assert (
+            self.output_size[0] is None or self.output_size[0] >= 0
+        ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"
 
     def forward(self, x):
         assert (
@@ -741,6 +745,10 @@ def __init__(self, output_size, data_format=None) -> None:
         super().__init__()
         assert output_size is not None, "'output_size' cannot be NoneType"
         self.output_size = _pair(output_size)
+        assert len(self.output_size) == 2, "'output_size' should contain two ints"
+        assert (self.output_size[0] is None or self.output_size[0] >= 0) and (
+            self.output_size[1] is None or self.output_size[1] >= 0
+        ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"
         if data_format:
             if not data_format in ["channels_first", "channels_last"]:
                 raise ValueError(
@@ -824,6 +832,12 @@ def __init__(self, output_size) -> None:
         super().__init__()
         assert output_size is not None, "'output_size' cannot be NoneType"
         self.output_size = _triple(output_size)
+        assert len(self.output_size) == 3, "'output_size' should contain three ints"
+        assert (
+            (self.output_size[0] is None or self.output_size[0] >= 0)
+            and (self.output_size[1] is None or self.output_size[1] >= 0)
+            and (self.output_size[2] is None or self.output_size[2] >= 0)
+        ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"
 
     def forward(self, x):
         assert (
@@ -892,6 +906,9 @@ def forward(self, input):
         assert (
             len(input.shape) == 3 and len(self.output_size) == 1
         ), "the length of 'output_size' does not match the input size, 1 expected"
+        assert (
+            self.output_size[0] is None or self.output_size[0] >= 0
+        ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"
         new_output_size = _generate_output_size(input.shape, self.output_size)
         return flow.nn.functional.adaptive_max_pool1d(
             input, self.output_size, self.return_indices
@@ -964,6 +981,10 @@ def forward(self, input):
         assert (
             len(input.shape) == 4
         ), f"expected 4-dimensional tensor, but got {len(input.shape)}-dimensional tensor"
+        assert len(self.output_size) == 2, "'output_size' should contain two ints"
+        assert (self.output_size[0] is None or self.output_size[0] >= 0) and (
+            self.output_size[1] is None or self.output_size[1] >= 0
+        ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"
         new_output_size = _generate_output_size(input.shape, self.output_size)
         return flow.nn.functional.adaptive_max_pool2d(
             input, self.output_size, self.return_indices, self.channel_pos
@@ -1019,12 +1040,90 @@ def forward(self, input):
         assert (
             len(input.shape) == 5
         ), f"expected 5-dimensional tensor, but got {len(input.shape)}-dimensional tensor"
+        assert len(self.output_size) == 3, "'output_size' should contain three ints"
+        assert (
+            (self.output_size[0] is None or self.output_size[0] >= 0)
+            and (self.output_size[1] is None or self.output_size[1] >= 0)
+            and (self.output_size[2] is None or self.output_size[2] >= 0)
+        ), f"elements of output_size must be greater than or equal to 0, but got {self.output_size}"
         new_output_size = _generate_output_size(input.shape, self.output_size)
         return flow.nn.functional.adaptive_max_pool3d(
             input, self.output_size, self.return_indices
         )
 
 
+def _unpool_input_check(module, x, indices, output_size):
+    def _unpool_output_size_check(
+        input,
+        kernel_size: List[int],
+        stride: List[int],
+        padding: List[int],
+        output_size: Optional[List[int]],
+    ) -> List[int]:
+        input_size = input.size()
+        default_size = []
+        for d in range(len(kernel_size)):
+            default_size.append(
+                (input_size[-len(kernel_size) + d] - 1) * stride[d]
+                + kernel_size[d]
+                - 2 * padding[d]
+            )
+        if output_size is None:
+            ret = default_size
+        else:
+            if len(output_size) == len(kernel_size) + 2:
+                output_size = output_size[2:]
+            if len(output_size) != len(kernel_size):
+                raise ValueError(
+                    "output_size should be a sequence containing "
+                    f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'"
+                )
+            for d in range(len(kernel_size)):
+                min_size = default_size[d] - stride[d]
+                max_size = default_size[d] + stride[d]
+                if not (min_size < output_size[d] < max_size):
+                    raise ValueError(
+                        f'invalid output_size "{output_size}" (dim {d} must be between {min_size} and {max_size})'
+                    )
+
+            ret = output_size
+        return ret
+
+    if isinstance(module, MaxUnpool1d):
+        functor = _single
+        expected_out_size = 1
+    elif isinstance(module, MaxUnpool2d):
+        functor = _pair
+        expected_out_size = 2
+    elif isinstance(module, MaxUnpool3d):
+        functor = _triple
+        expected_out_size = 3
+    else:
+        raise NotImplementedError("expected MaxUnpool1d, MaxUnpool2d, or MaxUnpool3d")
+
+    kernel_size = functor(module.kernel_size)
+    if module.stride is not None:
+        _stride = functor(module.stride)
+    else:
+        _stride = kernel_size
+    padding = functor(module.padding)
+    check_output_size = _unpool_output_size_check(
+        x, kernel_size, _stride, padding, output_size
+    )
+    assert (
+        len(check_output_size) == expected_out_size
+    ), f"There should be exactly {expected_out_size} elements in output_size, but got {len(check_output_size)}"
+    assert (
+        indices.dtype == flow.int64
+    ), f"elements in indices should be type int64 but got: {indices.dtype}"
+    assert (len(x.size()) == (expected_out_size + 1)) or (
+        len(x.size()) == expected_out_size + 2
+    ), f"Input to max_unpool{expected_out_size}d should be a {expected_out_size + 1}d or {expected_out_size + 2}d Tensor, but got {len(x.size())} dimensions"
+    assert (
+        x.size() == indices.size()
+    ), "Expected shape of indices to be the same as that of the input tensor"
+
+
 class MaxUnpool1d(Module):
     r"""Computes a partial inverse of :class:`MaxPool1d`.
 
@@ -1100,6 +1199,7 @@ def __init__(
         self.padding = padding
 
     def forward(self, x, indices, output_size=None):
+        _unpool_input_check(self, x, indices, output_size)
         return flow._C.max_unpool1d(
             x, indices, self.kernel_size, self.stride, self.padding, output_size
         )
@@ -1188,6 +1288,7 @@ def __init__(
         self.padding = padding
 
     def forward(self, x, indices, output_size=None):
+        _unpool_input_check(self, x, indices, output_size)
         return flow._C.max_unpool2d(
             x, indices, self.kernel_size, self.stride, self.padding, output_size
         )
@@ -1266,6 +1367,7 @@ def __init__(
         self.padding = padding
 
     def forward(self, x, indices, output_size=None):
+        _unpool_input_check(self, x, indices, output_size)
         return flow._C.max_unpool3d(
             x, indices, self.kernel_size, self.stride, self.padding, output_size
         )
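
For reference, a minimal usage sketch of the negative-dimension guards above. The exact exception types are assumptions: CHECK_OR_RETURN in the C++ functors should surface in Python as a RuntimeError-like oneflow error, while the asserts added to constant.py and empty.py raise AssertionError.

    import oneflow as flow

    # Paths guarded by the C++ CheckShapeNonNegative check
    # (flow.Tensor, flow.rand, flow.randn): a negative extent is
    # rejected before any allocation happens.
    for ctor in (flow.rand, flow.randn):
        try:
            ctor(2, -1)
        except Exception as e:  # assumed to surface as a RuntimeError-like error
            print(type(e).__name__, e)

    # Paths guarded by the Python-side asserts (e.g. flow.empty):
    try:
        flow.empty(2, -1)
    except AssertionError as e:
        print(e)  # message echoes the offending shape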
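Likewise, the new adaptive-pooling asserts validate output_size eagerly rather than at the first forward; a sketch (the module name is one touched above, the surrounding usage is assumed):

    import oneflow as flow

    # Each element of output_size must be None (inherit that dim) or >= 0.
    pool = flow.nn.AdaptiveAvgPool2d((5, None))  # accepted
    try:
        flow.nn.AdaptiveAvgPool2d((-1, 5))       # rejected at construction
    except AssertionError as e:
        print(e)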
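Finally, the window that _unpool_output_size_check enforces can be reproduced standalone: for each spatial dim d the default unpooled size is (in_d - 1) * stride_d + kernel_d - 2 * padding_d, and an explicit output_size must lie strictly between default - stride_d and default + stride_d. The helper below is illustrative only, not part of the patch:

    # Illustrative only: re-derives the bounds used by _unpool_output_size_check.
    def unpool_bounds(input_spatial, kernel_size, stride, padding):
        bounds = []
        for d in range(len(kernel_size)):
            default = (
                (input_spatial[d] - 1) * stride[d] + kernel_size[d] - 2 * padding[d]
            )
            bounds.append((default - stride[d], default, default + stride[d]))
        return bounds

    # A (N, C, 4) input through MaxUnpool1d(kernel_size=2, stride=2):
    # default = (4 - 1) * 2 + 2 - 0 = 8, so output_size must satisfy 6 < L < 10.
    print(unpool_bounds([4], [2], [2], [0]))  # [(6, 8, 10)]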