From 5b359bb1e3585de45bec79fd6c18934897662cdf Mon Sep 17 00:00:00 2001
From: SXX
Date: Sat, 9 Nov 2024 15:35:46 +0800
Subject: [PATCH] ggml: fix zero division in ‘dne’ calculation in CUDA
 COUNT_EQUAL operator when ‘ne’ is small (#10213)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ggml/src/ggml-cuda/count-equal.cu | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-cuda/count-equal.cu b/ggml/src/ggml-cuda/count-equal.cu
index ffb053b101818..08898115daed2 100644
--- a/ggml/src/ggml-cuda/count-equal.cu
+++ b/ggml/src/ggml-cuda/count-equal.cu
@@ -44,7 +44,7 @@ void ggml_cuda_count_equal(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const int64_t ne = ggml_nelements(src0);
     GGML_ASSERT(ne < (1 << 30) && "atomicAdd implementation only supports int");

-    const int64_t dne = GGML_PAD(ne / (4*nsm), CUDA_COUNT_EQUAL_CHUNK_SIZE);
+    const int64_t dne = GGML_PAD((ne + 4*nsm - 1) / (4*nsm), CUDA_COUNT_EQUAL_CHUNK_SIZE);

     CUDA_CHECK(cudaMemsetAsync(dst_d, 0, ggml_nbytes(dst), stream));
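
For context (not part of the patch): the old expression uses integer floor division, so when the tensor is small (`ne < 4*nsm`) it evaluates to 0 and `dne` ends up as 0; the new expression is a ceiling division, which guarantees at least one element per chunk before padding. Below is a minimal host-side C++ sketch illustrating the difference. It is an assumption-laden illustration, not the upstream code: `GGML_PAD` is reproduced as a simplified arithmetic round-up (the real macro may be defined differently but behaves the same for power-of-two alignments), the chunk size is assumed to be 128, and `nsm` stands in for the SM count that the real code queries from the device at runtime.

```cpp
#include <cstdint>
#include <cstdio>

// Assumption: simplified round-up-to-multiple-of-n, mirroring GGML_PAD's behaviour.
#define GGML_PAD(x, n) ((((x) + (n) - 1) / (n)) * (n))

// Assumption: chunk size used by the COUNT_EQUAL kernel (taken to be 128 here).
static const int64_t CUDA_COUNT_EQUAL_CHUNK_SIZE = 128;

int main() {
    const int64_t nsm = 80;   // hypothetical number of streaming multiprocessors
    const int64_t ne  = 100;  // a "small" tensor: ne < 4*nsm

    // Old: floor division yields 0 when ne < 4*nsm, so dne == 0 and the
    // downstream work-splitting arithmetic divides by zero (per the commit title).
    const int64_t dne_old = GGML_PAD(ne / (4*nsm), CUDA_COUNT_EQUAL_CHUNK_SIZE);

    // New: ceiling division yields at least 1, so dne is padded up to a
    // nonzero multiple of the chunk size.
    const int64_t dne_new = GGML_PAD((ne + 4*nsm - 1) / (4*nsm), CUDA_COUNT_EQUAL_CHUNK_SIZE);

    // Prints: dne_old = 0, dne_new = 128
    printf("dne_old = %lld, dne_new = %lld\n", (long long) dne_old, (long long) dne_new);
    return 0;
}
```

The `(x + d - 1) / d` form is the standard integer ceiling-division idiom; it changes nothing for large tensors (where the old and new values differ by at most one chunk after padding) while removing the zero result for small ones.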