From 420cf62838f03df50953e3c28001a7cf18e43d72 Mon Sep 17 00:00:00 2001
From: Flipbook <26732651+TheFlipbook@users.noreply.github.com>
Date: Sun, 7 Apr 2024 14:13:22 -0700
Subject: [PATCH] Fixed mismatch type errors

* cited in macOS CI tests
* Missed in original updates based on PR feedback in
  https://github.com/ggerganov/llama.cpp/pull/6519
---
 llama.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index d700a53fe811b..7e0ef2991be94 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -15536,9 +15536,9 @@ float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
         if (j < 0) {
             throw std::runtime_error(format("batch.logits[%d] != true", i));
         }
-        if ((size_t) j >= ctx->n_outputs) {
+        if (j >= ctx->n_outputs) {
             // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%lu)", j, ctx->n_outputs));
+            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
         }
 
         return ctx->logits + j*ctx->model.hparams.n_vocab;
@@ -15581,9 +15581,9 @@ float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
         if (j < 0) {
             throw std::runtime_error(format("batch.logits[%d] != true", i));
         }
-        if ((size_t) j >= ctx->n_outputs) {
+        if (j >= ctx->n_outputs) {
             // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%lu)", j, ctx->n_outputs));
+            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
         }
 
         return ctx->embd + j*ctx->model.hparams.n_embd;
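
Note on the change: the patch drops the (size_t) cast and switches the "%lu" specifier to "%d", presumably because ctx->n_outputs became a signed 32-bit count after the feedback in PR #6519; a printf-style specifier that does not match the argument type is what clang's -Wformat flags in the macOS CI. Below is a minimal standalone C++ sketch of that rule, with hypothetical stand-in variables rather than llama.cpp code:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Stand-ins for a signed 32-bit output count and index.
        int32_t n_outputs = 32;
        int32_t j = 64;

        // Mismatched: "%lu" expects unsigned long, but an int32_t is passed.
        // Compilers with -Wformat (including clang in macOS CI) warn here.
        // std::printf("corrupt output buffer (j=%d, n_outputs=%lu)\n", j, n_outputs);

        // Matched: "%d" agrees with the int32_t arguments, as in the patch.
        std::printf("corrupt output buffer (j=%d, n_outputs=%d)\n", j, n_outputs);

        // With a signed count, the (size_t) cast in the bounds check is also
        // unnecessary; a plain signed comparison is sufficient and avoids a
        // signed/unsigned comparison warning.
        if (j >= n_outputs) {
            std::printf("index out of range\n");
        }
        return 0;
    }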