From 940b1e8f636e24007f659e0f54c9c03cb6f61898 Mon Sep 17 00:00:00 2001
From: Douglas Hanley
Date: Mon, 24 Jun 2024 00:55:16 -0500
Subject: [PATCH] fix output counting comment

---
 llama.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index a0c9d1d745c986..0566adca51a79a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -12618,8 +12618,7 @@ static int llama_decode_internal(
     std::vector<llama_seq_id *>            seq_id_arr;
     std::vector<std::vector<llama_seq_id>> seq_id;
 
-    // this indicates we are doing pooling on an embedding model. non-embedding models always
-    // use "output_ids" so we need to preserve all outputs in that case (somewhat inefficiently)
+    // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens
     bool embed_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE;
 
     // count outputs
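
Note (not part of the patch): below is a minimal sketch of the output-counting
rule the new comment describes, using a hypothetical simplified batch struct
(batch_view) and a standalone helper (count_outputs) rather than the real
llama_batch and llama_decode_internal internals. The intent it illustrates:
when pooled embedding is active, batch.logits is ignored and every token
produces an output; otherwise batch.logits (when present) selects which
tokens do.

#include <cstdint>

// Hypothetical stand-in for llama_batch, reduced to the fields used here.
struct batch_view {
    int32_t        n_tokens;
    const int8_t * logits; // per-token output flags; may be null
};

// Count how many outputs the decode step must reserve.
static int32_t count_outputs(const batch_view & batch, bool embed_pooled) {
    if (embed_pooled) {
        // pooled embedding: ignore batch.logits and output all tokens
        return batch.n_tokens;
    }
    if (batch.logits == nullptr) {
        // no per-token flags: keep only the last token's output
        return 1;
    }
    // otherwise, count the tokens explicitly flagged for output
    int32_t n_outputs = 0;
    for (int32_t i = 0; i < batch.n_tokens; ++i) {
        n_outputs += batch.logits[i] != 0;
    }
    return n_outputs;
}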