llama : fix n_vocab init for 'no_vocab' case (ggerganov#9511)
* llama: fixed n_vocab for `no_vocab` models

* llama: updated error output for `llama_decode_internal` and `llama_encode_internal`

* llama: log warning if there's no vocab_size in metadata

* llama: correct vocab size for logging

Co-authored-by: Georgi Gerganov <[email protected]>

---------

Co-authored-by: Georgi Gerganov <[email protected]>
2 people authored and arthw committed Nov 15, 2024
1 parent eeca119 commit 051580c
Showing 1 changed file with 12 additions and 5 deletions: src/llama.cpp
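Before the diff, the gist of the fix in isolation: read the vocab size from model metadata and, when the key is absent, fall back to 0 with a warning instead of leaving `vocab.n_vocab` uninitialized. A minimal standalone sketch of that pattern, where the `read_u32` helper and the plain `"vocab_size"` key are hypothetical stand-ins for llama.cpp's `ml.get_key(LLM_KV_VOCAB_SIZE, ...)`:

```cpp
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

// hypothetical stand-in for the GGUF metadata lookup done by ml.get_key():
// returns false when the key is missing instead of aborting
static bool read_u32(const std::map<std::string, uint32_t> & meta,
                     const std::string & key, uint32_t & out) {
    auto it = meta.find(key);
    if (it == meta.end()) {
        return false;
    }
    out = it->second;
    return true;
}

int main() {
    std::map<std::string, uint32_t> meta; // no "vocab_size" entry, like a no_vocab model

    uint32_t n_vocab = 0;
    if (!read_u32(meta, "vocab_size", n_vocab)) {
        n_vocab = 0; // explicit fallback, mirroring the patch
        fprintf(stderr, "%s: there is no vocab_size in metadata, n_vocab will be set to %u\n",
                __func__, n_vocab);
    }
    printf("n_vocab = %u\n", n_vocab);
    return 0;
}
```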
@@ -6107,8 +6107,15 @@ static void llm_load_vocab(
         vocab.special_mask_id = -1;
         vocab.linefeed_id     = -1;
 
+        // read vocab size from metadata
+        if (!ml.get_key(LLM_KV_VOCAB_SIZE, vocab.n_vocab, false)) {
+            vocab.n_vocab = 0;
+            LLAMA_LOG_WARN("%s: there is no vocab_size in metadata, vocab.n_vocab will be set to %u\n", __func__, vocab.n_vocab);
+        }
         return;
-    } else if (tokenizer_model == "llama") {
+    }
+
+    if (tokenizer_model == "llama") {
         vocab.type = LLAMA_VOCAB_TYPE_SPM;
 
         // default special tokens
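The caller-visible effect, sketched against the public llama.h API of this era (`llama_load_model_from_file`, `llama_n_vocab`); the model path is a placeholder and the build setup is assumed:

```cpp
// build sketch: compile and link against llama.cpp
#include "llama.h"

#include <cstdio>

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();

    // hypothetical vocab-free (e.g. encoder/embedding-only) GGUF model
    llama_model * model = llama_load_model_from_file("model-no-vocab.gguf", mparams);
    if (model == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // with this fix, n_vocab reflects the vocab_size metadata key, or 0 if absent
    if (llama_n_vocab(model) == 0) {
        fprintf(stderr, "model has no vocab: skip tokenization paths\n");
    }

    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```

For a vocab-free model, `llama_n_vocab()` now returns the metadata value, or 0 when the key is missing, so a guard like the one above behaves predictably instead of reading an uninitialized field.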
@@ -16657,7 +16664,7 @@ static int llama_decode_internal(
     const uint32_t n_tokens_all = batch_all.n_tokens;
 
     if (n_tokens_all == 0) {
-        LLAMA_LOG_ERROR("%s: n_tokens == 0", __func__);
+        LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
         return -1;
     }
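The `\n` added in these hunks matters because `LLAMA_LOG_ERROR` is printf-style and, as the patch implies, appends no newline itself, so consecutive messages would otherwise run together on one line. A tiny illustration with a stand-in macro:

```cpp
#include <cstdio>

// stand-in for llama.cpp's printf-style logging macros
// (assumption matching the patch: no implicit trailing newline)
#define LOG_ERROR(...) fprintf(stderr, __VA_ARGS__)

int main() {
    // before the fix: two messages end up glued together on one line
    LOG_ERROR("%s: n_tokens == 0", "llama_decode_internal");
    LOG_ERROR("%s: invalid token[%d] = %d", "llama_decode_internal", 0, -1);
    fputc('\n', stderr);

    // after the fix: each message is newline-terminated
    LOG_ERROR("%s: n_tokens == 0\n", "llama_decode_internal");
    LOG_ERROR("%s: invalid token[%d] = %d\n", "llama_decode_internal", 0, -1);
    return 0;
}
```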

@@ -16670,7 +16677,7 @@
     if (batch_all.token) {
         for (uint32_t i = 0; i < n_tokens_all; ++i) {
             if (batch_all.token[i] < 0 || (uint32_t)batch_all.token[i] >= model.vocab.n_vocab) {
-                LLAMA_LOG_ERROR("%s: invalid token[%d] = %d", __func__, i, batch_all.token[i]);
+                LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch_all.token[i]);
                 return -1;
             }
         }
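This bounds check also interacts with the `no_vocab` fix: with `vocab.n_vocab` now set to 0 for vocab-free models, every token id fails the range test and decode/encode bail out early instead of indexing a missing vocabulary. The check as a standalone predicate (an illustrative helper, not a llama.cpp function):

```cpp
#include <cassert>
#include <cstdint>

// illustrative helper mirroring the in-loop check:
// token >= 0 rejects negatives; the unsigned cast then bounds-checks against n_vocab
static bool token_in_range(int32_t token, uint32_t n_vocab) {
    return token >= 0 && (uint32_t) token < n_vocab;
}

int main() {
    assert( token_in_range(5, 32000));  // ordinary vocab
    assert(!token_in_range(-1, 32000)); // negative id
    assert(!token_in_range(5, 0));      // no_vocab model: n_vocab == 0 rejects every id
    return 0;
}
```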
@@ -16958,7 +16965,7 @@ static int llama_encode_internal(
     const uint32_t n_tokens = batch.n_tokens;
 
     if (n_tokens == 0) {
-        LLAMA_LOG_ERROR("%s: n_tokens == 0", __func__);
+        LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
         return -1;
     }

Expand All @@ -16971,7 +16978,7 @@ static int llama_encode_internal(
if (batch.token) {
for (uint32_t i = 0; i < n_tokens; ++i) {
if (batch.token[i] < 0 || (uint32_t)batch.token[i] >= model.vocab.n_vocab) {
LLAMA_LOG_ERROR("%s: invalid token[%d] = %d", __func__, i, batch.token[i]);
LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]);
return -1;
}
}