diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index ae23b1271212f..166e5ded2f90f 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -967,10 +967,14 @@ def set_vocab(self):
         from transformers import AutoTokenizer
         tokenizer = AutoTokenizer.from_pretrained(dir_model)
         vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
-        if max(tokenizer.get_vocab().values()) >= vocab_size:
+        # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
+        # because vocab_size is the count of items, and indexes start at 0.
+        max_vocab_index = max(tokenizer.get_vocab().values())
+        if max_vocab_index >= vocab_size:
             raise ValueError("Vocabulary size exceeds expected maximum size.")
+
         reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
         added_vocab = tokenizer.get_added_vocab()
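
For context, the strict `>=` comparison guards against an off-by-one: a vocabulary of size N may only contain token ids 0..N-1, so the largest id must be strictly less than the count. A minimal standalone sketch of the same check, using a made-up `toy_vocab` and `expected_vocab_size` rather than anything from the converter:

```python
# Illustration of the off-by-one reasoning behind the check above.
# `toy_vocab` and `expected_vocab_size` are hypothetical values for demonstration only.
toy_vocab = {"<unk>": 0, "hello": 1, "world": 2}
expected_vocab_size = 3  # counts items, so the only valid indexes are 0..2

max_vocab_index = max(toy_vocab.values())
if max_vocab_index >= expected_vocab_size:
    raise ValueError("Vocabulary size exceeds expected maximum size.")
print(f"max index {max_vocab_index} fits within vocab_size {expected_vocab_size}")
```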