diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index a6751cc80e682..ae23b1271212f 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -967,7 +967,9 @@ def set_vocab(self):
         from transformers import AutoTokenizer
         tokenizer = AutoTokenizer.from_pretrained(dir_model)
         vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
-        assert max(tokenizer.vocab.values()) < vocab_size
+        if max(tokenizer.get_vocab().values()) >= vocab_size:
+            raise ValueError("Vocabulary size exceeds expected maximum size.")
+
         reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
         added_vocab = tokenizer.get_added_vocab()