diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 9f1419e29eb4e..d7a94ac1d9164 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -1989,6 +1989,15 @@ def set_vocab(self):
         except FileNotFoundError:
             self._set_vocab_gpt2()
 
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
+            if self.hparams["rope_scaling"].get("type") == "yarn":
+                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
+                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
+                self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1)
+
 
 @Model.register("Qwen2MoeForCausalLM")
 class Qwen2MoeModel(Model):