fix(convert_hf_to_gguf/gguf-py): _multiplier -> _scale
Transformers hyperparameter names ending in _multiplier are now converted to
their _scale equivalents during conversion.

Branch: GraniteLM

Signed-off-by: Gabe Goodhart <[email protected]>
gabe-l-hart committed Sep 16, 2024
1 parent 0bdf04e commit 65c5bb9
Showing 3 changed files with 20 additions and 18 deletions.
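
Concretely, the change maps transformers config names onto GGUF _scale keys. Below is a minimal sketch of the mapping, using the key templates from the diffs that follow; the "granite" arch prefix and the config values are hypothetical, chosen for illustration only:

# Hypothetical illustration of the rename; the key templates come from
# the diffs below, the config values are made up.
HF_TO_GGUF = {
    "attention_multiplier": "{arch}.attention.scale",
    "embedding_multiplier": "{arch}.embedding_scale",
    "residual_multiplier":  "{arch}.residual_scale",
    "logits_scaling":       "{arch}.logit_scale",
}

hf_config = {"attention_multiplier": 0.0078125, "residual_multiplier": 0.22}
for hf_name, gguf_key in HF_TO_GGUF.items():
    if (value := hf_config.get(hf_name)) is not None:
        print(f"{hf_name} -> {gguf_key.format(arch='granite')} = {value}")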
20 changes: 11 additions & 9 deletions convert_hf_to_gguf.py
@@ -4090,20 +4090,22 @@ def set_gguf_parameters(self):
         - No head_dim support
         - New multiplier params:
-            - attention_multiplier
-            - embedding_multiplier
-            - residual_multiplier
+            - attention_scale
+            - embedding_scale
+            - residual_scale
             - logits_scaling
         """
         if head_dim := self.hparams.pop("head_dim", None):
             logger.warning("Ignoring head_dim (%s) from config for Granite", head_dim)
         super().set_gguf_parameters()
-        if attention_multiplier := self.hparams.get("attention_multiplier"):
-            self.gguf_writer.add_attention_multiplier(attention_multiplier)
-        if embedding_multiplier := self.hparams.get("embedding_multiplier"):
-            self.gguf_writer.add_embedding_multiplier(embedding_multiplier)
-        if residual_multiplier := self.hparams.get("residual_multiplier"):
-            self.gguf_writer.add_residual_multiplier(residual_multiplier)
+        # NOTE: Convert _multiplier params to _scale params for naming
+        # consistency
+        if attention_scale := self.hparams.get("attention_multiplier"):
+            self.gguf_writer.add_attention_scale(attention_scale)
+        if embedding_scale := self.hparams.get("embedding_multiplier"):
+            self.gguf_writer.add_embedding_scale(embedding_scale)
+        if residual_scale := self.hparams.get("residual_multiplier"):
+            self.gguf_writer.add_residual_scale(residual_scale)
         if logits_scaling := self.hparams.get("logits_scaling"):
             self.gguf_writer.add_logit_scale(logits_scaling)
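
One detail of the guard pattern above worth noting: if name := hparams.get(...) skips not only absent keys but also falsy values such as 0.0, so a zero multiplier is treated the same as an unset one. A standalone sketch of that behavior, with a plain dict standing in for self.hparams and print() for the gguf_writer calls:

# Standalone sketch of the walrus-operator guards; a plain dict stands in
# for self.hparams and print() for the gguf_writer calls.
hparams = {"attention_multiplier": 0.0078125, "embedding_multiplier": 0.0}

if attention_scale := hparams.get("attention_multiplier"):
    print("add_attention_scale:", attention_scale)  # fires: value is truthy
if embedding_scale := hparams.get("embedding_multiplier"):
    print("add_embedding_scale:", embedding_scale)  # skipped: 0.0 is falsy
if residual_scale := hparams.get("residual_multiplier"):
    print("add_residual_scale:", residual_scale)    # skipped: key is absent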
6 changes: 3 additions & 3 deletions gguf-py/gguf/constants.py
@@ -97,8 +97,8 @@ class LLM:
         RESCALE_EVERY_N_LAYERS = "{arch}.rescale_every_n_layers"
         TIME_MIX_EXTRA_DIM     = "{arch}.time_mix_extra_dim"
         TIME_DECAY_EXTRA_DIM   = "{arch}.time_decay_extra_dim"
-        RESIDUAL_MULTIPLIER    = "{arch}.residual_multiplier"
-        EMBEDDING_MULTIPLIER   = "{arch}.embedding_multiplier"
+        RESIDUAL_SCALE         = "{arch}.residual_scale"
+        EMBEDDING_SCALE        = "{arch}.embedding_scale"

     class Attention:
         HEAD_COUNT = "{arch}.attention.head_count"
@@ -114,7 +114,7 @@ class Attention:
         KV_LORA_RANK      = "{arch}.attention.kv_lora_rank"
         REL_BUCKETS_COUNT = "{arch}.attention.relative_buckets_count"
         SLIDING_WINDOW    = "{arch}.attention.sliding_window"
-        MULTIPLIER        = "{arch}.attention.multiplier"
+        SCALE             = "{arch}.attention.scale"

     class Rope:
         DIMENSION_COUNT = "{arch}.rope.dimension_count"
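
The renamed constants are per-architecture templates; they only become concrete GGUF metadata keys once formatted with an arch name. A quick illustration using plain string formatting ("granite" is an example arch):

# The templates above expand to concrete metadata keys via str.format();
# "granite" is an example arch name.
RESIDUAL_SCALE  = "{arch}.residual_scale"
EMBEDDING_SCALE = "{arch}.embedding_scale"
SCALE           = "{arch}.attention.scale"

for template in (RESIDUAL_SCALE, EMBEDDING_SCALE, SCALE):
    print(template.format(arch="granite"))
# granite.residual_scale
# granite.embedding_scale
# granite.attention.scale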
12 changes: 6 additions & 6 deletions gguf-py/gguf/gguf_writer.py
@@ -679,11 +679,11 @@ def add_time_mix_extra_dim(self, dim: int) -> None:
     def add_time_decay_extra_dim(self, dim: int) -> None:
         self.add_uint32(Keys.LLM.TIME_DECAY_EXTRA_DIM.format(arch=self.arch), dim)

-    def add_residual_multiplier(self, value: float) -> None:
-        self.add_float32(Keys.LLM.RESIDUAL_MULTIPLIER.format(arch=self.arch), value)
+    def add_residual_scale(self, value: float) -> None:
+        self.add_float32(Keys.LLM.RESIDUAL_SCALE.format(arch=self.arch), value)

-    def add_embedding_multiplier(self, value: float) -> None:
-        self.add_float32(Keys.LLM.EMBEDDING_MULTIPLIER.format(arch=self.arch), value)
+    def add_embedding_scale(self, value: float) -> None:
+        self.add_float32(Keys.LLM.EMBEDDING_SCALE.format(arch=self.arch), value)

     def add_wkv_head_size(self, size: int) -> None:
         self.add_uint32(Keys.WKV.HEAD_SIZE.format(arch=self.arch), size)
@@ -709,8 +709,8 @@ def add_relative_attn_buckets_count(self, value: int) -> None:
     def add_sliding_window(self, value: int) -> None:
         self.add_uint32(Keys.Attention.SLIDING_WINDOW.format(arch=self.arch), value)

-    def add_attention_multiplier(self, value: float) -> None:
-        self.add_float32(Keys.Attention.MULTIPLIER.format(arch=self.arch), value)
+    def add_attention_scale(self, value: float) -> None:
+        self.add_float32(Keys.Attention.SCALE.format(arch=self.arch), value)

     def add_pooling_type(self, value: PoolingType) -> None:
         self.add_uint32(Keys.LLM.POOLING_TYPE.format(arch=self.arch), value.value)
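
A sketch of calling the renamed writer methods end to end. This assumes the gguf-py package from this repo is importable as gguf and that GGUFWriter takes (path, arch) as in gguf_writer.py; the file name and values are made up:

# Sketch only: assumes gguf-py is installed and that a metadata-only file
# is acceptable; the path and the float values are invented for the demo.
from gguf import GGUFWriter

writer = GGUFWriter("granite-example.gguf", arch="granite")
writer.add_attention_scale(0.0078125)  # writes granite.attention.scale
writer.add_embedding_scale(12.0)       # writes granite.embedding_scale
writer.add_residual_scale(0.22)        # writes granite.residual_scale
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.close()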
