From 47e02eb7bc1931a2112b83249f806233c7af102d Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Tue, 30 Apr 2024 14:07:28 -0400 Subject: [PATCH 01/15] convert-hf : begin refactoring write_tensor --- .devops/nix/package.nix | 1 + convert-hf-to-gguf.py | 1214 +++++------------ examples/server/tests/features/steps/steps.py | 2 +- gguf-py/gguf/constants.py | 2 +- gguf-py/gguf/gguf_reader.py | 8 +- gguf-py/gguf/gguf_writer.py | 6 +- gguf-py/gguf/vocab.py | 6 +- gguf-py/scripts/gguf-dump.py | 2 +- gguf-py/scripts/gguf-new-metadata.py | 10 +- pyrightconfig.json | 3 + 10 files changed, 394 insertions(+), 860 deletions(-) create mode 100644 pyrightconfig.json diff --git a/.devops/nix/package.nix b/.devops/nix/package.nix index 2c0ae4e2a071b..86cc6e54fe694 100644 --- a/.devops/nix/package.nix +++ b/.devops/nix/package.nix @@ -86,6 +86,7 @@ let # TODO(Green-Sky): find a better way to opt-into the heavy ml python runtime llama-python-extra = python3.withPackages ( ps: [ + ps.einops ps.numpy ps.sentencepiece ps.tiktoken diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index d1b8cef11277d..ad54f01bf278c 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -8,11 +8,10 @@ import os import re import sys -from abc import ABC, abstractmethod from enum import IntEnum from pathlib import Path from hashlib import sha256 -from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterator, Sequence, TypeVar, cast +from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Protocol, Sequence, TypeVar, cast import numpy as np import torch @@ -41,9 +40,26 @@ class SentencePieceTokenTypes(IntEnum): AnyModel = TypeVar("AnyModel", bound="type[Model]") -class Model(ABC): +class Model(Protocol): _model_classes: dict[str, type[Model]] = {} + dir_model: Path + ftype: int + fname_out: Path + is_big_endian: bool + endianess: gguf.GGUFEndian + use_temp_file: bool + is_safetensors: bool + num_parts: int + part_names: Iterable[str] + hparams: dict[str, Any] + gguf_writer: gguf.GGUFWriter + block_count: int + tensor_map: gguf.TensorNameMap + tensors: dict[str, Tensor] + + model_arch: gguf.MODEL_ARCH + def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool): self.dir_model = dir_model self.ftype = ftype @@ -57,13 +73,10 @@ def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: self.hparams = Model.load_hparams(self.dir_model) self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file) self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer"]) + self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count) + self.tensors = dict(self.get_tensors()) - @property - @abstractmethod - def model_arch(self) -> gguf.MODEL_ARCH: - pass - - def find_hparam(self, keys: Sequence[str], optional: bool = False) -> Any: + def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any: key = next((k for k in keys if k in self.hparams), None) if key is not None: return self.hparams[key] @@ -89,6 +102,23 @@ def get_tensors(self) -> Iterator[tuple[str, Tensor]]: data = model_part.get_tensor(name) if self.is_safetensors else model_part[name] yield name, data + def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str: + name: str = gguf.TENSOR_NAMES[key] + if key not in gguf.MODEL_TENSORS[self.model_arch]: + print(f"Missing 
{key!r} for MODEL_TENSORS of {self.model_arch!r}") + sys.exit() + if "{bid}" in name: + assert bid is not None + name = name.format(bid) + return name + suffix + + def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str: + new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + return new_name + def set_gguf_parameters(self): self.gguf_writer.add_name(self.dir_model.name) self.gguf_writer.add_block_count(self.block_count) @@ -132,12 +162,19 @@ def set_gguf_parameters(self): self.gguf_writer.add_file_type(self.ftype) print(f"gguf: file type = {self.ftype}") + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + return [(self.map_tensor_name(name), data_torch)] + + def extra_f32_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool: + return False + + def extra_f16_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool: + return False + def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - for name, data_torch in self.get_tensors(): + for name, data_torch in self.tensors.items(): # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): + if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")): continue old_dtype = data_torch.dtype @@ -146,32 +183,36 @@ def write_tensors(self): if data_torch.dtype not in (torch.float16, torch.float32): data_torch = data_torch.to(torch.float32) - data = data_torch.squeeze().numpy() + # use the first number-like part of the tensor name as the block id + bid = None + for part in name.split("."): + if part.isdecimal(): + bid = int(part) + break - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() + for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)): + n_dims = len(data.shape) + data_dtype = data.dtype - n_dims = len(data.shape) - data_dtype = data.dtype + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + # when both are true, the tensor keeps its original type + extra_f32 = self.extra_f32_tensors(n_dims, name, new_name, bid) + extra_f16 = self.extra_f16_tensors(n_dims, name, new_name, bid) - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")): - data = data.astype(np.float32) + # 1d tensors need to be converted to float32 + if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or extra_f32) and not extra_f16: + data = data.astype(np.float32) - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and (name.endswith(".weight") and n_dims == 2 or extra_f16) and not extra_f32: + data = data.astype(np.float16) - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) + self.gguf_writer.add_tensor(new_name, data) def write(self): self.write_tensors() @@ -203,7 +244,7 @@ def load_hparams(dir_model): def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]: assert names - def func(modelcls: type[Model]): + def func(modelcls: AnyModel) -> AnyModel: for name in names: cls._model_classes[name] = modelcls return modelcls @@ -219,7 +260,7 @@ def from_model_architecture(cls, arch): def _is_model_safetensors(self) -> bool: return Model.count_model_parts(self.dir_model, ".safetensors") > 0 - def _get_part_names(self): + def _get_part_names(self) -> Iterable[str]: if self.is_safetensors: if self.num_parts == 1: # there's only one .safetensors file return ("model.safetensors",) @@ -399,22 +440,24 @@ def _set_vocab_sentencepiece(self): if not tokenizer_path.is_file(): raise FileNotFoundError(f"File not found: {tokenizer_path}") - tokenizer = SentencePieceProcessor(str(tokenizer_path)) + tokenizer = SentencePieceProcessor() + tokenizer.LoadFromFile(tokenizer_path) + vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) for token_id in range(tokenizer.vocab_size()): - piece = tokenizer.id_to_piece(token_id) + piece = tokenizer.IdToPiece(token_id) text = piece.encode("utf-8") - score = tokenizer.get_score(token_id) + score = tokenizer.GetScore(token_id) toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.is_unknown(token_id): + if tokenizer.IsUnknown(token_id): toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.is_control(token_id): + elif tokenizer.IsControl(token_id): toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.is_unused(token_id): + elif tokenizer.IsUnused(token_id): toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.is_byte(token_id): + elif tokenizer.IsByte(token_id): toktype = SentencePieceTokenTypes.BYTE tokens.append(text) @@ -439,7 +482,7 @@ def _set_vocab_sentencepiece(self): f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]" ) for i in range(1, pad_count + 1): - tokens.append(f"[PAD{i}]") + tokens.append(bytes(f"[PAD{i}]", encoding="utf-8")) scores.append(-1000.0) toktypes.append(SentencePieceTokenTypes.UNUSED) @@ -514,82 +557,50 @@ def set_gguf_parameters(self): self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): - block_count = self.hparams["n_layer"] - tensors = dict(self.get_tensors()) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - has_lm_head = True + def modify_tensors(self, 
data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused + n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - for name, data_torch in tensors.items(): - if "lm_head.weight" not in tensors.keys() and "output.weight" not in tensors.keys(): - has_lm_head = False - - name = re.sub(r'transformer\.', '', name) - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name): - # Map bloom-style qkv_linear to gpt-style qkv_linear - # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa - # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa - qkv_weights = data.reshape((n_head, 3, n_embed // n_head, n_embed)) - data = np.concatenate( - ( - qkv_weights[:, 0, :, :].reshape((-1, n_embed)), - qkv_weights[:, 1, :, :].reshape((-1, n_embed)), - qkv_weights[:, 2, :, :].reshape((-1, n_embed)), - ), - axis=0, - ) - print("re-format attention.linear_qkv.weight") - elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name): - qkv_bias = data.reshape((n_head, 3, n_embed // n_head)) - data = np.concatenate( - ( - qkv_bias[:, 0, :].reshape((n_embed,)), - qkv_bias[:, 1, :].reshape((n_embed,)), - qkv_bias[:, 2, :].reshape((n_embed,)), - ), - axis=0, - ) - print("re-format attention.linear_qkv.bias") - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + name = re.sub(r'transformer\.', '', name) + + tensors: list[tuple[str, Tensor]] = [] + + if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name): + # Map bloom-style qkv_linear to gpt-style qkv_linear + # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa + # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa + qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed)) + data_torch = torch.cat( + ( + qkv_weights[:, 0, :, :].reshape((-1, n_embed)), + qkv_weights[:, 1, :, :].reshape((-1, n_embed)), + qkv_weights[:, 2, :, :].reshape((-1, n_embed)), + ), + dim=0, + ) + print("re-format attention.linear_qkv.weight") + elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name): + qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head)) + data_torch = torch.cat( + ( + qkv_bias[:, 0, :].reshape((n_embed,)), + qkv_bias[:, 1, :].reshape((n_embed,)), + qkv_bias[:, 2, :].reshape((n_embed,)), + ), + dim=0, + ) + print("re-format attention.linear_qkv.bias") - print(f"=> {new_name}, shape = {data.shape}, {old_dtype} --> {data.dtype}") + tensors.append((self.map_tensor_name(name), data_torch)) - self.gguf_writer.add_tensor(new_name, data) + if name == "word_embeddings.weight": + # TODO: tie them at runtime, don't duplicate in the model file + if "lm_head.weight" not in self.tensors and "output.weight" not in self.tensors: + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - if not has_lm_head and name == "word_embeddings.weight": - self.gguf_writer.add_tensor("output.weight", data) - print(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}") + return tensors @Model.register("MPTForCausalLM") @@ -625,51 +636,16 @@ def set_gguf_parameters(self): else: self.gguf_writer.add_max_alibi_bias(0.0) - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers")) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - if "scales" in name: - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias", ".scales")) - if new_name is not None: - new_name = new_name.replace("scales", "act.scales") - else: - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + if "scales" in name: + new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales")) + new_name = new_name.replace("scales", "act.scales") + else: + new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias")) - self.gguf_writer.add_tensor(new_name, data) + return [(new_name, data_torch)] @Model.register("OrionForCausalLM") @@ -710,49 +686,6 @@ def set_gguf_parameters(self): # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571 self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"]) - def write_tensors(self): - # Collect tensors from generator object - model_kv = dict(self.get_tensors()) - block_count = self.hparams["num_hidden_layers"] - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - - for name, data_torch in model_kv.items(): - # we don't need these - if name.endswith(".rotary_emb.inv_freq"): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) - @Model.register("BaichuanForCausalLM", "BaiChuanForCausalLM") class BaichuanModel(Model): @@ -795,61 +728,26 @@ def set_gguf_parameters(self): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - def write_tensors(self): - # Collect tensors from generator object - model_kv = dict(self.get_tensors()) - block_count = self.hparams["num_hidden_layers"] + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: head_count = self.hparams["num_attention_heads"] - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) head_count_kv = self.hparams.get("num_key_value_heads", head_count) - for i in range(block_count): - if (w := model_kv.get(f"model.layers.{i}.self_attn.W_pack.weight")) is not None: - print(f"Unpacking and permuting layer {i}") - model_kv[f"model.layers.{i}.self_attn.q_proj.weight"] = \ - self._reverse_hf_permute_part(w, 0, head_count, head_count) - model_kv[f"model.layers.{i}.self_attn.k_proj.weight"] = \ - self._reverse_hf_permute_part(w, 1, head_count, head_count_kv) - model_kv[f"model.layers.{i}.self_attn.v_proj.weight"] = \ - self._reverse_hf_part(w, 2) - del model_kv[f"model.layers.{i}.self_attn.W_pack.weight"] - - for name, data_torch in model_kv.items(): - # we don't need these - if name.endswith(".rotary_emb.inv_freq"): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + tensors: list[tuple[str, Tensor]] = [] + + if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight": + print(f"Unpacking and permuting layer {bid}") + tensors = [ + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), + self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)), + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), + self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)), + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), + self._reverse_hf_part(data_torch, 2)), + ] + else: + tensors = [(self.map_tensor_name(name), data_torch)] - print(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) + return tensors def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: if n_kv_head is not None and n_head != n_kv_head: @@ -881,7 +779,7 @@ def set_vocab(self): dir_model = self.dir_model hparams = self.hparams - tokens: list[bytearray] = [] + tokens: list[bytes] = [] toktypes: list[int] = [] from transformers import AutoTokenizer @@ -889,7 +787,7 @@ def set_vocab(self): vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) assert max(tokenizer.vocab.values()) < vocab_size - reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()} + reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()} added_vocab = tokenizer.get_added_vocab() for token_id in range(vocab_size): @@ -953,56 +851,19 @@ def set_gguf_parameters(self): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - def write_tensors(self): - # Collect tensors from generator object - model_kv = dict(self.get_tensors()) - block_count = self.hparams["num_hidden_layers"] + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused + head_count = self.hparams["num_attention_heads"] - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) head_count_kv = self.hparams.get("num_key_value_heads", head_count) - for name, data_torch in model_kv.items(): - # we don't need these - if name.endswith(".rotary_emb.inv_freq"): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - # HF models permute some of the tensors, so we need to undo that - if name.endswith(("q_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, head_count, head_count) - if name.endswith(("k_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == 
np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + # HF models permute some of the tensors, so we need to undo that + if name.endswith("q_proj.weight"): + data_torch = self._reverse_hf_permute(data_torch, head_count, head_count) + if name.endswith("k_proj.weight"): + data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv) - print(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) + return [(self.map_tensor_name(name), data_torch)] def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: if n_kv_head is not None and n_head != n_kv_head: @@ -1043,80 +904,39 @@ def set_gguf_parameters(self): self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): - block_count = self.hparams.get("num_hidden_layers") - if block_count is None: - block_count = self.hparams["n_layer"] # old name - - n_head = self.hparams.get("num_attention_heads") - if n_head is None: - n_head = self.hparams["n_head"] # old name + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - n_head_kv = self.hparams.get("num_kv_heads") - if n_head_kv is None: - n_head_kv = self.hparams.get("n_head_kv", 1) # old name + # QKV tensor transform + # The original query_key_value tensor contains n_head_kv "kv groups", + # each consisting of n_head/n_head_kv query weights followed by one key + # and one value weight (shared by all query heads in the kv group). + # This layout makes it a big pain to work with in GGML. + # So we rearrange them here,, so that we have n_head query weights + # followed by n_head_kv key weights followed by n_head_kv value weights, + # in contiguous fashion. 
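        # For illustration only (toy sizes assumed here, not read from any config):
        # with n_head = 4, n_head_kv = 2 and head_dim = 3, the fused weight has
        # n_head_kv * (n_head // n_head_kv + 2) * head_dim = 24 rows and 12 columns;
        # the view() below reshapes it to (n_head_kv, n_head // n_head_kv + 2,
        # head_dim, n_head * head_dim) = (2, 4, 3, 12), where the first two slots of
        # each kv group are query heads and the last two are the shared key and value
        # head, and cat((q, k, v)) lays the rows back out as 12 query rows, then
        # 6 key rows, then 6 value rows, in the original (24, 12) shape.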
+ # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py - head_dim = self.hparams["hidden_size"] // n_head - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + if "query_key_value" in name: + n_head = self.find_hparam(["num_attention_heads", "n_head"]) + n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1 + head_dim = self.hparams["hidden_size"] // n_head - for name, data_torch in self.get_tensors(): - old_dtype = data_torch.dtype + qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head) + q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head) + k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head) + v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) + data_torch = torch.cat((q, k, v)).reshape_as(data_torch) - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + return [(self.map_tensor_name(name), data_torch)] - # QKV tensor transform - # The original query_key_value tensor contains n_head_kv "kv groups", - # each consisting of n_head/n_head_kv query weights followed by one key - # and one value weight (shared by all query heads in the kv group). - # This layout makes it a big pain to work with in GGML. - # So we rearrange them here,, so that we have n_head query weights - # followed by n_head_kv key weights followed by n_head_kv value weights, - # in contiguous fashion. - # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py - - if "query_key_value" in name: - qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head) - q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head) - k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head) - v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) - data_torch = torch.cat((q, k, v)).reshape_as(data_torch) - data = data_torch.squeeze().numpy() +@Model.register("GPTBigCodeForCausalLM") +class StarCoderModel(Model): + model_arch = gguf.MODEL_ARCH.STARCODER - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) - - -@Model.register("GPTBigCodeForCausalLM") -class StarCoderModel(Model): - model_arch = gguf.MODEL_ARCH.STARCODER - - def set_gguf_parameters(self): - block_count = self.hparams["n_layer"] + def set_gguf_parameters(self): + block_count = self.hparams["n_layer"] self.gguf_writer.add_name("StarCoder") self.gguf_writer.add_context_length(self.hparams["n_positions"]) @@ -1154,7 +974,7 @@ def set_gguf_parameters(self): self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: hidden_dim = self.hparams["n_embd"] inner_dim = 4 * hidden_dim hidden_dim = int(2 * inner_dim / 3) @@ -1162,58 +982,24 @@ def write_tensors(self): ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) n_head = self.hparams["n_head"] n_head_kv = 1 - head_dim = self.hparams["n_embd"] // n_head - block_count = self.hparams["n_layer"] - - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - - tensors = dict(self.get_tensors()) - for i in range(block_count): - if (w := tensors.get(f"transformer.h.{i}.attn.kv.weight")) is not None: - tensors[f"model.layers.{i}.self_attn.k_proj.weight"] = w[:n_head_kv * head_dim] - tensors[f"model.layers.{i}.self_attn.v_proj.weight"] = w[n_head_kv * head_dim:] - del tensors[f"transformer.h.{i}.attn.kv.weight"] - if (w := tensors.get(f"transformer.h.{i}.attn.q.weight")) is not None: - tensors[f"model.layers.{i}.self_attn.q_proj.weight"] = w - del tensors[f"transformer.h.{i}.attn.q.weight"] - if (w := tensors.get(f"transformer.h.{i}.mlp.gate_up_proj.weight")) is not None: - tensors[f"model.layers.{i}.mlp.gate_proj.weight"] = w[:ff_dim] - tensors[f"model.layers.{i}.mlp.up_proj.weight"] = w[ff_dim:] - del tensors[f"transformer.h.{i}.mlp.gate_up_proj.weight"] - - for name, data_torch in tensors.items(): - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight",)) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype + head_dim = hidden_dim // n_head - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + tensors: list[tuple[str, Tensor]] = [] - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) + if bid is not None: + if name == f"transformer.h.{bid}.attn.kv.weight": + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim])) + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:])) + elif name == f"transformer.h.{bid}.attn.q.weight": + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch)) + elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight": + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])) + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])) - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + if len(tensors) == 0: + tensors.append((self.map_tensor_name(name), data_torch)) - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return tensors @Model.register("PersimmonForCausalLM") @@ -1248,23 +1034,11 @@ def set_vocab(self): # self.gguf_writer.add_bos_token_id(71013) # self.gguf_writer.add_eos_token_id(71013) - def write_tensors(self): - block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers")) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + def extra_f32_tensors(self, n_dims: int, name: str, new_name: str) -> bool: + del n_dims, name, new_name # unused - for name, data_torch in self.get_tensors(): - if name.endswith(".self_attention.rotary_emb.inv_freq"): - continue - old_dtype = data_torch.dtype - # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?) - data = data_torch.to(torch.float32).squeeze().numpy() - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - n_dims = len(data.shape) - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) + # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?) 
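        # (A descriptive note on this hook, assuming the base-class write_tensors
        # logic above: returning True keeps every tensor of this model in float32,
        # even when an f16 output type was requested.)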
+ return True @Model.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM") @@ -1294,6 +1068,10 @@ def set_gguf_parameters(self): self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True) self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"])) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # FIXME + return super().modify_tensors(data_torch, name, bid) + def write_tensors(self): block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) @@ -1413,6 +1191,10 @@ def set_gguf_parameters(self): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # FIXME + return super().modify_tensors(data_torch, name, bid) + # Same as super class, but permuting q_proj, k_proj def write_tensors(self): block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) @@ -1527,6 +1309,10 @@ def set_gguf_parameters(self): super().set_gguf_parameters() self.gguf_writer.add_name("Grok") + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # FIXME + return super().modify_tensors(data_torch, name, bid) + def write_tensors(self): block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) @@ -1644,6 +1430,10 @@ def set_gguf_parameters(self): self.gguf_writer.add_file_type(self.ftype) print(f"gguf: file type = {self.ftype}") + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # FIXME + return super().modify_tensors(data_torch, name, bid) + def write_tensors(self): block_count = self.hparams.get("n_layers") tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) @@ -1740,54 +1530,19 @@ def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | Non .reshape(weights.shape) ) - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_head = self.hparams.get("num_attention_heads") - n_kv_head = self.hparams.get("num_key_value_heads") - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - # HF models permute some of the tensors, so we need to undo that - if name.endswith(("q_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, n_head, n_head) - if name.endswith(("k_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - data = data_torch.squeeze().numpy() - - # map tensor 
names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + n_head = self.hparams["num_attention_heads"] + n_kv_head = self.hparams.get("num_key_value_heads") - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + # HF models permute some of the tensors, so we need to undo that + if name.endswith(("q_proj.weight")): + data_torch = self._reverse_hf_permute(data_torch, n_head, n_head) + if name.endswith(("k_proj.weight")): + data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head) - self.gguf_writer.add_tensor(new_name, data) + return [(self.map_tensor_name(name), data_torch)] @Model.register("QWenLMHeadModel") @@ -1831,47 +1586,6 @@ def set_gguf_parameters(self): self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) - def write_tensors(self): - block_count = self.hparams["num_hidden_layers"] - model_kv = dict(self.get_tensors()) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - for name, data_torch in model_kv.items(): - # we don't need these - if name.endswith(".rotary_emb.inv_freq"): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) - @Model.register("Qwen2ForCausalLM") class Qwen2Model(Model): @@ -1893,6 +1607,10 @@ def set_gguf_parameters(self): if (n_experts := self.hparams.get("num_experts")) is not None: self.gguf_writer.add_expert_count(n_experts) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # FIXME + return super().modify_tensors(data_torch, name, bid) + def write_tensors(self): block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) @@ -1997,55 +1715,27 @@ def set_gguf_parameters(self): self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq", ".attn.bias", ".attn.masked_bias")): - continue - - if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")): - data_torch = data_torch.transpose(1, 0) - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() + tensors: list[tuple[str, Tensor]] = [] - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + # we don't need these + if name.endswith((".attn.bias", ".attn.masked_bias")): + return tensors - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) + if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")): + data_torch = data_torch.transpose(1, 0) - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + new_name = self.map_tensor_name(name) - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + tensors.append((new_name, data_torch)) - self.gguf_writer.add_tensor(new_name, data) + # note: GPT2 output is tied to (same as) wte in original model + if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - # note: GPT2 output is tied to (same as) wte in original model - if new_name == "token_embd.weight": - print(f"output.weight, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor("output.weight", data) + return tensors @Model.register("PhiForCausalLM") @@ -2086,7 +1776,8 @@ def set_vocab(self): print(f'Error: Missing {tokenizer_path}', file=sys.stderr) sys.exit(1) - tokenizer = SentencePieceProcessor(str(tokenizer_path)) + tokenizer = SentencePieceProcessor() + tokenizer.LoadFromFile(tokenizer_path) vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) @@ -2096,18 +1787,18 @@ def set_vocab(self): for token_id in range(tokenizer.vocab_size()): - piece = tokenizer.id_to_piece(token_id) + piece = tokenizer.IdToPiece(token_id) text = piece.encode("utf-8") - score = tokenizer.get_score(token_id) + score = tokenizer.GetScore(token_id) toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.is_unknown(token_id): + if tokenizer.IsUnknown(token_id): toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.is_control(token_id): + elif tokenizer.IsControl(token_id): toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.is_unused(token_id): + elif tokenizer.IsUnused(token_id): toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.is_byte(token_id): + elif tokenizer.IsByte(token_id): toktype = SentencePieceTokenTypes.BYTE tokens[token_id] = text @@ -2193,52 +1884,18 @@ def shuffle_attn_output_weight(self, data_torch): data_torch = torch.reshape(data_torch, (5120, 5120)) return data_torch - def write_tensors(self): - block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers")) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - - for name, data_torch in self.get_tensors(): - if "self_attn.rotary_emb.inv_freq" in name: - continue - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - # shuffle for broadcasting of gqa in ggml_mul_mat - if new_name.endswith("attn_q.weight"): - data_torch = self.shuffle_attn_q_weight(data_torch) - elif new_name.endswith("attn_output.weight"): - data_torch = self.shuffle_attn_output_weight(data_torch) - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - data = data_torch.squeeze().numpy() + new_name = 
self.map_tensor_name(name) - n_dims = len(data.shape) - data_dtype = data.dtype + # shuffle for broadcasting of gqa in ggml_mul_mat + if new_name.endswith("attn_q.weight"): + data_torch = self.shuffle_attn_q_weight(data_torch) + elif new_name.endswith("attn_output.weight"): + data_torch = self.shuffle_attn_output_weight(data_torch) - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return [(new_name, data_torch)] @Model.register("CodeShellForCausalLM") @@ -2261,52 +1918,18 @@ def set_gguf_parameters(self): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(1.0) - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - tensors = dict(self.get_tensors()) - has_lm_head = "lm_head.weight" in tensors.keys() or "output.weight" in tensors.keys() - for name, data_torch in tensors.items(): - # we don't need these - if name.endswith((".attn.rotary_emb.inv_freq")): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + new_name = self.map_tensor_name(name) - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)] - self.gguf_writer.add_tensor(new_name, data) + if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): + if "lm_head.weight" not in self.tensors and "output.weight" not in self.tensors: + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - if not has_lm_head and name == "transformer.wte.weight": - self.gguf_writer.add_tensor("output.weight", data) - print(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}") + return tensors @Model.register("InternLM2ForCausalLM") @@ -2335,27 +1958,30 @@ def set_vocab(self): sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix - tokenizer = SentencePieceProcessor(str(tokenizer_path)) + tokenizer = SentencePieceProcessor() + tokenizer.LoadFromFile(tokenizer_path) + tokenizer.serialized_model_proto + vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) for token_id in range(vocab_size): - piece = tokenizer.id_to_piece(token_id) + piece = tokenizer.IdToPiece(token_id) text = piece.encode("utf-8") - score = tokenizer.get_score(token_id) + score = tokenizer.GetScore(token_id) if text == b"\x00": # (TODO): fixme # Hack here and replace the \x00 characters. 
print(f"InternLM2 convert token '{text}' to '🐉'!") - text = "🐉" + text = "🐉".encode("utf-8") toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.is_unknown(token_id): + if tokenizer.IsUnknown(token_id): toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.is_control(token_id): + elif tokenizer.IsControl(token_id): toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.is_unused(token_id): + elif tokenizer.IsUnused(token_id): toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.is_byte(token_id): + elif tokenizer.IsByte(token_id): toktype = SentencePieceTokenTypes.BYTE tokens.append(text) @@ -2392,13 +2018,15 @@ def set_vocab(self): special_vocab.add_to_gguf(self.gguf_writer) def _try_get_sft_eos(self, tokenizer): - unused_145_list = tokenizer.encode('[UNUSED_TOKEN_145]') - im_end_list = tokenizer.encode('<|im_end|>') + unused_145_list = tokenizer.Encode('[UNUSED_TOKEN_145]') + im_end_list = tokenizer.Encode('<|im_end|>') + eos_token = None assert (len(unused_145_list) == 1) ^ (len(im_end_list) == 1) if len(unused_145_list) == 1: eos_token = unused_145_list[0] if len(im_end_list) == 1: eos_token = im_end_list[0] + assert eos_token return eos_token def _hf_permute_qk(self, weights, n_head: int, n_head_kv: int): @@ -2419,72 +2047,34 @@ def set_gguf_parameters(self): self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) - def post_write_tensors(self, tensor_map, name, data_torch): - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) - - def write_tensors(self): - from einops import rearrange - - num_heads = self.hparams.get("num_attention_heads") - num_kv_heads = self.hparams.get("num_key_value_heads") - hidden_size = self.hparams.get("hidden_size") + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + num_heads = self.hparams["num_attention_heads"] + num_kv_heads = self.hparams["num_key_value_heads"] + hidden_size = self.hparams["hidden_size"] q_per_kv = num_heads // num_kv_heads head_dim = hidden_size // num_heads num_groups = num_heads // q_per_kv - block_count = self.hparams["num_hidden_layers"] - model_kv = dict(self.get_tensors()) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) qkv_pattern = r"model\.layers\.(\d+)\.attention\.wqkv" - for name, data_torch in model_kv.items(): - # we don't need these - if name.endswith(".rotary_emb.inv_freq"): - continue - if re.match(qkv_pattern, name): - bid = re.findall(qkv_pattern, name)[0] - qkv = data_torch - qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim) - q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :] - # The model weights of q and k equire additional reshape. - q = self._hf_permute_qk(rearrange(q, " o g n i -> o (g n i)").T, num_heads, num_heads) - k = self._hf_permute_qk(rearrange(k, " o g n i -> o (g n i)").T, num_heads, num_kv_heads) - v = rearrange(v, " o g n i -> o (g n i)").T - self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wq.weight", q) - self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wk.weight", k) - self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wv.weight", v) - else: - self.post_write_tensors(tensor_map, name, data_torch) + if re.match(qkv_pattern, name): + from einops import rearrange + + bid = re.findall(qkv_pattern, name)[0] + qkv = data_torch + qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim) + q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :] + # The model weights of q and k equire additional reshape. 
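            # Illustrative shapes, assuming toy sizes (num_heads = 4, num_kv_heads = 2,
            # head_dim = 2, hidden_size = 8): q_per_kv = 2, num_groups = 2, and the
            # rearranged qkv above is (hidden_size, num_groups, q_per_kv + 2, head_dim)
            # = (8, 2, 4, 2); in each group the first q_per_kv slots are query heads
            # and the last two are the shared key and value head. The lines below
            # flatten each slice back to two dimensions and, for q and k, undo the
            # HF rotary permutation via _hf_permute_qk.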
+ q = self._hf_permute_qk(rearrange(q, " o g n i -> o (g n i)").T, num_heads, num_heads) + k = self._hf_permute_qk(rearrange(k, " o g n i -> o (g n i)").T, num_heads, num_kv_heads) + v = rearrange(v, " o g n i -> o (g n i)").T + return [ + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q), + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k), + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v), + ] + else: + return [(self.map_tensor_name(name), data_torch)] @Model.register("BertModel", "CamembertModel") @@ -2549,44 +2139,20 @@ def phantom(tok): special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) special_vocab.add_to_gguf(self.gguf_writer) - def write_tensors(self): - tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count) - tensors = dict(self.get_tensors()) - for name, data_torch in tensors.items(): - # we are only using BERT for embeddings so we don't need the pooling layer - if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"): - continue # we don't need these - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - n_dims = len(data.shape) - new_dtype: type[np.floating[Any]] + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - if ( - self.ftype == 1 and name.endswith(".weight") and n_dims == 2 - and name != "embeddings.token_type_embeddings.weight" # not used with get_rows, must be F32 - ): - # if f16 desired, convert any float32 2-dim weight tensors to float16 - new_dtype = np.float16 - else: - # if f32 desired, convert any float16 to float32 - new_dtype = np.float32 + # we are only using BERT for embeddings so we don't need the pooling layer + if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"): + return [] # we don't need these - print(f"{new_name}, n_dims = {n_dims}, {data_torch.dtype} --> {new_dtype}") + return [(self.map_tensor_name(name), data_torch)] - if data.dtype != new_dtype: - data = data.astype(new_dtype) + def extra_f32_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool: + del n_dims, new_name, bid # unused - self.gguf_writer.add_tensor(new_name, data) + # not used with get_rows, must be F32 + return name == "embeddings.token_type_embeddings.weight" @Model.register("NomicBertModel") @@ -2652,46 +2218,18 @@ def set_gguf_parameters(self): self.gguf_writer.add_value_length(hparams["head_dim"]) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # lm_head is not used in llama.cpp, while autoawq will include this tensor in model + # To prevent errors, skip loading lm_head.weight. 
+ if name == "lm_head.weight": + print(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") + return [] - for name, data_torch in self.get_tensors(): - # lm_head is not used in llama.cpp, while autoawq will include this tensor in model - # To prevent errors, skip loading lm_head.weight. - if name == "lm_head.weight": - print(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") - continue + # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89 + if name.endswith("norm.weight"): + data_torch = data_torch + 1 - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89 - if name.endswith("norm.weight"): - data_torch = data_torch + 1 - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return super().modify_tensors(data_torch, name, bid) @Model.register("Starcoder2ForCausalLM") @@ -2714,6 +2252,8 @@ def set_vocab(self): if (self.dir_model / "tokenizer.json").is_file(): self._set_vocab_gpt2() + elif (self.dir_model / "tokenizer.model").is_file(): + self._set_vocab_sentencepiece() else: # Use the GPT-NeoX tokenizer when no tokenizer files are present tokenizer_path = Path(sys.path[0]) / "models" / "ggml-vocab-gpt-neox.gguf" @@ -2721,28 +2261,34 @@ def set_vocab(self): neox_reader = gguf.GGUFReader(tokenizer_path, "r") field = neox_reader.get_field(gguf.Keys.Tokenizer.MODEL) - self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1])) + self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8") if field else "gpt2") field = neox_reader.get_field(gguf.Keys.Tokenizer.PRE) - self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1])) + self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else "mpt") field = neox_reader.get_field(gguf.Keys.Tokenizer.LIST) + assert field self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size]) field = neox_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE) + assert field self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size]) field = neox_reader.get_field(gguf.Keys.Tokenizer.MERGES) + assert field self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data]) field = neox_reader.get_field(gguf.Keys.Tokenizer.BOS_ID) - self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0]) + self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0] if field else 1) field = neox_reader.get_field(gguf.Keys.Tokenizer.EOS_ID) - self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0]) + self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0] if 
field else 0) field = neox_reader.get_field(gguf.Keys.Tokenizer.UNK_ID) - self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0]) + self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0] if field else 0) + + field = neox_reader.get_field(gguf.Keys.Tokenizer.PAD_ID) + self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0] if field else 0) def set_gguf_parameters(self): d_model = self.find_hparam(["hidden_size", "d_model"]) @@ -2771,60 +2317,40 @@ def set_gguf_parameters(self): self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): - block_count = self.hparams["n_layer"] - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + _tok_embd = None - tok_embd = None - tok_embd_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.TOKEN_EMBD] + ".weight" - output_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.OUTPUT] + ".weight" + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - for name, data_torch in self.get_tensors(): - old_dtype = data_torch.dtype + output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT) + tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD) - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + new_name = self.map_tensor_name(name) - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - if name.endswith(".A_log"): - print("A_log --> A ==> " + new_name) - data_torch = -torch.exp(data_torch) - - # assuming token_embd.weight is seen before output.weight - if tok_embd is not None and new_name == output_name: - if torch.equal(tok_embd, data_torch): - print(f"{output_name} is equivalent to {tok_embd_name}, omitting") - continue - if new_name == tok_embd_name: - tok_embd = data_torch - - data = data_torch.squeeze().numpy() - - n_dims = len(data.shape) - data_dtype = data.dtype + if name.endswith(".A_log"): + print("A_log --> A ==> " + new_name) + data_torch = -torch.exp(data_torch) - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + # assuming token_embd.weight is seen before output.weight + if self._tok_embd is not None and new_name == output_name: + if torch.equal(self._tok_embd, data_torch): + print(f"{output_name} is equivalent to {tok_embd_name}, omitting") + return [] + elif new_name == tok_embd_name: + self._tok_embd = data_torch - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) + return [(new_name, data_torch)] - # if f16 desired, convert big float32 2-dim weight tensors to float16 - new_weight_name = new_name[:-len(".weight")] if new_name.endswith(".weight") else "" - if self.ftype == 1 and data_dtype == np.float32 and new_weight_name.endswith((".ssm_in", ".ssm_out", "token_embd", "output")) and n_dims == 2: - data = data.astype(np.float16) + def extra_f32_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool: + del n_dims # unused - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return new_name in (self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [ + gguf.MODEL_TENSOR.SSM_CONV1D, + gguf.MODEL_TENSOR.SSM_X, + gguf.MODEL_TENSOR.SSM_DT, + gguf.MODEL_TENSOR.SSM_A, + gguf.MODEL_TENSOR.SSM_D, + ]) @Model.register("CohereForCausalLM") @@ -2857,6 +2383,10 @@ def set_gguf_parameters(self): # Same as super class, but permuting q_proj, k_proj # Copied from: LlamaModel + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # FIXME + return super().modify_tensors(data_torch, name, bid) + def write_tensors(self): block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) diff --git a/examples/server/tests/features/steps/steps.py b/examples/server/tests/features/steps/steps.py index f71e0d706cca9..2be1c7cd4b2dd 100644 --- a/examples/server/tests/features/steps/steps.py +++ b/examples/server/tests/features/steps/steps.py @@ -882,7 +882,7 @@ async def oai_chat_completions(user_prompt, while event_received: event_received = False async for line_in_bytes in response.content: - line = line_in_bytes.decode('utf8') + line = line_in_bytes.decode('utf-8') line = line.rstrip('\n').rstrip('\r') if line == '': continue diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 6d597bfd9d621..b36adcff6db5b 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -861,7 +861,7 @@ def get_type(val: Any) -> GGUFValueType: # Note: Does not support GGML_QKK_64 QK_K = 256 # Items here are (block size, type size) -GGML_QUANT_SIZES = { +GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = { GGMLQuantizationType.F32: (1, 4), GGMLQuantizationType.F16: (1, 2), GGMLQuantizationType.Q4_0: (32, 2 + 16), diff --git a/gguf-py/gguf/gguf_reader.py b/gguf-py/gguf/gguf_reader.py index 2bdb15525b1a1..64e043f331048 100644 --- a/gguf-py/gguf/gguf_reader.py +++ b/gguf-py/gguf/gguf_reader.py @@ -63,7 +63,7 @@ class ReaderTensor(NamedTuple): class GGUFReader: # I - same as host, S - swapped - byte_order: Literal['I' | 'S'] = 'I' + byte_order: Literal['I'] | Literal['S'] = 'I' alignment: int = GGUF_DEFAULT_ALIGNMENT # Note: Internal helper, API may change. 
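A brief aside on the byte_order annotation change above (a hedged sketch; the compact spelling shown is an equivalent alternative, not what the patch uses): Literal['I' | 'S'] is rejected by pyright because the | is applied to the string values themselves rather than to Literal types, so the annotation has to be spelled either as one multi-value Literal or as a union of single-value Literals.

    from typing import Literal

    # 'I' | 'S' raises TypeError if evaluated, and pyright rejects it inside Literal[...]
    ByteOrder = Literal['I', 'S']            # compact equivalent spelling
    # Literal['I'] | Literal['S']            # union-of-Literals form used by the patch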
@@ -81,7 +81,7 @@ class GGUFReader: GGUFValueType.BOOL: np.bool_, } - def __init__(self, path: os.PathLike[str] | str, mode: Literal['r' | 'r+' | 'c'] = 'r'): + def __init__(self, path: os.PathLike[str] | str, mode: Literal['r'] | Literal['r+'] | Literal['c'] = 'r'): self.data = np.memmap(path, mode = mode) offs = 0 if self._get(offs, np.uint32, override_order = '<')[0] != GGUF_MAGIC: @@ -126,7 +126,7 @@ def get_tensor(self, idx: int) -> ReaderTensor: return self.tensors[idx] def _get( - self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I' | 'S' | '<'] = None, + self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I'] | Literal['S'] | Literal['<'] = None, ) -> npt.NDArray[Any]: count = int(count) itemsize = int(np.empty([], dtype = dtype).itemsize) @@ -248,7 +248,7 @@ def _build_tensors(self, start_offs: int, fields: list[ReaderField]) -> None: raise ValueError(f'Found duplicated tensor with name {tensor_name}') tensor_names.add(tensor_name) ggml_type = GGMLQuantizationType(raw_dtype[0]) - n_elems = np.prod(dims) + n_elems = int(np.prod(dims)) block_size, type_size = GGML_QUANT_SIZES[ggml_type] n_bytes = n_elems * type_size // block_size data_offs = int(start_offs + offset_tensor[0]) diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 089aece876a93..d782037be27aa 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -173,7 +173,7 @@ def add_val(self, val: Any, vtype: GGUFValueType | None = None, add_vtype: bool if pack_fmt is not None: self.kv_data += self._pack(pack_fmt, val, skip_pack_prefix = vtype == GGUFValueType.BOOL) elif vtype == GGUFValueType.STRING: - encoded_val = val.encode("utf8") if isinstance(val, str) else val + encoded_val = val.encode("utf-8") if isinstance(val, str) else val self.kv_data += self._pack("Q", len(encoded_val)) self.kv_data += encoded_val elif vtype == GGUFValueType.ARRAY and isinstance(val, Sequence) and val: @@ -202,7 +202,7 @@ def add_tensor_info( raise ValueError(f'Duplicated tensor name {name}') self.ti_names.add(name) - encoded_name = name.encode("utf8") + encoded_name = name.encode("utf-8") self.ti_data += self._pack("Q", len(encoded_name)) self.ti_data += encoded_name n_dims = len(tensor_shape) @@ -476,7 +476,7 @@ def add_add_space_prefix(self, value: bool) -> None: self.add_bool(Keys.Tokenizer.ADD_PREFIX, value) def add_chat_template(self, value: str | Sequence[Mapping[str, str]]) -> None: - if isinstance(value, list): + if not isinstance(value, str): template_default = None template_names = set() diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py index 378eaecad05ba..b12d107f6b49f 100644 --- a/gguf-py/gguf/vocab.py +++ b/gguf-py/gguf/vocab.py @@ -4,7 +4,7 @@ import os import sys from pathlib import Path -from typing import Any, Callable +from typing import Any, Callable, Sequence, Mapping, Iterable from .gguf_writer import GGUFWriter @@ -13,11 +13,11 @@ class SpecialVocab: merges: list[str] add_special_token: dict[str, bool] special_token_ids: dict[str, int] - chat_template: str | None + chat_template: str | Sequence[Mapping[str, str]] | None def __init__( self, path: str | os.PathLike[str], load_merges: bool = False, - special_token_types: tuple[str, ...] 
| None = None, + special_token_types: Iterable[str] | None = None, n_vocab: int | None = None, ): self.special_token_ids = {} diff --git a/gguf-py/scripts/gguf-dump.py b/gguf-py/scripts/gguf-dump.py index dbf8915089275..c9c5f4c553c0d 100755 --- a/gguf-py/scripts/gguf-dump.py +++ b/gguf-py/scripts/gguf-dump.py @@ -43,7 +43,7 @@ def dump_metadata(reader: GGUFReader, args: argparse.Namespace) -> None: if len(field.types) == 1: curr_type = field.types[0] if curr_type == GGUFValueType.STRING: - print(' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf8')[:60])), end = '') + print(' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf-8')[:60])), end = '') elif field.types[0] in reader.gguf_scalar_to_np: print(' = {0}'.format(field.parts[-1][0]), end = '') print() diff --git a/gguf-py/scripts/gguf-new-metadata.py b/gguf-py/scripts/gguf-new-metadata.py index 3444ab41802c5..8cb60ef659596 100644 --- a/gguf-py/scripts/gguf-new-metadata.py +++ b/gguf-py/scripts/gguf-new-metadata.py @@ -34,7 +34,7 @@ def get_byteorder(reader: gguf.GGUFReader) -> gguf.GGUFEndian: return host_endian -def decode_field(field: gguf.ReaderField) -> Any: +def decode_field(field: gguf.ReaderField | None) -> Any: if field and field.types: main_type = field.types[0] @@ -42,11 +42,11 @@ def decode_field(field: gguf.ReaderField) -> Any: sub_type = field.types[-1] if sub_type == gguf.GGUFValueType.STRING: - return [str(bytes(field.parts[idx]), encoding='utf8') for idx in field.data] + return [str(bytes(field.parts[idx]), encoding='utf-8') for idx in field.data] else: return [pv for idx in field.data for pv in field.parts[idx].tolist()] if main_type == gguf.GGUFValueType.STRING: - return str(bytes(field.parts[-1]), encoding='utf8') + return str(bytes(field.parts[-1]), encoding='utf-8') else: return field.parts[-1][0] @@ -59,7 +59,7 @@ def get_field_data(reader: gguf.GGUFReader, key: str) -> Any: return decode_field(field) -def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new_metadata: Mapping[str, str], remove_metadata: Sequence[str]) -> None: +def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new_metadata: dict[str, str], remove_metadata: Sequence[str]) -> None: for field in reader.fields.values(): # Suppress virtual fields and fields written by GGUFWriter if field.name == gguf.Keys.General.ARCHITECTURE or field.name.startswith('GGUF.'): @@ -101,7 +101,7 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new for tensor in reader.tensors: # Dimensions are written in reverse order, so flip them first - shape = np.flipud(tensor.shape) + shape = np.flipud(tensor.shape).tolist() writer.add_tensor_info(tensor.name, shape, tensor.data.dtype, tensor.data.nbytes, tensor.tensor_type) writer.write_header_to_file() diff --git a/pyrightconfig.json b/pyrightconfig.json new file mode 100644 index 0000000000000..020a71a4ec214 --- /dev/null +++ b/pyrightconfig.json @@ -0,0 +1,3 @@ +{ + "extraPaths": ["gguf-py"], +} From c33775bcc7f523907f7409d7e19287ea9ef3bf67 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Tue, 30 Apr 2024 15:01:23 -0400 Subject: [PATCH 02/15] convert : upgrade to sentencepiece v0.2.0 --- convert.py | 20 ++++++++++++-------- requirements/requirements-convert.txt | 2 +- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/convert.py b/convert.py index 1c700cf6a3d65..fd5487dbb3755 100755 --- a/convert.py +++ b/convert.py @@ -281,6 +281,7 @@ def loadOriginalParamsJson(model: LazyModel, config_path: Path) -> 
Params: n_experts = None n_experts_used = None f_rope_freq_base = None + n_ff = None # hack to determine LLaMA v1 vs v2 vs CodeLlama if config.get("moe"): @@ -305,6 +306,8 @@ def loadOriginalParamsJson(model: LazyModel, config_path: Path) -> Params: n_experts_used = config["moe"]["num_experts_per_tok"] f_rope_freq_base = 1e6 + assert n_ff is not None + return Params( n_vocab = model["tok_embeddings.weight"].shape[0], n_embd = config["dim"], @@ -459,7 +462,8 @@ def __init__(self, base_path: Path): # not found in alternate location either raise FileNotFoundError('Cannot find tokenizer.model') - self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer)) + self.sentencepiece_tokenizer = SentencePieceProcessor() + self.sentencepiece_tokenizer.LoadFromFile(fname_tokenizer) vocab_size = self.sentencepiece_tokenizer.vocab_size() new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size} @@ -479,23 +483,23 @@ def __init__(self, base_path: Path): def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: tokenizer = self.sentencepiece_tokenizer for i in range(tokenizer.vocab_size()): - piece = tokenizer.id_to_piece(i) + piece = tokenizer.IdToPiece(i) text = piece.encode("utf-8") - score: float = tokenizer.get_score(i) + score: float = tokenizer.GetScore(i) toktype = gguf.TokenType.NORMAL - if tokenizer.is_unknown(i): + if tokenizer.IsUnknown(i): toktype = gguf.TokenType.UNKNOWN - if tokenizer.is_control(i): + if tokenizer.IsControl(i): toktype = gguf.TokenType.CONTROL # NOTE: I think added_tokens are user defined. # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED - if tokenizer.is_unused(i): + if tokenizer.IsUnused(i): toktype = gguf.TokenType.UNUSED - if tokenizer.is_byte(i): + if tokenizer.IsByte(i): toktype = gguf.TokenType.BYTE yield text, score, toktype @@ -904,7 +908,7 @@ def load() -> UnquantizedTensor: def rebuild_from_type_v2(func, new_type, args, state): return func(*args) - CLASSES = { + CLASSES: dict[tuple[str, str], type[LazyTensor] | LazyStorageKind] = { # getattr used here as a workaround for mypy not being smart enough to determine # the staticmethods have a __func__ attribute. 
('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'), diff --git a/requirements/requirements-convert.txt b/requirements/requirements-convert.txt index a3d6ecec0ac04..66af98abec8d7 100644 --- a/requirements/requirements-convert.txt +++ b/requirements/requirements-convert.txt @@ -1,5 +1,5 @@ numpy~=1.24.4 -sentencepiece~=0.1.98 +sentencepiece~=0.2.0 transformers>=4.35.2,<5.0.0 gguf>=0.1.0 protobuf>=4.21.0,<5.0.0 From 698f0b34793acb1fb2d0e465701f1a92b8d80615 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Tue, 30 Apr 2024 15:02:34 -0400 Subject: [PATCH 03/15] convert-hf : remove unused n_dims in extra_*_tensors --- convert-hf-to-gguf.py | 20 +++++++++----------- gguf-py/scripts/gguf-new-metadata.py | 2 +- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 374b081ee485f..7d5c6601ba7e7 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -165,10 +165,10 @@ def set_gguf_parameters(self): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: return [(self.map_tensor_name(name), data_torch)] - def extra_f32_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool: + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool: return False - def extra_f16_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool: + def extra_f16_tensors(self, name: str, new_name: str, bid: int | None) -> bool: return False def write_tensors(self): @@ -199,8 +199,8 @@ def write_tensors(self): data = data.astype(np.float32) # when both are true, the tensor keeps its original type - extra_f32 = self.extra_f32_tensors(n_dims, name, new_name, bid) - extra_f16 = self.extra_f16_tensors(n_dims, name, new_name, bid) + extra_f32 = self.extra_f32_tensors(name, new_name, bid) + extra_f16 = self.extra_f16_tensors(name, new_name, bid) # 1d tensors need to be converted to float32 if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or extra_f32) and not extra_f16: @@ -1038,8 +1038,8 @@ def set_vocab(self): # self.gguf_writer.add_bos_token_id(71013) # self.gguf_writer.add_eos_token_id(71013) - def extra_f32_tensors(self, n_dims: int, name: str, new_name: str) -> bool: - del n_dims, name, new_name # unused + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool: + del name, new_name, bid # unused # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?) 
return True @@ -2152,8 +2152,8 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter return [(self.map_tensor_name(name), data_torch)] - def extra_f32_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool: - del n_dims, new_name, bid # unused + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool: + del new_name, bid # unused # not used with get_rows, must be F32 return name == "embeddings.token_type_embeddings.weight" @@ -2345,9 +2345,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter return [(new_name, data_torch)] - def extra_f32_tensors(self, n_dims: int, name: str, new_name: str, bid: int | None) -> bool: - del n_dims # unused - + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool: return new_name in (self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [ gguf.MODEL_TENSOR.SSM_CONV1D, gguf.MODEL_TENSOR.SSM_X, diff --git a/gguf-py/scripts/gguf-new-metadata.py b/gguf-py/scripts/gguf-new-metadata.py index 8cb60ef659596..c8e3a83dfbd78 100644 --- a/gguf-py/scripts/gguf-new-metadata.py +++ b/gguf-py/scripts/gguf-new-metadata.py @@ -7,7 +7,7 @@ from pathlib import Path import numpy as np -from typing import Any, Mapping, Sequence +from typing import Any, Sequence # Necessary to load the local gguf package if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists(): From cde9ea65e8bd2c9f1fd6d9e8d769df1c93cec577 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Tue, 30 Apr 2024 18:12:01 -0400 Subject: [PATCH 04/15] convert-hf : simplify MoE weights stacking --- convert-hf-to-gguf.py | 641 ++++++++++++++---------------------------- 1 file changed, 215 insertions(+), 426 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 7d5c6601ba7e7..cec042baeda40 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -23,7 +23,7 @@ sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) import gguf -from convert import LlamaHfVocab, permute +from convert import LlamaHfVocab ###### MODEL DEFINITIONS ###### @@ -165,10 +165,10 @@ def set_gguf_parameters(self): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: return [(self.map_tensor_name(name), data_torch)] - def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool: + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: return False - def extra_f16_tensors(self, name: str, new_name: str, bid: int | None) -> bool: + def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: return False def write_tensors(self): @@ -199,15 +199,16 @@ def write_tensors(self): data = data.astype(np.float32) # when both are true, the tensor keeps its original type - extra_f32 = self.extra_f32_tensors(name, new_name, bid) - extra_f16 = self.extra_f16_tensors(name, new_name, bid) + extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims) + extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims) # 1d tensors need to be converted to float32 + # Most of the codebase that takes in 1D tensors only handles F32 tensors if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or extra_f32) and not extra_f16: data = data.astype(np.float32) # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 
and (name.endswith(".weight") and n_dims == 2 or extra_f16) and not extra_f32: + if self.ftype == 1 and data_dtype == np.float32 and (name.endswith(".weight") and n_dims >= 2 or extra_f16) and not extra_f32: data = data.astype(np.float16) print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") @@ -1038,8 +1039,8 @@ def set_vocab(self): # self.gguf_writer.add_bos_token_id(71013) # self.gguf_writer.add_eos_token_id(71013) - def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool: - del name, new_name, bid # unused + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del name, new_name, bid, n_dims # unused # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?) return True @@ -1072,90 +1073,73 @@ def set_gguf_parameters(self): self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True) self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"])) + _q_norms: list[dict[str, Tensor]] | None = None + _k_norms: list[dict[str, Tensor]] | None = None + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # FIXME - return super().modify_tensors(data_torch, name, bid) + n_head = self.hparams["num_attention_heads"] + n_kv_head = self.hparams["num_key_value_heads"] - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_head = self.hparams.get("num_attention_heads") - n_kv_head = self.hparams.get("num_key_value_heads") - q_norms = dict() - k_norms = dict() - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue - old_dtype = data_torch.dtype + if name.find("q_layernorm.norms") != -1: + assert bid is not None - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + if self._q_norms is None: + self._q_norms = [{} for _ in range(self.block_count)] - data = data_torch.squeeze().numpy() - n_dims = len(data.shape) - if name.find("q_layernorm.norms") != -1: - q_norms[name] = data - if len(q_norms) >= (block_count * n_head): - self._stack_qk_norm(block_count, name, tensor_map, n_head, q_norms, n_dims, layer_name="q_layernorm") - continue - if name.find("k_layernorm.norms") != -1: - k_norms[name] = data - if len(k_norms) >= (block_count * n_kv_head): - self._stack_qk_norm(block_count, name, tensor_map, n_kv_head, k_norms, n_dims, layer_name="k_layernorm") - continue + self._q_norms[bid][name] = data_torch - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() + if len(self._q_norms[bid]) >= n_head: + return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm") + else: + return [] + + if name.find("k_layernorm.norms") != -1: + assert bid is not None - n_dims = len(data.shape) - data_dtype = data.dtype + if self._k_norms is None: + self._k_norms = [{} for _ in range(self.block_count)] - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + self._k_norms[bid][name] = data_torch - # TODO: Why cant 
we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")): - data = data.astype(np.float32) + if len(self._k_norms[bid]) >= n_kv_head: + return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm") + else: + return [] - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2: - data = data.astype(np.float16) + return [(self.map_tensor_name(name), data_torch)] - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del name, bid, n_dims # unused - self.gguf_writer.add_tensor(new_name, data) + return new_name.endswith("_norm.weight") - def _stack_qk_norm(self, block_count, name, tensor_map, n_head, norms, n_dims, layer_name="q_layernorm"): - for bid in range(block_count): - datas = [] - for xid in range(n_head): - ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight" - datas.append(norms[ename]) - del norms[ename] - data = np.stack(datas, axis=0) - data_dtype = data.dtype - merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight" - new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")): - data = data.astype(np.float32) + def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"): + datas: list[Tensor] = [] + # extract the norms in order + for xid in range(n_head): + ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight" + datas.append(norms[ename]) + del norms[ename] + data_torch = torch.cat(datas, dim=0) - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2: - data = data.astype(np.float16) + merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight" + new_name = self.map_tensor_name(merged_name) - print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}") + return [(new_name, data_torch)] - self.gguf_writer.add_tensor(new_name, data) + def write_tensors(self): + super().write_tensors() + + if self._q_norms is not None or self._k_norms is not None: + # flatten two `list[dict[str, Tensor]]` into a single `list[str]` + norms = ( + [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else [] + ) + ( + [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else [] + ) + if len(norms) > 0: + raise ValueError(f"Unprocessed norms: {norms}") @Model.register("LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM") @@ -1195,108 +1179,69 @@ def set_gguf_parameters(self): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # FIXME - return super().modify_tensors(data_torch, name, bid) - - # Same as super class, but permuting q_proj, k_proj - def write_tensors(self): - 
block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_head = self.hparams.get("num_attention_heads") - n_kv_head = self.hparams.get("num_key_value_heads") - n_experts = self.hparams.get("num_local_experts") - experts = dict() - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.numpy() - - if name.endswith("q_proj.weight"): - data = permute(data, n_head, n_head) - if name.endswith("k_proj.weight"): - data = permute(data, n_head, n_kv_head) - - data = data.squeeze() + @staticmethod + def permute(weights: Tensor, n_head: int, n_head_kv: int | None): + if n_head_kv is not None and n_head != n_head_kv: + n_head = n_head_kv + return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) + .swapaxes(1, 2) + .reshape(weights.shape)) - # process the experts separately - if name.find("block_sparse_moe.experts") != -1: - experts[name] = data - if len(experts) >= n_experts: - # merge the experts into a single 3d tensor - for bid in range(block_count): - for wid in range(1, 4): - full = True - for xid in range(n_experts): - ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight" - if ename not in experts: - full = False - break - if not full: - continue + _experts: list[dict[str, Tensor]] | None = None - datas = [] - for xid in range(n_experts): - ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight" - datas.append(experts[ename]) - del experts[ename] + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + n_head = self.hparams["num_attention_heads"] + n_kv_head = self.hparams.get("num_key_value_heads") - data = np.stack(datas, axis=0) - data_dtype = data.dtype + if name.endswith("q_proj.weight"): + data_torch = LlamaModel.permute(data_torch, n_head, n_head) + if name.endswith("k_proj.weight"): + data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + # process the experts separately + if name.find("block_sparse_moe.experts") != -1: + n_experts = self.hparams["num_local_experts"] - if self.ftype == 1 and data_dtype == np.float32: - data = data.astype(np.float16) + assert bid is not None - merged_name = f"layers.{bid}.feed_forward.experts.w{wid}.weight" + if self._experts is None: + self._experts = [{} for _ in range(n_experts)] - new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() + self._experts[bid][name] = data_torch - print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}") + if len(self._experts[bid]) >= n_experts * 3: + tensors: list[tuple[str, Tensor]] = [] - self.gguf_writer.add_tensor(new_name, data) - continue + # merge the experts into a single 3d tensor + for wid in ["w1", "w2", "w3"]: + datas: list[Tensor] = [] - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() + for 
xid in range(n_experts): + ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight" + datas.append(self._experts[bid][ename]) + del self._experts[bid][ename] - n_dims = len(data.shape) - data_dtype = data.dtype + data_torch = torch.cat(datas, dim=0) - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight" - # 1d tensors need to be converted to float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) + new_name = self.map_tensor_name(merged_name) - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + tensors.append((new_name, data_torch)) + return tensors + else: + return [] - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + return [(self.map_tensor_name(name), data_torch)] - self.gguf_writer.add_tensor(new_name, data) + def write_tensors(self): + super().write_tensors() - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts.keys()}") + if self._experts is not None: + # flatten `list[dict[str, Tensor]]` into `list[str]` + experts = [k for d in self._experts for k in d.keys()] + if len(experts) > 0: + raise ValueError(f"Unprocessed experts: {experts}") @Model.register("GrokForCausalLM") @@ -1313,95 +1258,44 @@ def set_gguf_parameters(self): super().set_gguf_parameters() self.gguf_writer.add_name("Grok") + _experts: list[dict[str, Tensor]] | None = None + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # FIXME - return super().modify_tensors(data_torch, name, bid) + # process the experts separately + if name.find(".moe.") != -1: + n_experts = self.hparams["num_local_experts"] - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_experts = self.hparams.get("num_local_experts") - experts = dict() - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue + assert bid is not None - old_dtype = data_torch.dtype + if self._experts is None: + self._experts = [{} for _ in range(n_experts)] - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + self._experts[bid][name] = data_torch - data = data_torch.squeeze().numpy() - - # process the experts separately - if name.find(".moe.") != -1: - experts[name] = data - if len(experts) >= n_experts: - # merge the experts into a single 3d tensor - for bid in range(block_count): - for wid in ["linear", "linear_1", "linear_v"]: - full = True - for xid in range(n_experts): - ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight" - if ename not in experts: - full = False - break - if not full: - continue - - datas = [] - for xid in range(n_experts): - ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight" - datas.append(experts[ename]) - del experts[ename] - - data = np.stack(datas, axis=0) - data_dtype = data.dtype - - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - if 
self.ftype == 1 and data_dtype == np.float32: - data = data.astype(np.float16) - - merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight" - - new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) - continue + if len(self._experts[bid]) >= n_experts * 3: + tensors: list[tuple[str, Tensor]] = [] - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() + # merge the experts into a single 3d tensor + for wid in ["linear", "linear_1", "linear_v"]: + datas: list[Tensor] = [] - n_dims = len(data.shape) - data_dtype = data.dtype + for xid in range(n_experts): + ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight" + datas.append(self._experts[bid][ename]) + del self._experts[bid][ename] - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + data_torch = torch.cat(datas, dim=0) - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) + merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight" - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + new_name = self.map_tensor_name(merged_name) - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + tensors.append((new_name, data_torch)) + return tensors + else: + return [] - self.gguf_writer.add_tensor(new_name, data) + return [(self.map_tensor_name(name), data_torch)] @Model.register("DbrxForCausalLM") @@ -1435,73 +1329,44 @@ def set_gguf_parameters(self): print(f"gguf: file type = {self.ftype}") def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # FIXME - return super().modify_tensors(data_torch, name, bid) - - def write_tensors(self): - block_count = self.hparams.get("n_layers") - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - for name, data_torch in self.get_tensors(): - n_expert = self.hparams["ffn_config"]["moe_num_experts"] - n_ff = self.hparams["ffn_config"]["ffn_hidden_size"] - n_embd = self.hparams["d_model"] - - # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose - # original implementation expects (n_expert, n_ff, n_embd) for all experts weights - # But llama.cpp moe graph works differently - # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions - # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor - exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} - "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert} - "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} - experts = False - for exp_tensor_name in exp_tensor_names.keys(): - if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1: - experts = True - data_torch = data_torch.view(n_expert, 
n_ff, n_embd) - if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None: - data_torch = data_torch.permute(*permute_tensor) - break - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - # In MoE models the ffn tensors are typically most of the model weights, - # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight. - # Every other model has the weight names ending in .weight, - # let's assume that is the convention which is not the case for dbrx: - # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15 - new_name = tensor_map.get_name(name if not experts else name + ".weight", try_suffixes=(".weight",)) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype + del bid # unused - # Most of the codebase that takes in 1D tensors only handles F32 tensors - # and most of the outputs tensors are F32. - if data_dtype != np.float32 and n_dims == 1: - print(f"Can not map tensor {name!r}: all 1D tensors must be F32") - sys.exit() + n_expert = self.hparams["ffn_config"]["moe_num_experts"] + n_ff = self.hparams["ffn_config"]["ffn_hidden_size"] + n_embd = self.hparams["d_model"] + + # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose + # original implementation expects (n_expert, n_ff, n_embd) for all experts weights + # But llama.cpp moe graph works differently + # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions + # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor + exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} + "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert} + "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} + experts = False + + for exp_tensor_name in exp_tensor_names.keys(): + if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1: + experts = True + data_torch = data_torch.view(n_expert, n_ff, n_embd) + if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None: + data_torch = data_torch.permute(*permute_tensor) + break - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + # map tensor names + # In MoE models the ffn tensors are typically most of the model weights, + # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight. 
+ # Every other model has the weight names ending in .weight, + # let's assume that is the convention which is not the case for dbrx: + # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15 + new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",)) - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and n_dims > 1: - data = data.astype(np.float16) + return [(new_name, data_torch)] - print(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}") + def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del name, new_name, bid # unused - self.gguf_writer.add_tensor(new_name, data) + return n_dims > 1; @Model.register("MiniCPMForCausalLM") @@ -1611,98 +1476,57 @@ def set_gguf_parameters(self): if (n_experts := self.hparams.get("num_experts")) is not None: self.gguf_writer.add_expert_count(n_experts) + _experts: list[dict[str, Tensor]] | None = None + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # FIXME - return super().modify_tensors(data_torch, name, bid) + # process the experts separately + if name.find("experts") != -1: + n_experts = self.hparams["num_experts"] + assert bid is not None - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_experts = self.hparams.get("num_experts") - experts = dict() - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue + if self._experts is None: + self._experts = [{} for _ in range(n_experts)] - old_dtype = data_torch.dtype + self._experts[bid][name] = data_torch - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + if len(self._experts[bid]) >= n_experts * 3: + tensors: list[tuple[str, Tensor]] = [] - data = data_torch.squeeze().numpy() - - # process the experts separately - if name.find("experts") != -1: - experts[name] = data - if len(experts) >= n_experts * 3: - # merge the experts into a single 3d tensor - for bid in range(block_count): - for w_name in ["down_proj", "gate_proj", "up_proj"]: - full = True - for xid in range(n_experts): - ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" - if ename not in experts: - full = False - break - if not full: - continue - - datas = [] - for xid in range(n_experts): - ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" - datas.append(experts[ename]) - del experts[ename] - - data = np.stack(datas, axis=0) - data_dtype = data.dtype - - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - if self.ftype == 1 and data_dtype == np.float32: - data = data.astype(np.float16) - - merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - - new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) - continue + # merge the experts into a single 3d tensor + for 
w_name in ["down_proj", "gate_proj", "up_proj"]: + datas: list[Tensor] = [] - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() + for xid in range(n_experts): + ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" + datas.append(self._experts[bid][ename]) + del self._experts[bid][ename] - n_dims = len(data.shape) - data_dtype = data.dtype + data_torch = torch.cat(datas, dim=0) - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" + + new_name = self.map_tensor_name(merged_name) + + tensors.append((new_name, data_torch)) + return tensors + else: + return [] - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")): - data = data.astype(np.float32) + return [(self.map_tensor_name(name), data_torch)] - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del name, bid, n_dims # unused - print(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}") + return new_name.endswith("_norm.weight") - self.gguf_writer.add_tensor(new_name, data) + def write_tensors(self): + super().write_tensors() - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts.keys()}") + if self._experts is not None: + # flatten `list[dict[str, Tensor]]` into `list[str]` + experts = [k for d in self._experts for k in d.keys()] + if len(experts) > 0: + raise ValueError(f"Unprocessed experts: {experts}") @Model.register("GPT2LMHeadModel") @@ -2152,8 +1976,8 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter return [(self.map_tensor_name(name), data_torch)] - def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool: - del new_name, bid # unused + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del new_name, bid, n_dims # unused # not used with get_rows, must be F32 return name == "embeddings.token_type_embeddings.weight" @@ -2345,7 +2169,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter return [(new_name, data_torch)] - def extra_f32_tensors(self, name: str, new_name: str, bid: int | None) -> bool: + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del n_dims # unused + return new_name in (self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [ gguf.MODEL_TENSOR.SSM_CONV1D, gguf.MODEL_TENSOR.SSM_X, @@ -2386,54 +2212,17 @@ def set_gguf_parameters(self): # Same as super class, but permuting q_proj, k_proj # Copied from: LlamaModel def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # FIXME - return super().modify_tensors(data_torch, name, bid) + del bid # unused - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, 
block_count) - n_head = self.hparams.get("num_attention_heads") + n_head = self.hparams["num_attention_heads"] n_kv_head = self.hparams.get("num_key_value_heads") - for name, data_torch in self.get_tensors(): - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.numpy() - - if name.endswith("q_proj.weight"): - data = permute(data, n_head, n_head) - if name.endswith("k_proj.weight"): - data = permute(data, n_head, n_kv_head) - - data = data.squeeze() - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print(f"Can not map tensor {name!r}") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # 1d tensors need to be converted to float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and n_dims == 2: - data = data.astype(np.float16) - - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + if name.endswith("q_proj.weight"): + data_torch = LlamaModel.permute(data_torch, n_head, n_head) + if name.endswith("k_proj.weight"): + data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - self.gguf_writer.add_tensor(new_name, data) + return [(self.map_tensor_name(name), data_torch)] ###### CONVERSION LOGIC ###### From 56f60f5d698eabac7451989b13d17cd0eab93652 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Wed, 1 May 2024 11:36:23 -0400 Subject: [PATCH 05/15] convert-hf : flake8 linter doesn't like semicolons --- convert-hf-to-gguf.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index cec042baeda40..4b3ee1e76125e 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -1080,7 +1080,6 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter n_head = self.hparams["num_attention_heads"] n_kv_head = self.hparams["num_key_value_heads"] - if name.find("q_layernorm.norms") != -1: assert bid is not None @@ -1366,7 +1365,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: del name, new_name, bid # unused - return n_dims > 1; + return n_dims > 1 @Model.register("MiniCPMForCausalLM") From 3870164f477faf19fa2c9ebe259b1c9dc3b3b871 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Wed, 1 May 2024 12:30:20 -0400 Subject: [PATCH 06/15] convert-hf : allow unusual model part names For example, loading `model-00001-of-00001.safetensors` now works. * convert-hf : fix stacking MoE expert tensors `torch.stack` and `torch.cat` don't do the same thing. * convert-hf : fix Mamba conversion Tested to work even with a SentencePiece-based tokenizer. 
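As a rough illustration of why that distinction matters for the expert tensors (a minimal sketch with made-up shapes, not code from this patch): the merged expert tensor needs a new leading expert dimension, which torch.stack adds and torch.cat does not.

    import torch

    # hypothetical example: 4 experts, each with a (n_ff, n_embd) = (11008, 4096) weight
    experts = [torch.zeros(11008, 4096) for _ in range(4)]

    print(torch.cat(experts, dim=0).shape)    # torch.Size([44032, 4096])   -- experts fused into one 2D matrix
    print(torch.stack(experts, dim=0).shape)  # torch.Size([4, 11008, 4096]) -- one slice per expert, as needed here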
--- convert-hf-to-gguf.py | 71 ++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 41 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 4b3ee1e76125e..d734d7b6e3ae5 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -49,9 +49,8 @@ class Model(Protocol): is_big_endian: bool endianess: gguf.GGUFEndian use_temp_file: bool + part_names: list[str] is_safetensors: bool - num_parts: int - part_names: Iterable[str] hparams: dict[str, Any] gguf_writer: gguf.GGUFWriter block_count: int @@ -67,9 +66,10 @@ def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: self.is_big_endian = is_big_endian self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE self.use_temp_file = use_temp_file - self.is_safetensors = self._is_model_safetensors() - self.num_parts = Model.count_model_parts(self.dir_model, ".safetensors" if self.is_safetensors else ".bin") - self.part_names = self._get_part_names() + self.part_names = Model.get_model_part_names(self.dir_model, ".safetensors") + self.is_safetensors = len(self.part_names) > 0 + if not self.is_safetensors: + self.part_names = Model.get_model_part_names(self.dir_model, ".bin") self.hparams = Model.load_hparams(self.dir_model) self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file) self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer"]) @@ -109,7 +109,7 @@ def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suf sys.exit() if "{bid}" in name: assert bid is not None - name = name.format(bid) + name = name.format(bid=bid) return name + suffix def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str: @@ -228,13 +228,13 @@ def write_vocab(self): self.gguf_writer.close() @staticmethod - def count_model_parts(dir_model: Path, prefix: str) -> int: - num_parts = 0 + def get_model_part_names(dir_model: Path, suffix: str) -> list[str]: + part_names: list[str] = [] for filename in os.listdir(dir_model): - if filename.endswith(prefix): - num_parts += 1 + if filename.endswith(suffix): + part_names.append(filename) - return num_parts + return part_names @staticmethod def load_hparams(dir_model): @@ -258,19 +258,6 @@ def from_model_architecture(cls, arch): except KeyError: raise NotImplementedError(f'Architecture {arch!r} not supported!') from None - def _is_model_safetensors(self) -> bool: - return Model.count_model_parts(self.dir_model, ".safetensors") > 0 - - def _get_part_names(self) -> Iterable[str]: - if self.is_safetensors: - if self.num_parts == 1: # there's only one .safetensors file - return ("model.safetensors",) - return (f"model-{n:05}-of-{self.num_parts:05}.safetensors" for n in range(1, self.num_parts + 1)) - - if self.num_parts == 1: # there's only one .bin file - return ("pytorch_model.bin",) - return (f"pytorch_model-{n:05}-of-{self.num_parts:05}.bin" for n in range(1, self.num_parts + 1)) - # used for GPT-2 BPE and WordPiece vocabs def get_vocab_base(self) -> tuple[list[str], list[int], str]: tokens: list[str] = [] @@ -446,7 +433,7 @@ def _set_vocab_sentencepiece(self): raise FileNotFoundError(f"File not found: {tokenizer_path}") tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(tokenizer_path) + tokenizer.LoadFromFile(str(tokenizer_path)) vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) @@ -1120,7 +1107,7 @@ def _stack_qk_norm(self, bid: 
int, n_head: int, norms: dict[str, Tensor], layer_ ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight" datas.append(norms[ename]) del norms[ename] - data_torch = torch.cat(datas, dim=0) + data_torch = torch.stack(datas, dim=0) merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight" new_name = self.map_tensor_name(merged_name) @@ -1204,7 +1191,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter assert bid is not None if self._experts is None: - self._experts = [{} for _ in range(n_experts)] + self._experts = [{} for _ in range(self.block_count)] self._experts[bid][name] = data_torch @@ -1220,7 +1207,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter datas.append(self._experts[bid][ename]) del self._experts[bid][ename] - data_torch = torch.cat(datas, dim=0) + data_torch = torch.stack(datas, dim=0) merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight" @@ -1267,7 +1254,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter assert bid is not None if self._experts is None: - self._experts = [{} for _ in range(n_experts)] + self._experts = [{} for _ in range(self.block_count)] self._experts[bid][name] = data_torch @@ -1283,7 +1270,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter datas.append(self._experts[bid][ename]) del self._experts[bid][ename] - data_torch = torch.cat(datas, dim=0) + data_torch = torch.stack(datas, dim=0) merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight" @@ -1484,7 +1471,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter assert bid is not None if self._experts is None: - self._experts = [{} for _ in range(n_experts)] + self._experts = [{} for _ in range(self.block_count)] self._experts[bid][name] = data_torch @@ -1500,7 +1487,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter datas.append(self._experts[bid][ename]) del self._experts[bid][ename] - data_torch = torch.cat(datas, dim=0) + data_torch = torch.stack(datas, dim=0) merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" @@ -1604,7 +1591,7 @@ def set_vocab(self): sys.exit(1) tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(tokenizer_path) + tokenizer.LoadFromFile(str(tokenizer_path)) vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) @@ -1786,7 +1773,7 @@ def set_vocab(self): add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(tokenizer_path) + tokenizer.LoadFromFile(str(tokenizer_path)) tokenizer.serialized_model_proto vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) @@ -2171,13 +2158,15 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: del n_dims # unused - return new_name in (self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [ - gguf.MODEL_TENSOR.SSM_CONV1D, - gguf.MODEL_TENSOR.SSM_X, - gguf.MODEL_TENSOR.SSM_DT, - gguf.MODEL_TENSOR.SSM_A, - gguf.MODEL_TENSOR.SSM_D, - ]) + return bid is not None and new_name in ( + self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [ + gguf.MODEL_TENSOR.SSM_CONV1D, + gguf.MODEL_TENSOR.SSM_X, + gguf.MODEL_TENSOR.SSM_DT, + gguf.MODEL_TENSOR.SSM_A, + gguf.MODEL_TENSOR.SSM_D, + ] + ) 
@Model.register("CohereForCausalLM") From dcd8dfa1b5a242e8b48c1b0eaf6e765abf316158 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Wed, 1 May 2024 13:07:10 -0400 Subject: [PATCH 07/15] convert : use a string for the SentencePiece tokenizer path --- convert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert.py b/convert.py index fd5487dbb3755..ce1a4b9f5f487 100755 --- a/convert.py +++ b/convert.py @@ -463,7 +463,7 @@ def __init__(self, base_path: Path): raise FileNotFoundError('Cannot find tokenizer.model') self.sentencepiece_tokenizer = SentencePieceProcessor() - self.sentencepiece_tokenizer.LoadFromFile(fname_tokenizer) + self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer)) vocab_size = self.sentencepiece_tokenizer.vocab_size() new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size} From 21068b6bdfbbfade930a00f2d553d51258dc0a14 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Wed, 1 May 2024 16:59:21 -0400 Subject: [PATCH 08/15] convert-hf : display tensor shape --- convert-hf-to-gguf.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index d734d7b6e3ae5..1dec1e5831e52 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -191,6 +191,7 @@ def write_tensors(self): break for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)): + data: np.ndarray = data # type hint n_dims = len(data.shape) data_dtype = data.dtype @@ -211,7 +212,11 @@ def write_tensors(self): if self.ftype == 1 and data_dtype == np.float32 and (name.endswith(".weight") and n_dims >= 2 or extra_f16) and not extra_f32: data = data.astype(np.float16) - print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + # reverse shape to make it similar to the internal ggml dimension order + shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}" + + # n_dims is implicit in the shape + print(f"{new_name}, shape = {shape_str}, {old_dtype} --> {data.dtype}") self.gguf_writer.add_tensor(new_name, data) @@ -1774,7 +1779,6 @@ def set_vocab(self): tokenizer = SentencePieceProcessor() tokenizer.LoadFromFile(str(tokenizer_path)) - tokenizer.serialized_model_proto vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) From 639b374b1a86d3d1e7d374586ed312d3887ff6a9 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Wed, 1 May 2024 19:02:34 -0400 Subject: [PATCH 09/15] convert-hf : convert norms to f32 by default --- convert-hf-to-gguf.py | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 1dec1e5831e52..a846b63fed6a3 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -199,17 +199,21 @@ def write_tensors(self): if self.ftype == 0 and data_dtype == np.float16: data = data.astype(np.float32) - # when both are true, the tensor keeps its original type + # when both are True, f32 should win extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims) extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims) - # 1d tensors need to be converted to float32 - # Most of the codebase that takes in 1D tensors only handles F32 tensors - if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or extra_f32) and not extra_f16: - data = data.astype(np.float32) + # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors + extra_f32 = extra_f32 or n_dims == 
1 or new_name.endswith("_norm.weight") # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and (name.endswith(".weight") and n_dims >= 2 or extra_f16) and not extra_f32: + extra_f16 = extra_f16 or (name.endswith(".weight") and n_dims >= 2) + + # when both extra_f32 and extra_f16 are False, convert to float32 by default + if self.ftype == 1 and data_dtype == np.float16 and (extra_f32 or not extra_f16): + data = data.astype(np.float32) + + if self.ftype == 1 and data_dtype == np.float32 and extra_f16 and not extra_f32: data = data.astype(np.float16) # reverse shape to make it similar to the internal ggml dimension order @@ -1100,11 +1104,6 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter return [(self.map_tensor_name(name), data_torch)] - def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: - del name, bid, n_dims # unused - - return new_name.endswith("_norm.weight") - def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"): datas: list[Tensor] = [] # extract the norms in order @@ -1505,11 +1504,6 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter return [(self.map_tensor_name(name), data_torch)] - def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: - del name, bid, n_dims # unused - - return new_name.endswith("_norm.weight") - def write_tensors(self): super().write_tensors() From 644c2696d0c75bb35793307f0ff55f9c95f5b977 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Wed, 1 May 2024 19:16:59 -0400 Subject: [PATCH 10/15] convert-hf : sort model part names `os.listdir` is said to list files in arbitrary order. Sorting the file names should let "model-00009-of-00042.safetensors" be loaded before "model-00010-of-00042.safetensors". --- convert-hf-to-gguf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index a846b63fed6a3..91f9d127b02e5 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -243,6 +243,8 @@ def get_model_part_names(dir_model: Path, suffix: str) -> list[str]: if filename.endswith(suffix): part_names.append(filename) + part_names.sort() + return part_names @staticmethod From ce067af1184ae4ca7fa326f136dc6364e98251be Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Thu, 2 May 2024 15:00:36 -0400 Subject: [PATCH 11/15] convert-hf : use an ABC for Model again It seems Protocol can't be used as a statically type-checked ABC, because its subclasses also can't be instantiated. (why did it seem to work?) At least there's still a way to throw an error when forgetting to define the `model_arch` property of any registered Model subclasses. 
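As a rough usage sketch of the check added by this patch (the class name below is hypothetical), registering a Model subclass that forgets to define `model_arch` now fails at import time instead of partway through a conversion:

    @Model.register("HypotheticalForCausalLM")
    class HypotheticalModel(Model):
        pass
    # raises: TypeError: Missing property 'model_arch' for 'HypotheticalModel'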
--- convert-hf-to-gguf.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 91f9d127b02e5..a40924408f011 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -8,10 +8,11 @@ import os import re import sys +from abc import ABC from enum import IntEnum from pathlib import Path from hashlib import sha256 -from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Protocol, Sequence, TypeVar, cast +from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Sequence, TypeVar, cast import numpy as np import torch @@ -40,7 +41,7 @@ class SentencePieceTokenTypes(IntEnum): AnyModel = TypeVar("AnyModel", bound="type[Model]") -class Model(Protocol): +class Model(ABC): _model_classes: dict[str, type[Model]] = {} dir_model: Path @@ -57,6 +58,7 @@ class Model(Protocol): tensor_map: gguf.TensorNameMap tensors: dict[str, Tensor] + # subclasses should define this! model_arch: gguf.MODEL_ARCH def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool): @@ -163,12 +165,18 @@ def set_gguf_parameters(self): print(f"gguf: file type = {self.ftype}") def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused + return [(self.map_tensor_name(name), data_torch)] def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del name, new_name, bid, n_dims # unused + return False def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del name, new_name, bid, n_dims # unused + return False def write_tensors(self): @@ -248,7 +256,7 @@ def get_model_part_names(dir_model: Path, suffix: str) -> list[str]: return part_names @staticmethod - def load_hparams(dir_model): + def load_hparams(dir_model: Path): with open(dir_model / "config.json", "r", encoding="utf-8") as f: return json.load(f) @@ -258,12 +266,14 @@ def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]: def func(modelcls: AnyModel) -> AnyModel: for name in names: + if "model_arch" not in modelcls.__dict__: + raise TypeError(f"Missing property 'model_arch' for {modelcls.__name__!r}") cls._model_classes[name] = modelcls return modelcls return func @classmethod - def from_model_architecture(cls, arch): + def from_model_architecture(cls, arch: str) -> type[Model]: try: return cls._model_classes[arch] except KeyError: From 13f4cf70dbafe0d652ed9f1a3961fb40428d56bc Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Thu, 2 May 2024 15:50:21 -0400 Subject: [PATCH 12/15] convert-hf : use a plain class for Model, and forbid direct instantiation There are no abstract methods used anyway, so using ABC isn't really necessary. 
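A quick sketch of the intended behaviour after this change (the constructor arguments are placeholders, not a real invocation):

    # direct instantiation is rejected by the guard in __init__
    Model(Path("some-model"), ftype=1, fname_out=Path("out.gguf"), is_big_endian=False, use_temp_file=False)
    # raises: TypeError: 'Model' should not be directly instantiated

    # defining a subclass without model_arch is rejected by __init_subclass__
    class BrokenModel(Model):
        pass
    # raises: TypeError: Missing property 'model_arch' for 'BrokenModel'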
--- convert-hf-to-gguf.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index a40924408f011..3191afcfd6f1f 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -8,7 +8,6 @@ import os import re import sys -from abc import ABC from enum import IntEnum from pathlib import Path from hashlib import sha256 @@ -41,7 +40,7 @@ class SentencePieceTokenTypes(IntEnum): AnyModel = TypeVar("AnyModel", bound="type[Model]") -class Model(ABC): +class Model: _model_classes: dict[str, type[Model]] = {} dir_model: Path @@ -62,6 +61,8 @@ class Model(ABC): model_arch: gguf.MODEL_ARCH def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool): + if self.__class__ == Model: + raise TypeError(f"{self.__class__.__name__!r} should not be directly instantiated") self.dir_model = dir_model self.ftype = ftype self.fname_out = fname_out @@ -78,6 +79,13 @@ def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count) self.tensors = dict(self.get_tensors()) + @classmethod + def __init_subclass__(cls): + # can't use an abstract property, because overriding it without type errors + # would require using decorated functions instead of simply defining the property + if "model_arch" not in cls.__dict__: + raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}") + def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any: key = next((k for k in keys if k in self.hparams), None) if key is not None: @@ -266,8 +274,6 @@ def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]: def func(modelcls: AnyModel) -> AnyModel: for name in names: - if "model_arch" not in modelcls.__dict__: - raise TypeError(f"Missing property 'model_arch' for {modelcls.__name__!r}") cls._model_classes[name] = modelcls return modelcls return func From 98f2d0e0d73c9118dda9a4bb8a44fa554a3acc73 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Fri, 3 May 2024 22:04:31 -0400 Subject: [PATCH 13/15] convert-hf : more consistent formatting of cmdline args --- convert-hf-to-gguf.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 1a12f9c400b18..7f1fceab41742 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -2236,7 +2236,8 @@ def parse_args() -> argparse.Namespace: ) parser.add_argument( "--awq-path", type=Path, default=None, - help="Path to scale awq cache file") + help="Path to scale awq cache file", + ) parser.add_argument( "--outfile", type=Path, help="path to write to; default: based on input", @@ -2245,14 +2246,26 @@ def parse_args() -> argparse.Namespace: "--outtype", type=str, choices=["f32", "f16"], default="f16", help="output format - use f32 for float32, f16 for float16", ) - parser.add_argument("--bigendian", action="store_true", help="model is executed on big endian machine") + parser.add_argument( + "--bigendian", action="store_true", + help="model is executed on big endian machine", + ) parser.add_argument( "model", type=Path, help="directory containing model file", ) - parser.add_argument("--use-temp-file", action="store_true", help="use the tempfile library while processing (helpful when running out of memory, process killed)") - parser.add_argument("--model-name", type=str, default=None, help="name of the model") - parser.add_argument("--verbose", 
action="store_true", help="increase output verbosity") + parser.add_argument( + "--use-temp-file", action="store_true", + help="use the tempfile library while processing (helpful when running out of memory, process killed)", + ) + parser.add_argument( + "--model-name", type=str, default=None, + help="name of the model", + ) + parser.add_argument( + "--verbose", action="store_true", + help="increase output verbosity", + ) return parser.parse_args() From f2099c50ab1cd21ed746264daad9538f754a31db Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Sat, 4 May 2024 09:09:47 -0400 Subject: [PATCH 14/15] convert-hf : align the message logged for converted tensors --- convert-hf-to-gguf.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 7f1fceab41742..8750c16715d37 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -189,6 +189,8 @@ def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: i return False def write_tensors(self): + max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,") + for name, data_torch in self.tensors.items(): # we don't need these if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")): @@ -237,7 +239,7 @@ def write_tensors(self): shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}" # n_dims is implicit in the shape - logger.info(f"{new_name}, shape = {shape_str}, {old_dtype} --> {data.dtype}") + logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data.dtype}, shape = {shape_str}") self.gguf_writer.add_tensor(new_name, data) From 215a0d38c88752d07973cdee9fece4d4de4ca168 Mon Sep 17 00:00:00 2001 From: Francis Couture-Harpin Date: Sat, 4 May 2024 23:55:42 -0400 Subject: [PATCH 15/15] convert-hf : fix Refact conversion --- convert-hf-to-gguf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 8750c16715d37..bbefd46f63989 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -999,7 +999,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) n_head = self.hparams["n_head"] n_head_kv = 1 - head_dim = hidden_dim // n_head + head_dim = self.hparams["n_embd"] // n_head tensors: list[tuple[str, Tensor]] = []