From aa98ef96fea6d5b9dcc80da1e4e207de8996513d Mon Sep 17 00:00:00 2001
From: Ruonan Wang
Date: Fri, 2 Aug 2024 10:55:16 +0300
Subject: [PATCH] change mixed_precision to q6_k (#11706)

---
 python/llm/src/ipex_llm/transformers/convert.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/llm/src/ipex_llm/transformers/convert.py b/python/llm/src/ipex_llm/transformers/convert.py
index 05d16926ab8..9689615a31a 100644
--- a/python/llm/src/ipex_llm/transformers/convert.py
+++ b/python/llm/src/ipex_llm/transformers/convert.py
@@ -394,7 +394,7 @@ def _replace_with_low_bit_linear(model, qtype, modules_to_not_convert=None,
         if mixed_precision and is_lm_head(name, model_config, out_features):
             if cur_qtype in [ggml_tensor_qtype["sym_int4"],
                              ggml_tensor_qtype["asym_int4"]]:
-                cur_qtype = ggml_tensor_qtype["sym_int8"]
+                cur_qtype = ggml_tensor_qtype["q6_k"]
         # check hidden size whether is a multiple of 256
         cur_qtype = check_hidden_size(cur_qtype, in_features)
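
For reference, below is a minimal, self-contained sketch of the selection path this one-line change affects. The stub bodies of is_lm_head and check_hidden_size, the integer qtype values, and the pick_qtype wrapper are illustrative assumptions made for this sketch; the real implementations live elsewhere in ipex_llm/transformers/convert.py.

    # Illustrative qtype registry; the real values come from ipex_llm.
    ggml_tensor_qtype = {"sym_int4": 2, "asym_int4": 3, "sym_int8": 8, "q6_k": 19}

    def is_lm_head(name, model_config, out_features):
        # Assumed heuristic: the lm_head projects hidden states onto the vocab.
        return name == "lm_head" or \
            out_features == getattr(model_config, "vocab_size", -1)

    def check_hidden_size(qtype, in_features):
        # k-quants such as q6_k pack weights into super-blocks of 256, so the
        # input dimension must be a multiple of 256; the fallback qtype here
        # (sym_int8) is an assumption for this sketch.
        if qtype == ggml_tensor_qtype["q6_k"] and in_features % 256 != 0:
            return ggml_tensor_qtype["sym_int8"]
        return qtype

    def pick_qtype(cur_qtype, mixed_precision, name, model_config,
                   in_features, out_features):
        if mixed_precision and is_lm_head(name, model_config, out_features):
            if cur_qtype in (ggml_tensor_qtype["sym_int4"],
                             ggml_tensor_qtype["asym_int4"]):
                # This patch: upgrade the lm_head to q6_k (was sym_int8).
                cur_qtype = ggml_tensor_qtype["q6_k"]
        return check_hidden_size(cur_qtype, in_features)

    # Example: a 4-bit model with mixed_precision=True gets a q6_k lm_head.
    class _Cfg:
        vocab_size = 32000

    assert pick_qtype(ggml_tensor_qtype["sym_int4"], True, "lm_head",
                      _Cfg(), 4096, 32000) == ggml_tensor_qtype["q6_k"]

Net effect of the patch: with mixed_precision enabled, 4-bit models keep their base qtype for all other layers, while the lm_head is now quantized to the 6-bit k-quant q6_k rather than sym_int8; the multiple-of-256 check applies because q6_k groups weights into 256-element super-blocks.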