From 9ecf572f8923caeab9e4372e2952fd5e28647e42 Mon Sep 17 00:00:00 2001 From: "Chen, Zhentao" Date: Thu, 19 Oct 2023 16:08:47 +0800 Subject: [PATCH] correct Readme GPU example and API docstring (#9225) * update readme to correct GPU usage * update from_pretrained supported low bit options * fix style check --- python/llm/README.md | 1 + python/llm/src/bigdl/llm/transformers/model.py | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/python/llm/README.md b/python/llm/README.md index c7364086382..92ec5c11089 100644 --- a/python/llm/README.md +++ b/python/llm/README.md @@ -127,6 +127,7 @@ You may apply INT4 optimizations to any Hugging Face *Transformers* model on Int ```python #load Hugging Face Transformers model with INT4 optimizations from bigdl.llm.transformers import AutoModelForCausalLM +import intel_extension_for_pytorch model = AutoModelForCausalLM.from_pretrained('/path/to/model/', load_in_4bit=True) #run the optimized model on Intel GPU diff --git a/python/llm/src/bigdl/llm/transformers/model.py b/python/llm/src/bigdl/llm/transformers/model.py index 76dea7fc8d9..3f34c45fb5b 100644 --- a/python/llm/src/bigdl/llm/transformers/model.py +++ b/python/llm/src/bigdl/llm/transformers/model.py @@ -60,9 +60,9 @@ def from_pretrained(cls, :param load_in_4bit: boolean value, True means load linear's weight to symmetric int 4. Default to be False. :param load_in_low_bit: str value, options are sym_int4, asym_int4, sym_int5, asym_int5 - , sym_int8 or fp16. sym_int4 means symmetric int 4, asym_int4 means - asymmetric int 4, etc. Relevant low bit optimizations will - be applied to the model. + , sym_int8, nf3, nf4 or fp16. sym_int4 means symmetric int 4, + asym_int4 means asymmetric int 4, nf4 means 4-bit NormalFloat, etc. + Relevant low bit optimizations will be applied to the model. :param optimize_model: boolean value, Whether to further optimize the low_bit llm model. Default to be True. 
:param modules_to_not_convert: list of str value, modules (nn.Module) that are skipped when @@ -106,7 +106,8 @@ def load_convert(cls, q_k, optimize_model, *args, **kwargs): from .convert import ggml_convert_low_bit invalidInputError(q_k in ggml_tensor_qtype, f"Unknown load_in_low_bit value: {q_k}, expected:" - f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8 or fp16.") + f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4 " + "or fp16.") qtype = ggml_tensor_qtype[q_k] # In case it needs a second try,