Fix gguf_q4k (#11293)
* update embedding parameter

* update benchmark
rnwang04 authored Jun 12, 2024
1 parent 8edcdeb commit 14b1e6b
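
In short, the commit threads a torch_dtype argument through the benchmark's model-loading calls and passes num_embeddings from the low-bit embedding layer down to the dequantize kernel. As a minimal sketch of the dtype selection only (the helper name is illustrative and the code is assumed to mirror the run.py change below, not the verbatim benchmark code):

import torch

def pick_load_dtype(fp16: bool):
    # With fp16 the model is loaded directly in float16; otherwise the
    # loader is left to choose the dtype ('auto'), matching the if/else
    # added at the top of run_transformer_int4_gpu in this commit.
    return torch.float16 if fp16 else 'auto'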
Showing 2 changed files with 32 additions and 20 deletions.
python/llm/dev/benchmark/all-in-one/run.py (29 additions, 19 deletions)
@@ -449,40 +449,50 @@ def run_transformer_int4_gpu(repo_id,
     model_path = get_model_path(repo_id, local_model_hub)
     # Load model in 4 bit,
     # which convert the relevant layers in the model into INT4 format
+    if fp16:
+        torch_dtype = torch.float16
+    else:
+        torch_dtype = 'auto'
     st = time.perf_counter()
     origin_repo_id = repo_id.replace("-4bit", "")
     if origin_repo_id in CHATGLM_IDS:
         if "4bit" in repo_id:
             model = AutoModel.load_low_bit(model_path, optimize_model=True,
-                                           trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+                                           trust_remote_code=True, use_cache=True,
+                                           cpu_embedding=cpu_embedding,
+                                           torch_dtype=torch_dtype).eval()
         else:
             model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
-                                              trust_remote_code=True, use_cache=True).eval()
+                                              trust_remote_code=True, use_cache=True,
+                                              torch_dtype=torch_dtype).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, cpu_embedding=cpu_embedding)
     elif origin_repo_id in LLAMA_IDS:
         model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
-                                                     use_cache=True, cpu_embedding=cpu_embedding).eval()
+                                                     use_cache=True, cpu_embedding=cpu_embedding,
+                                                     torch_dtype=torch_dtype).eval()
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
     else:
         if "4bit" in repo_id:
             model = AutoModelForCausalLM.load_low_bit(model_path, optimize_model=True,
-                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+                                                      trust_remote_code=True, use_cache=True,
+                                                      cpu_embedding=cpu_embedding,
+                                                      torch_dtype=torch_dtype).eval()
         else:
             if 'starcoder' in repo_id:
                 # Load starcoder-15.5b model in bf16 format to avoid CPU OOM.
                 model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
-                                                             trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding, torch_dtype=torch.bfloat16).eval()
+                                                             trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
+                                                             torch_dtype=torch.bfloat16 if not fp16 else torch.float16).eval()
                 # Convert the low-bit model back to fp32 for performance considerations.
-                model = model.float()
+                if not fp16:
+                    model = model.float()
             else:
                 model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
-                                                             trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+                                                             trust_remote_code=True, use_cache=True,
+                                                             cpu_embedding=cpu_embedding,
+                                                             torch_dtype=torch_dtype).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
-    if fp16:
-        model = model.half()
-        print("Convert model to half precision")
-
     model = model.to('xpu')
 
     end = time.perf_counter()
@@ -984,30 +994,30 @@ def run_transformer_int4_fp16_gpu_win(repo_id,
     st = time.perf_counter()
     if repo_id in CHATGLM_IDS:
         model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
-                                          trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+                                          trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
+                                          torch_dtype=torch.float16).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-        model = model.half()
         model = model.to('xpu')
     elif repo_id in LLAMA_IDS:
         model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
-                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
+                                                     torch_dtype=torch.float16).eval()
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
-        model = model.half()
         model = model.to('xpu')
     elif repo_id in LLAVA_IDS:
         llava_repo_dir = os.environ.get('LLAVA_REPO_DIR')
         sys.path.append(rf"{llava_repo_dir}")
         from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM
         model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
-                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
+                                                     torch_dtype=torch.float16).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-        model = model.half()
         model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
-                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
+                                                     torch_dtype=torch.float16).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-        model = model.half()
         model = model.to('xpu')
     end = time.perf_counter()
     load_time = end - st
python/llm/src/ipex_llm/transformers/embedding.py (3 additions, 1 deletion)
@@ -97,6 +97,7 @@ def __init__(self,
                                 requires_grad=False,
                                 quantized=False, _shape=None, qtype=qtype)
         self.embedding_dim = embedding_dim
+        self.num_embeddings = num_embeddings
         self.torch_dtype = torch_dtype
 
     def forward(self, x: Tensor):
@@ -110,5 +111,6 @@ def forward(self, x: Tensor):
                           "Please `pip install bigdl_core_xe` first.")
 
         result = xe_linear.dequantize_rows(x.contiguous(), self.weight.data,
-                                           self.weight.qtype, self.embedding_dim)
+                                           self.weight.qtype, self.embedding_dim,
+                                           self.num_embeddings)
         return result.to(self.torch_dtype)
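
For context, a conceptual sketch of what a row-wise dequantizing embedding lookup does (plain PyTorch, with int8 codes standing in for the packed GGUF q4_k blocks; this is not the bigdl_core_xe kernel): the input ids select rows out of a (num_embeddings, embedding_dim) quantized table and only those rows are converted back to float, so the call above now receives the full table shape, num_embeddings rows of embedding_dim values each.

import torch

def dequantize_rows_reference(ids, q_rows, scales, embedding_dim, num_embeddings):
    # q_rows: (num_embeddings, embedding_dim) int8 codes, scales: (num_embeddings,)
    # Gather the requested rows and rescale them back to float.
    assert q_rows.shape == (num_embeddings, embedding_dim)
    idx = ids.reshape(-1).long()
    rows = q_rows[idx].float() * scales[idx].unsqueeze(-1)
    return rows.reshape(*ids.shape, embedding_dim)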
