Add Qwen pipeline and example (intel-analytics#12292)
* support qwen pipeline

* update error msg

* style

* meet review

* minor
hkvision authored Oct 31, 2024
1 parent 4cf1ccc commit 416c191
Showing 6 changed files with 422 additions and 59 deletions.
@@ -8,6 +8,7 @@ In this directory, you will find examples on how to directly run HuggingFace `tr
|------------|----------------------------------------------------------------|
| Llama2 | [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) |
| Llama3 | [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) |
| Qwen2.5 | [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) |
| Baichuan2 | [baichuan-inc/Baichuan2-7B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat) |
| MiniCPM | [openbmb/MiniCPM-1B-sft-bf16](https://huggingface.co/openbmb/MiniCPM-1B-sft-bf16) |

@@ -30,7 +31,7 @@ pip install --pre --upgrade ipex-llm[npu]

## 2. Runtime Configurations

**Following envrionment variables are required**:
**Following environment variables are required**:

```cmd
set BIGDL_USE_NPU=1
@@ -46,6 +47,9 @@ python llama2.py
:: to run Meta-Llama-3-8B-Instruct
python llama3.py
:: to run Qwen2.5-7B-Instruct
python qwen.py
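:: optionally pass explicit arguments (these flags are defined in qwen.py; the low-bit folder below is just a placeholder)
:: e.g. python qwen.py --repo-id-or-model-path Qwen/Qwen2.5-7B-Instruct --lowbit-path qwen2.5-7b-lowbit --n-predict 32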
:: to run Baichuan2-7B-Chat
python baichuan2.py
@@ -0,0 +1,108 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import os
import torch
import time
import argparse
from ipex_llm.transformers.npu_model import AutoModelForCausalLM
from transformers import AutoTokenizer
from transformers.utils import logging

logger = logging.get_logger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Predict Tokens using `generate()` API for npu model"
    )
    parser.add_argument(
        "--repo-id-or-model-path",
        type=str,
        default="Qwen/Qwen2.5-7B-Instruct",  # Or Qwen2-7B-Instruct
        help="The huggingface repo id for the Qwen model to be downloaded"
        ", or the path to the huggingface checkpoint folder",
    )
    parser.add_argument("--lowbit-path", type=str,
                        default="",
                        help="The path to the low-bit model folder; leave blank if you do not want to save. \
                        If the path does not exist, the low-bit model will be saved there. \
                        Otherwise, the low-bit model will be loaded.",
                        )
    parser.add_argument('--prompt', type=str, default="AI是什么?",
                        help='Prompt to infer')
    parser.add_argument("--n-predict", type=int, default=32, help="Max tokens to predict")
    parser.add_argument("--max-context-len", type=int, default=1024)
    parser.add_argument("--max-prompt-len", type=int, default=960)
    parser.add_argument("--disable-transpose-value-cache", action="store_true", default=False)

    args = parser.parse_args()
    model_path = args.repo_id_or_model_path

    if not args.lowbit_path or not os.path.exists(args.lowbit_path):
        model = AutoModelForCausalLM.from_pretrained(model_path,
                                                     optimize_model=True,
                                                     pipeline=True,
                                                     max_context_len=args.max_context_len,
                                                     max_prompt_len=args.max_prompt_len,
                                                     torch_dtype=torch.float16,
                                                     attn_implementation="eager",
                                                     transpose_value_cache=not args.disable_transpose_value_cache,
                                                     mixed_precision=True,
                                                     trust_remote_code=True)
    else:
        model = AutoModelForCausalLM.load_low_bit(
            args.lowbit_path,
            attn_implementation="eager",
            torch_dtype=torch.float16,
            max_context_len=args.max_context_len,
            max_prompt_len=args.max_prompt_len,
            pipeline=True,
            transpose_value_cache=not args.disable_transpose_value_cache)

    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    if args.lowbit_path and not os.path.exists(args.lowbit_path):
        model.save_low_bit(args.lowbit_path)

print("-" * 80)
print("done")
messages = [{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": args.prompt}]
text = tokenizer.apply_chat_template(messages,
tokenize=False,
add_generation_prompt=True)
with torch.inference_mode():
print("finish to load")
for i in range(5):
_input_ids = tokenizer([text], return_tensors="pt").input_ids
print("input length:", len(_input_ids[0]))
st = time.time()
output = model.generate(
_input_ids, max_new_tokens=args.n_predict, do_print=True
)
end = time.time()
print(f"Inference time: {end-st} s")
input_str = tokenizer.decode(_input_ids[0], skip_special_tokens=False)
print("-" * 20, "Input", "-" * 20)
print(input_str)
output_str = tokenizer.decode(output[0], skip_special_tokens=False)
print("-" * 20, "Output", "-" * 20)
print(output_str)

print("-" * 80)
print("done")
print("success shut down")
69 changes: 44 additions & 25 deletions python/llm/src/ipex_llm/transformers/npu_models/convert_mp.py
@@ -267,6 +267,43 @@ def convert_minicpm(
convert_forward(model, module.MiniCPMForCausalLM, minicpm_casullm_forward)


def convert_qwen(
    model: torch.nn.Module,
    max_output_len=1024,
    max_prompt_len=1024,
    decoder=False,
    inter_pp=None,
    intra_pp=None,
    transpose_value_cache=True,
):
    from ipex_llm.transformers.npu_models.qwen2_mp import gen_qwen2_fused_model_forward
    from ipex_llm.transformers.npu_models.qwen2_mp import DecodeRunner, PrefillRunner
    from transformers.models.qwen2.modeling_qwen2 import Qwen2Model
    if decoder:
        decode_runner = DecodeRunner(
            model,
            max_seq_len=max_output_len,
            inter_pp=inter_pp,
            intra_pp=intra_pp,
            transpose_value_cache=transpose_value_cache,
        )
    else:
        decode_runner = None
    prefill_runner = PrefillRunner(
        model,
        max_output_len=max_output_len,
        max_prompt_len=max_prompt_len,
        transpose_value_cache=transpose_value_cache,
    )
    qwen2_model_forward = gen_qwen2_fused_model_forward(
        prefill_runner=prefill_runner, decode_runner=decode_runner
    )
    convert_forward(model, Qwen2Model, qwen2_model_forward)
    from transformers.models.qwen2.modeling_qwen2 import Qwen2ForCausalLM
    from ipex_llm.transformers.npu_models.qwen2_mp import qwen2_casullm_forward
    convert_forward(model, Qwen2ForCausalLM, qwen2_casullm_forward)


def optimize_llm(
model: torch.nn.Module,
max_context_len=1024,
@@ -300,31 +337,13 @@ def optimize_llm(
inter_pp = 2
else:
inter_pp = 1

from ipex_llm.transformers.npu_models.qwen2_mp import gen_qwen2_fused_model_forward
from ipex_llm.transformers.npu_models.qwen2_mp import DecodeRunner, PrefillRunner
from transformers.models.qwen2.modeling_qwen2 import Qwen2Model

decode_runner = DecodeRunner(
model,
max_seq_len=max_context_len,
inter_pp=inter_pp,
intra_pp=intra_pp,
transpose_value_cache=transpose_value_cache,
)
prefill_runner = PrefillRunner(
model,
max_output_len=max_context_len,
max_prompt_len=max_prompt_len,
transpose_value_cache=transpose_value_cache,
)
qwen2_model_forward = gen_qwen2_fused_model_forward(
prefill_runner=prefill_runner, decode_runner=decode_runner
)
convert_forward(model, Qwen2Model, qwen2_model_forward)
from transformers.models.qwen2.modeling_qwen2 import Qwen2ForCausalLM
from ipex_llm.transformers.npu_models.qwen2_mp import qwen2_casullm_forward
convert_forward(model, Qwen2ForCausalLM, qwen2_casullm_forward)
convert_qwen(model,
max_output_len=max_context_len,
max_prompt_len=max_prompt_len,
inter_pp=inter_pp,
intra_pp=intra_pp,
decoder=True,
transpose_value_cache=transpose_value_cache)
elif model.config.model_type == "minicpm":
# for minicpm-1b
if intra_pp is None:
65 changes: 35 additions & 30 deletions python/llm/src/ipex_llm/transformers/npu_models/qwen2_mp.py
@@ -140,31 +140,13 @@ def __init__(

# Self Attention
if mode == "decode":
attention_mask = self.create_input_op((self.batch_size, 1, 1, self.max_seq_len + 1))
attention_mask = self.create_input_op(
(self.batch_size, 1, 1, self.max_seq_len + 1), dtype=np.int64)
else:
attention_mask = self.create_input_op((self.batch_size, 1, self.seq_len, self.seq_len))
attention_mask = self.create_input_op(
(self.batch_size, 1, self.seq_len, self.seq_len), dtype=np.int64)

position_ids = self.create_input_op((self.batch_size, self.seq_len))
past_keys = []
past_values = []
if mode == "decode":
for i in range(num_layers):
past_key = self.create_cache_op(
(self.batch_size, self.num_key_value_heads, self.max_seq_len, self.head_dim)
)
if transpose_value:
past_value = self.create_cache_op(
(self.batch_size, self.num_key_value_heads, self.head_dim, self.max_seq_len)
)
else:
past_value = self.create_cache_op(
(self.batch_size, self.num_key_value_heads, self.max_seq_len, self.head_dim)
)
past_keys.append(past_key)
past_values.append(past_value)
else:
past_keys = [None] * num_layers
past_values = [None] * num_layers
position_ids = self.create_input_op((self.batch_size, self.seq_len), dtype=np.int64)

if input_layernorm_weights is None:
input_layernorm_weights = []
@@ -203,6 +185,27 @@ def __init__(
k_biases = [self.constant(w) for w in k_biases]
v_biases = [self.constant(w) for w in v_biases]

past_keys = []
past_values = []
if mode == "decode":
for i in range(num_layers):
past_key = self.create_cache_op(
(self.batch_size, self.num_key_value_heads, self.max_seq_len, self.head_dim)
)
if transpose_value:
past_value = self.create_cache_op(
(self.batch_size, self.num_key_value_heads, self.head_dim, self.max_seq_len)
)
else:
past_value = self.create_cache_op(
(self.batch_size, self.num_key_value_heads, self.max_seq_len, self.head_dim)
)
past_keys.append(past_key)
past_values.append(past_value)
else:
past_keys = [None] * num_layers
past_values = [None] * num_layers

hidden_states = input

curr_key_values = []
@@ -396,8 +399,8 @@ def forward(

inputs = (
hidden_states.to(torch.float16),
attention_mask,
position_ids.to(torch.float16),
attention_mask.to(torch.int64),
position_ids.to(torch.int64),
)

for i in range(self.intra_stages):
@@ -514,7 +517,9 @@ def forward(
seq_len = hidden_states.shape[1]

backend_cls = self.backend_cls_prefill
inputs = (hidden_states.to(torch.float16), attention_mask, position_ids.to(torch.float16))
inputs = (hidden_states.to(torch.float16),
attention_mask.to(torch.int64),
position_ids.to(torch.int64))
inputs += (self.layer_norm_0, self.layer_norm_1)
inputs += (self.q_bias, self.k_bias, self.v_bias)
hidden_states, past_key, past_value = run_model(
@@ -687,9 +692,9 @@ def run_decode(
causal_mask[:, :, :, -1] = torch.finfo(torch.float16).min
pad_mask = (0, pad_len)
padded_causal_mask = F.pad(
causal_mask.to(torch.float16), pad_mask, value=torch.finfo(torch.float16).min
causal_mask.to(torch.int64), pad_mask, value=torch.iinfo(torch.int64).min
)
padded_causal_mask[:, :, :, -1] = 0.0
padded_causal_mask[:, :, :, -1] = 0
dist.recv(hidden_states, src=rank - 1)
layer_outputs = multi_decoder(
hidden_states,
@@ -973,9 +978,9 @@ def forward(
hidden_states = F.pad(hidden_states.to(torch.float16), (0, 0, 0, pad_len), value=0.0)
position_ids = F.pad(position_ids, (0, pad_len), value=0)
attention_mask = F.pad(
attention_mask.to(torch.float16),
attention_mask.to(torch.int64),
(0, pad_len, 0, pad_len),
value=torch.finfo(torch.float16).min,
value=torch.iinfo(torch.int64).min,
)

args = (hidden_states, position_ids, attention_mask, past_key_value)
@@ -196,7 +196,7 @@ def convert_llm(model: torch.nn.Module,
group_size: int):
if group_size == 0:
n_splits_linear = 1
n_splits_down_proj = 1
n_splits_down_proj = 2 if model.config.intermediate_size == 18944 else 1
else:
n_splits_linear = model.config.hidden_size // group_size
n_splits_down_proj = model.config.intermediate_size // group_size
@@ -318,9 +318,49 @@ def convert_llm(model: torch.nn.Module,
except:
invalidInputError(False,
"False to InitLLMPipeline.")
elif model.config.model_type == "qwen2":
with tempfile.TemporaryDirectory() as temp_dir:
weight_dir = os.path.join(temp_dir, "model_weights")
os.mkdir(weight_dir)
layer_num = len(model.model.layers)
from .qwen import convert_qwen_layer, convert_lm_head_and_embedding
first_blob_path, last_blob_path = convert_lm_head_and_embedding(model, n_splits_linear,
temp_dir, weight_dir)

param_list = []
for layer_idx in range(0, layer_num):
param_list.append((model, layer_idx, n_splits_linear, n_splits_down_proj,
temp_dir, weight_dir, transpose_value_cache, kv_len, group_size))
with Pool() as pool:
result = pool.starmap(convert_qwen_layer, param_list)

# Prefill Runner
from ipex_llm.transformers.npu_models.convert_mp import convert_qwen
convert_qwen(model,
max_output_len=kv_len,
max_prompt_len=max_prompt_len,
decoder=False,
transpose_value_cache=transpose_value_cache)

# patch attrs for generate
model.kv_len = kv_len
model.num_head = model.model.layers[0].self_attn.num_key_value_heads
model.head_dim = model.model.layers[0].self_attn.head_dim
model.num_layers = layer_num
model.transpose_value_cache = transpose_value_cache
model.vocab_size = model.config.vocab_size

try:
res = InitLLMPipeline("qwen", kv_len, model.num_head, model.head_dim, layer_num,
model.vocab_size, weight_dir, "model",
first_blob_path, last_blob_path,
os.path.join(temp_dir, "decoder_layer"))
            except:
                invalidInputError(False,
                                  "Failed to InitLLMPipeline.")
else:
invalidInputError(False,
"Now we only support Llama2 / Llama3 / Baichuan2 for pipeline running.")
invalidInputError(False, "Now we only support Llama2 / Llama3 / Baichuan2 / "
"Qwen2 / Qwen2.5 / Minicpm for pipeline running.")

if isinstance(model.lm_head, SlicedLMHead):
model.lm_head.get_fused_lm_head()