Update benchmark script for NPU (intel-analytics#11932)
plusbang authored Aug 27, 2024
1 parent 730d9ec commit 7c8c9a0
Showing 3 changed files with 20 additions and 8 deletions.
2 changes: 2 additions & 0 deletions python/llm/dev/benchmark/all-in-one/config.yaml
@@ -11,6 +11,7 @@ low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
batch_size: 1 # default to 1
in_out_pairs:
- '32-32'
+ - '960-64'
- '1024-128'
test_api:
- "transformer_int4_fp16_gpu" # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp16)
@@ -37,5 +38,6 @@ test_api:
# - "transformers_int4_npu_win" # on Intel NPU for Windows, transformer-like API, (qtype=int4)
cpu_embedding: False # whether put embedding to CPU
streaming: False # whether output in streaming way (only available now for gpu win related test_api)
+ optimize_model: False # whether apply further optimization on NPU (only available now for transformers_int4_npu_win test_api)
use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only available now for "pipeline_parallel_gpu" test_api)
task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
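The two config additions above register a '960-64' prompt/generation pair and an `optimize_model` switch that only the `transformers_int4_npu_win` test_api consumes. Below is a minimal sketch, not part of the commit, of how such a config could be read, with a fallback for configs that predate the new key; the file name is assumed to be the benchmark's `config.yaml`.

```python
# Hedged sketch: load the benchmark config and pick up the new keys the same
# way run.py's main loop does, defaulting optimize_model to False when absent.
import yaml

with open("config.yaml", "r") as f:
    conf = yaml.safe_load(f)

in_out_pairs = conf["in_out_pairs"]                 # e.g. ['32-32', '960-64', '1024-128']
optimize_model = conf.get("optimize_model", False)  # NPU-only switch added by this commit
print(f"pairs={in_out_pairs}, optimize_model={optimize_model}")
```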
24 changes: 17 additions & 7 deletions python/llm/dev/benchmark/all-in-one/run.py
@@ -136,7 +136,7 @@ def preprocess_prompt(tokenizer, in_len, task):
input_ids = tokenizer.encode(input_str, return_tensors="pt")
return input_ids

- def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False, batch_size=1, streaming=False, use_fp16_torch_dtype=False, lookahead=False, task='continuation'):
+ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False, batch_size=1, streaming=False, use_fp16_torch_dtype=False, lookahead=False, task='continuation', optimize_model=False):
# TODO: make a parameter
result= {}
if test_api == 'transformer_int4':
@@ -188,7 +188,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
elif test_api == 'pipeline_parallel_gpu':
result = run_pipeline_parallel_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding, fp16=use_fp16_torch_dtype)
elif test_api == 'transformers_int4_npu_win':
- result = transformers_int4_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size)
+ result = transformers_int4_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, optimize_model)
else:
invalidInputError(False, "Unknown test_api " + test_api + ", please check your config.yaml.")
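The dispatch change above threads the new flag through `run_model` into the NPU path only. A stripped-down, hedged illustration of that flow follows; the function bodies are stand-ins, not the benchmark code, and the model id is a placeholder.

```python
# Hedged sketch of the parameter threading: only the NPU branch receives
# optimize_model; every other test_api keeps its existing call signature.
def transformers_int4_npu_win(repo_id, low_bit, batch_size, optimize_model):
    # Stand-in body; the real function loads the model on the NPU and benchmarks it.
    return {"repo_id": repo_id, "low_bit": low_bit, "optimize_model": optimize_model}

def run_model(repo_id, test_api, low_bit='sym_int4', batch_size=1, optimize_model=False):
    if test_api == 'transformers_int4_npu_win':
        return transformers_int4_npu_win(repo_id, low_bit, batch_size, optimize_model)
    raise ValueError("Unknown test_api " + test_api + ", please check your config.yaml.")

print(run_model("meta-llama/Llama-2-7b-chat-hf", "transformers_int4_npu_win", optimize_model=True))
```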

@@ -603,24 +603,30 @@ def transformers_int4_npu_win(repo_id,
num_trials,
num_beams,
low_bit,
- batch_size):
+ batch_size,
+ optimize_model):
from ipex_llm.transformers.npu_model import AutoModel, AutoModelForCausalLM
from transformers import AutoTokenizer, LlamaTokenizer

model_path = get_model_path(repo_id, local_model_hub)
+ in_out_len = in_out_pairs[0].split("-")
+ max_output_len = max(int(in_out_len[0]) + int(in_out_len[1]), 1024)
# Load model in 4 bit,
# which convert the relevant layers in the model into INT4 format
st = time.perf_counter()
if repo_id in CHATGLM_IDS:
- model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
+ model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
+ optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
torch_dtype='auto', attn_implementation="eager").eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
elif repo_id in LLAMA_IDS:
- model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
+ model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
+ optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
use_cache=True, attn_implementation="eager").eval()
tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
else:
- model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
+ model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
+ optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
use_cache=True, attn_implementation="eager").eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
end = time.perf_counter()
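Put together, the loading branches above now pass the prompt/output-length bounds and the `optimize_model` switch to the NPU model classes. The following is a hedged, self-contained sketch of that call for the generic (non-LLaMA, non-ChatGLM) branch; the model path and the '960-64' pair are placeholders, and it assumes an ipex-llm build with NPU support is installed.

```python
# Hedged sketch of the NPU loading path used by transformers_int4_npu_win;
# kwargs mirror the ones added in this commit.
import time
import torch
from transformers import AutoTokenizer
from ipex_llm.transformers.npu_model import AutoModelForCausalLM

model_path = "meta-llama/Llama-2-7b-chat-hf"   # placeholder local checkpoint
in_len, out_len = 960, 64                      # one '960-64' in/out pair
max_output_len = max(in_len + out_len, 1024)   # same floor of 1024 as run.py

st = time.perf_counter()
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    load_in_low_bit="sym_int4",     # qtype from the `low_bit` config key
    optimize_model=True,            # the new config switch
    max_output_len=max_output_len,
    max_prompt_len=in_len,          # must cover the longest prompt to benchmark
    transpose_value_cache=True,
    torch_dtype=torch.float16,
    trust_remote_code=True,
    use_cache=True,
    attn_implementation="eager",
).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
print(f"model loaded in {time.perf_counter() - st:.2f} s")
```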
@@ -643,6 +649,7 @@
true_str = tokenizer.batch_decode(input_ids)[0]
input_list = [true_str] * batch_size
input_ids = tokenizer(input_list, return_tensors="pt").input_ids
+ input_ids = input_ids[:, :in_len]
actual_in_len = input_ids.shape[1]
result[in_out] = []
for i in range(num_trials + warm_up):
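The new `input_ids = input_ids[:, :in_len]` line caps the re-tokenized prompt at exactly `in_len` tokens, since decoding the preprocessed prompt and encoding it again can yield a slightly different length. A small hedged illustration with an arbitrary tokenizer (the checkpoint name is a placeholder):

```python
# Hedged illustration of the truncation: re-tokenizing a decoded string may add
# a few tokens, so the benchmark clips the batch to the requested input length.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")   # any tokenizer works for the demo
in_len = 32
text = "The quick brown fox jumps over the lazy dog. " * 20
input_ids = tokenizer([text], return_tensors="pt").input_ids
input_ids = input_ids[:, :in_len]                   # cap at exactly in_len tokens
print(input_ids.shape)                              # e.g. torch.Size([1, 32])
```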
@@ -2016,12 +2023,15 @@ def run_pipeline_parallel_gpu(repo_id,
streaming = False
use_fp16_torch_dtype = False
task = 'continuation'
+ optimize_model = False # only for transformers_int4_npu_win
if 'streaming' in conf:
streaming = conf['streaming']
if 'use_fp16_torch_dtype' in conf:
use_fp16_torch_dtype = conf['use_fp16_torch_dtype']
if 'task' in conf:
task = conf['task']
+ if 'optimize_model' in conf:
+ optimize_model = conf['optimize_model']
lookahead = False

import pandas as pd
@@ -2048,7 +2058,7 @@
if task in ['QA', 'summarize'] and conf['num_beams'] == 1 and batch_size == 1:
lookahead = True
run_model(model, api, in_out_pairs, conf['local_model_hub'], conf['warm_up'], conf['num_trials'], conf['num_beams'],
- conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead, task)
+ conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead, task, optimize_model)
df = pd.DataFrame(results, columns=['model', '1st token avg latency (ms)', '2+ avg latency (ms/token)', 'encoder time (ms)',
'input/output tokens', 'batch_size', 'actual input/output tokens', 'num_beams', 'low_bit', 'cpu_embedding',
'model loading time (s)', 'peak mem (GB)', 'streaming', 'use_fp16_torch_dtype'])
2 changes: 1 addition & 1 deletion python/llm/src/ipex_llm/transformers/npu_model.py
@@ -117,7 +117,7 @@ def from_pretrained(cls, *args, **kwargs):
ignore_argument(kwargs, "pipeline_parallel_stages")
optimize_model = kwargs.pop("optimize_model", False)
max_output_len = kwargs.pop("max_output_len", 1024)
- max_prompt_len = kwargs.pop("max_prompt_len", max_output_len)
+ max_prompt_len = kwargs.pop("max_prompt_len", 512)
inter_pp = kwargs.pop("inter_pp", None)
intra_pp = kwargs.pop("intra_pp", None)
transpose_value_cache = kwargs.pop("transpose_value_cache", True)
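The npu_model.py change decouples `max_prompt_len` from `max_output_len`: instead of silently inheriting the output budget, prompts now default to 512 tokens unless the caller passes `max_prompt_len` explicitly, as the benchmark above now does. A hedged, standalone sketch of the kwargs-popping pattern, not the ipex_llm implementation itself:

```python
# Hedged stand-in for the option parsing shown above; only the defaults matter here.
def parse_npu_options(**kwargs):
    optimize_model = kwargs.pop("optimize_model", False)
    max_output_len = kwargs.pop("max_output_len", 1024)
    max_prompt_len = kwargs.pop("max_prompt_len", 512)        # was: max_output_len
    transpose_value_cache = kwargs.pop("transpose_value_cache", True)
    return optimize_model, max_output_len, max_prompt_len, transpose_value_cache, kwargs

# Remaining kwargs (e.g. load_in_low_bit) are left for from_pretrained to consume.
print(parse_npu_options(load_in_low_bit="sym_int4", max_prompt_len=960))
# -> (False, 1024, 960, True, {'load_in_low_bit': 'sym_int4'})
```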