diff --git a/python/llm/dev/benchmark/all-in-one/README.md b/python/llm/dev/benchmark/all-in-one/README.md
index b0149d49f58..0747bb2365b 100644
--- a/python/llm/dev/benchmark/all-in-one/README.md
+++ b/python/llm/dev/benchmark/all-in-one/README.md
@@ -41,6 +41,7 @@ test_api:
   # - "transformer_int4_gpu"                      # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp32)
   # - "transformer_int4_gpu_win"                  # on Intel GPU for Windows, transformer-like API, (qtype=int4), (dtype=fp32)
   # - "transformer_int4_loadlowbit_gpu_win"       # on Intel GPU for Windows, transformer-like API, (qtype=int4), use load_low_bit API. Please make sure you have used the save.py to save the converted low bit model
+  # - "transformer_int4_fp16_loadlowbit_gpu_win"  # on Intel GPU for Windows, transformer-like API, (qtype=int4), (dtype=fp16), use load_low_bit API. Please make sure you have used the save.py to save the converted low bit model
   # - "bigdl_fp16_gpu"                            # on Intel GPU, use ipex-llm transformers API, (dtype=fp16), (qtype=fp16)
   # - "optimize_model_gpu"                        # on Intel GPU, can optimize any pytorch models include transformer model
   # - "deepspeed_optimize_model_gpu"              # on Intel GPU, deepspeed autotp inference
@@ -64,7 +65,7 @@ task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
 ```
 
 ## (Optional) Save model in low bit
-If you choose the `transformer_int4_loadlowbit_gpu_win` test API, you will need to save the model in low bit first.
+If you choose the `transformer_int4_loadlowbit_gpu_win` or `transformer_int4_fp16_loadlowbit_gpu_win` test API, you will need to save the model in low bit first.
 
 Run `python save.py` will save all models declared in `repo_id` list into low bit models under `local_model_hub` folder.
 
diff --git a/python/llm/dev/benchmark/all-in-one/config.yaml b/python/llm/dev/benchmark/all-in-one/config.yaml
index f7a50116a97..71af7167b19 100644
--- a/python/llm/dev/benchmark/all-in-one/config.yaml
+++ b/python/llm/dev/benchmark/all-in-one/config.yaml
@@ -17,6 +17,7 @@ test_api:
   # - "transformer_int4_gpu"                      # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp32)
   # - "transformer_int4_gpu_win"                  # on Intel GPU for Windows, transformer-like API, (qtype=int4), (dtype=fp32)
   # - "transformer_int4_loadlowbit_gpu_win"       # on Intel GPU for Windows, transformer-like API, (qtype=int4), use load_low_bit API. Please make sure you have used the save.py to save the converted low bit model
+  # - "transformer_int4_fp16_loadlowbit_gpu_win"  # on Intel GPU for Windows, transformer-like API, (qtype=int4), (dtype=fp16), use load_low_bit API. Please make sure you have used the save.py to save the converted low bit model
   # - "bigdl_fp16_gpu"                            # on Intel GPU, use ipex-llm transformers API, (dtype=fp16), (qtype=fp16)
   # - "optimize_model_gpu"                        # on Intel GPU, can optimize any pytorch models include transformer model
   # - "deepspeed_optimize_model_gpu"              # on Intel GPU, deepspeed autotp inference
diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index 9387dac6787..9544f07f263 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -137,6 +137,10 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
         # drop the results of the first time for better performance
         run_transformer_int4_loadlowbit_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)
         result = run_transformer_int4_loadlowbit_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)
+    elif test_api == 'transformer_int4_fp16_loadlowbit_gpu_win':
+        # drop the results of the first time for better performance
+        run_transformer_int4_fp16_loadlowbit_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)
+        result = run_transformer_int4_fp16_loadlowbit_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)
     elif test_api == 'transformer_autocast_bf16':
         result = run_transformer_autocast_bf16(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, batch_size)
     elif test_api == 'bigdl_ipex_bf16':
@@ -170,7 +174,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
                         low_bit,
                         cpu_embedding,
                         round(result[in_out_pair][-1][5], 2),
-                        result[in_out_pair][-1][6] if any(keyword in test_api for keyword in ['int4_gpu', 'int4_fp16_gpu_win', 'int4_loadlowbit_gpu', 'fp16_gpu', 'deepspeed_optimize_model_gpu']) and not lookahead else 'N/A',
+                        result[in_out_pair][-1][6] if any(keyword in test_api for keyword in ['int4_gpu', 'int4_fp16_gpu_win', 'int4_loadlowbit_gpu', 'int4_fp16_loadlowbit_gpu', 'fp16_gpu', 'deepspeed_optimize_model_gpu']) and not lookahead else 'N/A',
                         streaming if 'win' in test_api else 'N/A',
                         use_fp16_torch_dtype if 'pipeline_parallel_gpu' in test_api else 'N/A'],
                        )
@@ -1191,6 +1195,109 @@ def run_transformer_int4_loadlowbit_gpu_win(repo_id,
     return result
 
+
+def run_transformer_int4_fp16_loadlowbit_gpu_win(repo_id,
+                                                 local_model_hub,
+                                                 in_out_pairs,
+                                                 warm_up,
+                                                 num_trials,
+                                                 num_beams,
+                                                 low_bit,
+                                                 cpu_embedding,
+                                                 batch_size,
+                                                 streaming):
+    from ipex_llm.transformers import AutoModel, AutoModelForCausalLM
+    from transformers import AutoTokenizer, GPTJForCausalLM, LlamaTokenizer, TextStreamer
+    model_path = get_model_path(repo_id, local_model_hub)
+    # Load BigDL-LLM optimized low bit model
+    st = time.perf_counter()
+    if repo_id in CHATGLM_IDS:
+        model = AutoModel.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
+                                       use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
+        model = model.half().to('xpu')
+    elif repo_id in LLAMA_IDS:
+        model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
+                                                  use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = LlamaTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
+        model = model.half().to('xpu')
+    elif repo_id in LLAVA_IDS:
+        llava_repo_dir = os.environ.get('LLAVA_REPO_DIR')
+        sys.path.append(rf"{llava_repo_dir}")
+        from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM
+        model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
+                                                  use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
+        model = model.half().to('xpu')
+    else:
+        model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
+                                                  use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
+        model = model.half().to('xpu')
+    end = time.perf_counter()
+    load_time = end - st
+    print(">> loading of model costs {}s and {}GB".format(load_time, torch.xpu.memory.memory_reserved()/(1024**3)))
+
+    model = BenchmarkWrapper(model)
+    streamer = TextStreamer(tokenizer, skip_prompt=True)
+
+    result = {}
+    with torch.inference_mode():
+        for in_out in in_out_pairs:
+            try:
+                in_out_len = in_out.split("-")
+                in_len = int(in_out_len[0])
+                out_len = int(in_out_len[1])
+                # As different tokenizers have different encodings,
+                # in_len.txt may be shorter than we need;
+                # use a much longer context to make sure the input is long enough.
+                test_length = min(in_len*2, 8192)
+                while test_length not in [32, 256, 1024, 2048, 8192]:
+                    test_length = test_length * 2
+                input_str = open(f"prompt/continuation/{test_length}.txt", 'r').read()
+                # As different tokenizers have different encodings,
+                # slice the input_ids to ensure the prompt is exactly the required length.
+                input_ids = tokenizer.encode(input_str, return_tensors="pt")
+                input_ids = input_ids[:, :in_len]
+                true_str = tokenizer.batch_decode(input_ids)[0]
+                input_list = [true_str] * batch_size
+                input_ids = tokenizer(input_list, return_tensors="pt").input_ids.to('xpu')
+                actual_in_len = input_ids.shape[1]
+                result[in_out] = []
+                for i in range(num_trials + warm_up):
+                    st = time.perf_counter()
+                    if streaming:
+                        output_ids = model.generate(input_ids, do_sample=False,
+                                                    max_new_tokens=out_len, min_new_tokens=out_len,
+                                                    num_beams=num_beams, streamer=streamer)
+                    else:
+                        output_ids = model.generate(input_ids, do_sample=False,
+                                                    max_new_tokens=out_len, min_new_tokens=out_len,
+                                                    num_beams=num_beams)
+                    torch.xpu.synchronize()
+                    end = time.perf_counter()
+                    output_ids = output_ids.cpu()
+                    print("model generate cost: " + str(end - st))
+                    output = tokenizer.batch_decode(output_ids)
+                    if not streaming:
+                        print(output[0])
+                    actual_out_len = output_ids.shape[1] - actual_in_len
+                    if i >= warm_up:
+                        result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time,
+                                               actual_in_len, actual_out_len, load_time, model.peak_memory])
+                    # torch.xpu.empty_cache()  # this may make the first token slower
+            except RuntimeError:
+                traceback.print_exc()
+                pass
+    torch.xpu.synchronize()
+    torch.xpu.empty_cache()
+    model.to('cpu')
+    torch.xpu.synchronize()
+    torch.xpu.empty_cache()
+    del model
+    gc.collect()
+    return result
+
+
 def run_transformer_autocast_bf16(
     repo_id,
     local_model_hub,
     in_out_pairs,
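As a usage note, the save-then-load flow that the new `transformer_int4_fp16_loadlowbit_gpu_win` test API exercises can be sketched outside the harness. This is a minimal illustration, not part of the patch: the model id, save path, and prompt are placeholders, and it assumes ipex-llm is installed with Intel GPU (XPU) support. The `from_pretrained`/`save_low_bit`/`load_low_bit` calls mirror what save.py and the new run.py function do.

```python
# Minimal sketch of the INT4-save / FP16-load flow behind the new test API.
# Placeholders: MODEL_PATH and the prompt; adjust to your local setup.
import torch
from transformers import AutoTokenizer
from ipex_llm.transformers import AutoModelForCausalLM

MODEL_PATH = "meta-llama/Llama-2-7b-chat-hf"  # placeholder model id / local path
LOW_BIT = "sym_int4"
SAVE_PATH = MODEL_PATH.split("/")[-1] + "-" + LOW_BIT  # run.py expects "<model_path>-<low_bit>"

# Step 1 -- what save.py does: quantize once and persist the low-bit weights.
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, load_in_low_bit=LOW_BIT,
                                             optimize_model=True, trust_remote_code=True)
model.save_low_bit(SAVE_PATH)
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
tokenizer.save_pretrained(SAVE_PATH)
del model

# Step 2 -- what the new benchmark function does: reload the converted weights,
# cast to fp16 and move to the Intel GPU before generating.
model = AutoModelForCausalLM.load_low_bit(SAVE_PATH, optimize_model=True,
                                          trust_remote_code=True, use_cache=True).eval()
model = model.half().to('xpu')

input_ids = tokenizer.encode("Once upon a time", return_tensors="pt").to('xpu')
with torch.inference_mode():
    output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=32)
    torch.xpu.synchronize()  # wait for the XPU to finish before decoding
print(tokenizer.decode(output_ids[0].cpu(), skip_special_tokens=True))
```

Saving once and reloading with `load_low_bit` avoids repeating the quantization on every benchmark run, which is why the README asks you to run save.py before selecting either `loadlowbit` test API.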