From c80d318d07c8f901ae94a778a4461f56c4a8a98e Mon Sep 17 00:00:00 2001
From: WeiguangHan
Date: Tue, 30 Jan 2024 19:39:28 +0800
Subject: [PATCH] LLM: add gpu example for redpajama models (#10040)

---
 .../Model/redpajama/README.md   | 125 ++++++++++++++++++
 .../Model/redpajama/generate.py |  88 ++++++++++++
 2 files changed, 213 insertions(+)
 create mode 100644 python/llm/example/GPU/HF-Transformers-AutoModels/Model/redpajama/README.md
 create mode 100644 python/llm/example/GPU/HF-Transformers-AutoModels/Model/redpajama/generate.py

diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/redpajama/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/redpajama/README.md
new file mode 100644
index 00000000000..6eec97bbb5c
--- /dev/null
+++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/redpajama/README.md
@@ -0,0 +1,125 @@

# RedPajama
In this directory, you will find examples of how to apply BigDL-LLM INT4 optimizations to RedPajama models on [Intel GPUs](../../../README.md). For illustration purposes, we use [togethercomputer/RedPajama-INCITE-7B-Chat](https://huggingface.co/togethercomputer/RedPajama-INCITE-7B-Chat) as the reference RedPajama model.

## 0. Requirements
To run these examples with BigDL-LLM on Intel GPUs, there are some recommended requirements for your machine; please refer to [here](../../../README.md#requirements) for more information.

## Example: Predict Tokens using `generate()` API
In the example [generate.py](./generate.py), we show a basic use case in which a RedPajama model predicts the next N tokens using the `generate()` API, with BigDL-LLM INT4 optimizations on Intel GPUs.

### 1. Install
#### 1.1 Installation on Linux
We suggest using conda to manage the Python environment. For more information about conda installation, please refer to [here](https://docs.conda.io/en/latest/miniconda.html#).

After installing conda, create a Python environment for BigDL-LLM:
```bash
conda create -n llm python=3.9
conda activate llm
# the command below installs intel_extension_for_pytorch==2.1.10+xpu by default
pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
```

#### 1.2 Installation on Windows
We suggest using conda to manage the environment:
```bash
conda create -n llm python=3.9 libuv
conda activate llm
# the command below installs intel_extension_for_pytorch==2.1.10+xpu by default
pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
```

### 2. Configure OneAPI Environment Variables
#### 2.1 Configurations for Linux
```bash
source /opt/intel/oneapi/setvars.sh
```

#### 2.2 Configurations for Windows
```cmd
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
```
> Note: Please make sure you are using **CMD** (**Anaconda Prompt** if using conda) to run the command, as PowerShell is not supported.

### 3. Runtime Configurations
For optimal performance, it is recommended to set several environment variables. Please check out the suggestions based on your device.
#### 3.1 Configurations for Linux
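If you are unsure which of the configurations below applies to your machine, one quick check (assuming the oneAPI environment from step 2 has been sourced) is to list the devices visible to SYCL with the oneAPI `sycl-ls` tool:

```bash
# lists the SYCL backends and the GPU devices they expose
sycl-ls
```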
**For Intel Arc™ A-Series Graphics and Intel Data Center GPU Flex Series:**

```bash
export USE_XETLA=OFF
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
```
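If you would rather not re-export these variables in every new shell, one convenience (assuming conda 4.8 or newer; this is not required by the example) is to attach them to the `llm` environment so they are applied automatically on activation:

```bash
# store the runtime variables in the conda environment, then re-activate to apply them
conda env config vars set USE_XETLA=OFF SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 -n llm
conda activate llm
conda env config vars list -n llm   # verify that the variables are registered
```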
+ +
**For Intel Data Center GPU Max Series:**

```bash
export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export ENABLE_SDP_FUSION=1
```
> Note: `libtcmalloc.so` can be installed with `conda install -c conda-forge -y gperftools=2.10`.
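As a quick sanity check for the `LD_PRELOAD` setting above, the sketch below (assuming the `llm` conda environment created in step 1) installs gperftools and confirms that the preloaded library actually exists:

```bash
conda install -c conda-forge -y gperftools=2.10
ls ${CONDA_PREFIX}/lib/libtcmalloc.so   # should print the path if the installation succeeded
```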
+ +#### 3.2 Configurations for Windows +
**For Intel iGPU:**

```cmd
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
```
+ +
**For Intel Arc™ A300-Series or Pro A60:**

```cmd
set SYCL_CACHE_PERSISTENT=1
```
+ +
**For other Intel dGPU Series:**

There is no need to set further environment variables.
> Note: The first time each model runs on an Intel iGPU, Intel Arc™ A300-Series, or Pro A60 GPU, it may take several minutes to compile.

### 4. Running examples

```
python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
```
More information about the arguments can be found in the [Arguments Info](#41-arguments-info) section. The expected output can be found in the [Sample Output](#42-sample-output) section.

#### 4.1 Arguments Info
In the example, several arguments can be passed to satisfy your requirements:

- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the Hugging Face repo id of the RedPajama model to be downloaded, or the path to a local checkpoint folder. It defaults to `'togethercomputer/RedPajama-INCITE-7B-Chat'`.
- `--prompt PROMPT`: argument defining the prompt to be inferred (the integrated chat prompt format is applied automatically). It defaults to `'What is AI?'`.
- `--n-predict N_PREDICT`: argument defining the maximum number of tokens to predict. It defaults to `32`.

#### 4.2 Sample Output
#### [togethercomputer/RedPajama-INCITE-7B-Chat](https://huggingface.co/togethercomputer/RedPajama-INCITE-7B-Chat#gpu-inference)
```log
Inference time: xxxx s
-------------------- Prompt --------------------
<human>: What is AI?
<bot>:
-------------------- Output --------------------
<human>: What is AI?
<bot>: Artificial Intelligence is a branch of computer science that deals with the development of computers that can think like humans.
<human>: What are the main advantages of
```
\ No newline at end of file

diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/redpajama/generate.py b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/redpajama/generate.py
new file mode 100644
index 00000000000..f6be1ff9936
--- /dev/null
+++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/redpajama/generate.py
@@ -0,0 +1,88 @@

#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import torch
import time
import argparse

from bigdl.llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer

# You can tune the prompt based on your own model.
# The prompt format below follows https://huggingface.co/togethercomputer/RedPajama-INCITE-7B-Chat#gpu-inference
RedPajama_PROMPT_FORMAT = "<human>: {prompt}\n<bot>:"

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Transformers INT4 GPU example for the RedPajama model')
    parser.add_argument('--repo-id-or-model-path', type=str, default="togethercomputer/RedPajama-INCITE-7B-Chat",
                        help='The Hugging Face repo id of the RedPajama model to be downloaded'
                             ', or the path to a local checkpoint folder')
    parser.add_argument('--prompt', type=str, default="What is AI?",
                        help='Prompt to infer')
    parser.add_argument('--n-predict', type=int, default=32,
                        help='Max tokens to predict')

    args = parser.parse_args()
    model_path = args.repo_id_or_model_path

    # Load the model in 4-bit precision, which converts the relevant layers
    # in the model into INT4 format
    model = AutoModelForCausalLM.from_pretrained(model_path,
                                                 trust_remote_code=True,
                                                 load_in_4bit=True,
                                                 optimize_model=True,
                                                 use_cache=True)
    model = model.to('xpu')

    # Load the tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_path,
                                              trust_remote_code=True)

    # Generate predicted tokens
    with torch.inference_mode():
        prompt = RedPajama_PROMPT_FORMAT.format(prompt=args.prompt)
        inputs = tokenizer(prompt, return_tensors='pt').to('xpu')

        # The IPEX model needs a warmup run; only then is the measured inference time accurate
        output = model.generate(**inputs,
                                max_new_tokens=args.n_predict,
                                do_sample=True,
                                temperature=0.7,
                                top_p=0.7,
                                top_k=50,
                                return_dict_in_generate=True)

        # start timed inference
        st = time.time()
        # if your selected model is capable of utilizing previous key/value attentions
        # to enhance decoding speed, but has `"use_cache": false` in its model config,
        # it is important to set `use_cache=True` explicitly in the `generate` function
        # to obtain optimal performance with BigDL-LLM INT4 optimizations
        output = model.generate(**inputs,
                                max_new_tokens=args.n_predict,
                                do_sample=True,
                                temperature=0.7,
                                top_p=0.7,
                                top_k=50,
                                return_dict_in_generate=True)
        torch.xpu.synchronize()
        end = time.time()
        output_str = tokenizer.decode(output.sequences[0])
        print(f'Inference time: {end-st} s')
        print('-'*20, 'Prompt', '-'*20)
        print(prompt)
        print('-'*20, 'Output', '-'*20)
        print(output_str)
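Putting the steps together, a typical end-to-end session on a Linux machine with an Intel Arc™ A-Series GPU might look like the following sketch; the model id and argument values are only illustrative and simply repeat the defaults documented above:

```bash
# activate the environment and the oneAPI toolchain
conda activate llm
source /opt/intel/oneapi/setvars.sh

# runtime settings recommended for Arc A-Series / Flex Series GPUs
export USE_XETLA=OFF
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1

# run the example with explicit arguments
python ./generate.py \
  --repo-id-or-model-path togethercomputer/RedPajama-INCITE-7B-Chat \
  --prompt "What is AI?" \
  --n-predict 32
```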