From 6db1772b718d5ad20b46bc6ae51e323252384b70 Mon Sep 17 00:00:00 2001
From: Yuwen Hu
Date: Mon, 16 Dec 2024 14:01:43 +0800
Subject: [PATCH] generate.py and README updates

---
 .../GPU/HuggingFace/LLM/glm-edge/README.md    | 41 ++++++++++++++-----
 .../GPU/HuggingFace/LLM/glm-edge/generate.py  | 24 +++++------
 2 files changed, 40 insertions(+), 25 deletions(-)

diff --git a/python/llm/example/GPU/HuggingFace/LLM/glm-edge/README.md b/python/llm/example/GPU/HuggingFace/LLM/glm-edge/README.md
index 895e5efba9d..b9bcb5a6d6c 100644
--- a/python/llm/example/GPU/HuggingFace/LLM/glm-edge/README.md
+++ b/python/llm/example/GPU/HuggingFace/LLM/glm-edge/README.md
@@ -1,5 +1,5 @@
 # GLM-Edge
-In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on GLM-Edge models on [Intel GPUs](../../../README.md). For illustration purposes, we utilize the [THUDM/glm-edge-1.5b-chat](https://hf-mirror.com/THUDM/glm-edge-1.5b-chat) as a reference InternLM model.
+In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on GLM-Edge models on [Intel GPUs](../../../README.md). For illustration purposes, we utilize the [THUDM/glm-edge-1.5b-chat](https://hf-mirror.com/THUDM/glm-edge-1.5b-chat) and [THUDM/glm-edge-4b-chat](https://hf-mirror.com/THUDM/glm-edge-4b-chat) as reference GLM-Edge models.
 
 ## 0. Requirements
 To run these examples with IPEX-LLM on Intel GPUs, we have some recommended requirements for your machine, please refer to [here](../../../README.md#requirements) for more information.
@@ -14,8 +14,9 @@ conda activate llm
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
 # install packages required for GLM-Edge
-pip install git+https://github.com/huggingface/transformers.git
-pip install "tiktoken>=0.7.0" "trl<0.12.0"
+pip install transformers==4.47.0
+pip install accelerate==0.33.0
+pip install "trl<0.12.0"
 ```
 
 ### 1.2 Installation on Windows
@@ -28,8 +29,9 @@ conda activate llm
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 
 # install packages required for GLM-Edge
-pip install git+https://github.com/huggingface/transformers.git
-pip install "tiktoken>=0.7.0" "trl<0.12.0"
+pip install transformers==4.47.0
+pip install accelerate==0.33.0
+pip install "trl<0.12.0"
 ```
 
 ## 2. Configures OneAPI environment variables for Linux
@@ -98,14 +100,14 @@ set SYCL_CACHE_PERSISTENT=1
 
 ## 4. Running examples
 ### Example 1: Predict Tokens using `generate()` API
-In the example [generate.py](./generate.py), we show a basic use case for a GLM-4 model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations on Intel GPUs.
+In the example [generate.py](./generate.py), we show a basic use case for a GLM-Edge model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations on Intel GPUs.
 
 ```
 python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
 ```
 
 Arguments info:
-- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the GLM-4 model (e.g. `THUDM/glm-edge-1.5b-chat`) to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'THUDM/glm-edge-1.5b-chat'`.
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the GLM-Edge model (e.g. `THUDM/glm-edge-1.5b-chat` or `THUDM/glm-edge-4b-chat`) to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'THUDM/glm-edge-4b-chat'`.
 - `--prompt PROMPT`: argument defining the prompt to be infered (with integrated prompt format for chat). It is default to be `'AI是什么?'`.
 - `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It is default to be `32`.
 
@@ -114,15 +116,32 @@
 ```log
 Inference time: xxxx s
 -------------------- Prompt --------------------
-<|user|>
 AI是什么?
-<|assistant|>
 -------------------- Output --------------------
+AI,即人工智能,指的是由人制造出来的系统或机器能够执行通常需要人类智能才能完成的任务。人工智能可以执行多种任务,包括视觉识别、语言
+```
-
-AI是什么?
+
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+What is AI?
+-------------------- Output --------------------
+Artificial Intelligence, often abbreviated as AI, refers to the simulation of human intelligence in machines that are programmed to think like humans and mimic
+```
-
-AI,即人工智能,指的是由人制造出来的系统或机器能够执行通常需要人类智能才能完成的任务。人工智能可以执行多种任务,包括视觉识别、语言
+
+#### [THUDM/glm-edge-4b-chat](https://hf-mirror.com/THUDM/glm-edge-4b-chat)
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+AI是什么?
+-------------------- Output --------------------
+AI,即人工智能(Artificial Intelligence),是计算机科学的一个分支,旨在开发出一种智能系统,使其能够执行通常需要人类智能才能完成的任务,如视觉
 ```
 
 ```log
 Inference time: xxxx s
+-------------------- Prompt --------------------
+What is AI?
+-------------------- Output --------------------
+Artificial intelligence (AI) refers to the simulation of human intelligence in machines that are programmed to think like humans and mimic their actions. AI systems can
+```
diff --git a/python/llm/example/GPU/HuggingFace/LLM/glm-edge/generate.py b/python/llm/example/GPU/HuggingFace/LLM/glm-edge/generate.py
index e7081fc1736..b02afa18860 100644
--- a/python/llm/example/GPU/HuggingFace/LLM/glm-edge/generate.py
+++ b/python/llm/example/GPU/HuggingFace/LLM/glm-edge/generate.py
@@ -17,18 +17,14 @@
 import torch
 import time
 import argparse
-import numpy as np
 
 from ipex_llm.transformers import AutoModelForCausalLM
 from transformers import AutoTokenizer
 
-# you could tune the prompt based on your own model,
-# here the prompt tuning refers to https://hf-mirror.com/THUDM/glm-edge-1.5b-chat
-
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for GLM-Edge model')
-    parser.add_argument('--repo-id-or-model-path', type=str, default="THUDM/glm-edge-1.5b-chat",
+    parser.add_argument('--repo-id-or-model-path', type=str, default="THUDM/glm-edge-4b-chat",
                         help='The huggingface repo id for the GLM-Edge model to be downloaded'
                              ', or the path to the huggingface checkpoint folder')
     parser.add_argument('--prompt', type=str, default="AI是什么?",
@@ -44,11 +40,11 @@
     # When running LLMs on Intel iGPUs for Windows users, we recommend setting `cpu_embedding=True` in the from_pretrained function.
     # This will allow the memory-intensive embedding layer to utilize the CPU instead of iGPU.
     model = AutoModelForCausalLM.from_pretrained(model_path,
-                                                 load_in_4bit=True,
-                                                 optimize_model=True,
-                                                 trust_remote_code=True,
-                                                 use_cache=True)
-    model = model.to("xpu")
+                                                 load_in_4bit=True,
+                                                 optimize_model=True,
+                                                 trust_remote_code=True,
+                                                 use_cache=True)
+    model = model.half().to("xpu")
 
     # Load tokenizer
     tokenizer = AutoTokenizer.from_pretrained(model_path,
@@ -56,6 +52,7 @@
 
     # Generate predicted tokens
     with torch.inference_mode():
+        # The following code for generation is adapted from https://huggingface.co/THUDM/glm-edge-1.5b-chat#inference
         message = [{"role": "user", "content": args.prompt}]
 
         inputs = tokenizer.apply_chat_template(
@@ -63,7 +60,7 @@
             return_tensors="pt",
             add_generation_prompt=True,
             return_dict=True,
-        ).to(model.device)
+        ).to("xpu")
 
         generate_kwargs = {
             "input_ids": inputs["input_ids"],
@@ -76,12 +73,11 @@
         output = model.generate(**generate_kwargs)
 
         st = time.time()
         output = model.generate(**generate_kwargs)
-
-
         torch.xpu.synchronize()
         end = time.time()
-        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
+
+        output_str = tokenizer.decode(output[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
         print(f'Inference time: {end-st} s')
         print('-'*20, 'Prompt', '-'*20)
         print(args.prompt)
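
The snippet below is a minimal, self-contained sketch of the flow that the patched generate.py follows: load a GLM-Edge checkpoint with IPEX-LLM INT4 optimization, move the half-precision model to the Intel GPU, run one warm-up generation, then time a second generation and decode only the newly generated tokens. It assumes an XPU-enabled ipex-llm environment with the pinned transformers/accelerate versions installed; the model path, prompt, and token count are illustrative defaults taken from the example, not fixed requirements.

```python
# Condensed sketch of the updated GLM-Edge example (illustrative values).
import time

import torch
from ipex_llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer

model_path = "THUDM/glm-edge-4b-chat"  # or a local checkpoint folder
prompt = "AI是什么?"

# Load with IPEX-LLM INT4 optimization, then move the model to the Intel GPU in half precision
model = AutoModelForCausalLM.from_pretrained(model_path,
                                             load_in_4bit=True,
                                             optimize_model=True,
                                             trust_remote_code=True,
                                             use_cache=True)
model = model.half().to("xpu")
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

with torch.inference_mode():
    # Build chat-formatted inputs and place them on the XPU
    inputs = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to("xpu")
    generate_kwargs = {
        "input_ids": inputs["input_ids"],
        "attention_mask": inputs["attention_mask"],
        "max_new_tokens": 32,
    }

    # Warm-up run so the timed run reflects steady-state performance
    output = model.generate(**generate_kwargs)

    st = time.time()
    output = model.generate(**generate_kwargs)
    torch.xpu.synchronize()
    end = time.time()

    # Decode only the newly generated tokens, skipping the echoed prompt
    output_str = tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    print(f"Inference time: {end - st} s")
    print(output_str)
```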