Add Modelscope option for GPU model chatglm3
ATMxsp01 committed Dec 13, 2024
1 parent 41ef497 commit 67701d0
Showing 3 changed files with 36 additions and 8 deletions.
5 changes: 5 additions & 0 deletions python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md
@@ -23,6 +23,9 @@ conda activate llm

# the below command will install intel_extension_for_pytorch==2.1.10+xpu by default
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+# [optional] if you use ModelScope as the model hub, please make sure you are using version 1.11.0
+pip install modelscope==1.11.0
```

## 2. Configure OneAPI environment variables for Linux
@@ -101,6 +104,7 @@ Arguments info:
- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the Hugging Face repo id for the ChatGLM3 model to be downloaded, or the path to the Hugging Face checkpoint folder. It defaults to `'THUDM/chatglm3-6b'` (or `'ZhipuAI/chatglm3-6b'` when `--modelscope` is set).
- `--prompt PROMPT`: argument defining the prompt used for inference (wrapped with the integrated chat prompt format). It defaults to `'AI是什么?'`.
- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It defaults to `32`.
+- `--modelscope`: use **ModelScope** as the model hub instead of **Hugging Face**. A sample invocation is shown below.
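
For example (a sketch based on the argument definitions above; it assumes the environment has been set up per the earlier steps):

```bash
# Hugging Face hub (default): downloads THUDM/chatglm3-6b
python ./generate.py --prompt 'AI是什么?' --n-predict 32

# ModelScope hub: downloads ZhipuAI/chatglm3-6b instead
python ./generate.py --modelscope --prompt 'AI是什么?' --n-predict 32
```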

#### Sample Output
#### [THUDM/chatglm3-6b](https://huggingface.co/THUDM/chatglm3-6b)
@@ -146,3 +150,4 @@ Arguments info:
- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the Hugging Face repo id for the ChatGLM3 model to be downloaded, or the path to the Hugging Face checkpoint folder. It defaults to `'THUDM/chatglm3-6b'` (or `'ZhipuAI/chatglm3-6b'` when `--modelscope` is set).
- `--question QUESTION`: argument defining the question to ask. It defaults to `"晚上睡不着应该怎么办"`.
- `--disable-stream`: argument defining whether to disable stream chat. If `--disable-stream` is included when running the script, stream chat is disabled and the `chat()` API is used.
+- `--modelscope`: use **ModelScope** as the model hub instead of **Hugging Face**. A sample invocation is shown below.
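
For example (again a sketch based on the argument definitions above):

```bash
# stream chat, fetching the model from ModelScope
python ./streamchat.py --modelscope --question "晚上睡不着应该怎么办"

# one-shot chat: disable streaming so the chat() API is used
python ./streamchat.py --modelscope --disable-stream
```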
19 changes: 15 additions & 4 deletions python/llm/example/GPU/HuggingFace/LLM/chatglm3/generate.py
@@ -20,24 +20,34 @@
import numpy as np

from ipex_llm.transformers import AutoModel
-from transformers import AutoTokenizer

# you could tune the prompt based on your own model,
# here the prompt tuning refers to https://github.com/THUDM/ChatGLM3/blob/main/PROMPT.md
CHATGLM_V3_PROMPT_FORMAT = "<|user|>\n{prompt}\n<|assistant|>"
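# e.g. CHATGLM_V3_PROMPT_FORMAT.format(prompt="AI是什么?") renders as:
#   <|user|>
#   AI是什么?
#   <|assistant|>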

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for ChatGLM3 model')
-    parser.add_argument('--repo-id-or-model-path', type=str, default="THUDM/chatglm3-6b",
+    parser.add_argument('--repo-id-or-model-path', type=str,
                         help='The huggingface repo id for the ChatGLM3 model to be downloaded'
                              ', or the path to the huggingface checkpoint folder')
    parser.add_argument('--prompt', type=str, default="AI是什么?",
                        help='Prompt to infer')
    parser.add_argument('--n-predict', type=int, default=32,
                        help='Max tokens to predict')
+    parser.add_argument('--modelscope', action="store_true", default=False,
+                        help="Use models from modelscope")

    args = parser.parse_args()
-    model_path = args.repo_id_or_model_path
+
+    if args.modelscope:
+        from modelscope import AutoTokenizer
+        model_hub = 'modelscope'
+    else:
+        from transformers import AutoTokenizer
+        model_hub = 'huggingface'
+
+    model_path = args.repo_id_or_model_path if args.repo_id_or_model_path else \
+        ("ZhipuAI/chatglm3-6b" if args.modelscope else "THUDM/chatglm3-6b")

    # Load model in 4 bit,
    # which converts the relevant layers in the model into INT4 format
@@ -47,7 +57,8 @@
                                      load_in_4bit=True,
                                      optimize_model=True,
                                      trust_remote_code=True,
-                                      use_cache=True)
+                                      use_cache=True,
+                                      model_hub=model_hub)
    model = model.half().to('xpu')

    # Load tokenizer
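The tail of generate.py (tokenizer load and the `generate()` call) is collapsed here and untouched by this commit. For context, a minimal sketch of how the pieces above are typically consumed (the exact lines are an assumption, not part of the diff):

```python
    # hypothetical continuation, mirroring the script's visible structure
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    # wrap the raw prompt in ChatGLM3's chat format, then generate on the XPU
    prompt = CHATGLM_V3_PROMPT_FORMAT.format(prompt=args.prompt)
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
    output = model.generate(input_ids, max_new_tokens=args.n_predict)
    print(tokenizer.decode(output[0], skip_special_tokens=True))
```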
20 changes: 16 additions & 4 deletions python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py
@@ -20,21 +20,32 @@
import numpy as np

from ipex_llm.transformers import AutoModel
-from transformers import AutoTokenizer


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Stream Chat for ChatGLM3 model')
-    parser.add_argument('--repo-id-or-model-path', type=str, default="THUDM/chatglm3-6b",
+    parser.add_argument('--repo-id-or-model-path', type=str,
                         help='The huggingface repo id for the ChatGLM3 model to be downloaded'
                              ', or the path to the huggingface checkpoint folder')
    parser.add_argument('--question', type=str, default="晚上睡不着应该怎么办",
                        help='Question you want to ask')
    parser.add_argument('--disable-stream', action="store_true",
                        help='Disable stream chat')
+    parser.add_argument('--modelscope', action="store_true", default=False,
+                        help="Use models from modelscope")

    args = parser.parse_args()
-    model_path = args.repo_id_or_model_path
+
+    if args.modelscope:
+        from modelscope import AutoTokenizer
+        model_hub = 'modelscope'
+    else:
+        from transformers import AutoTokenizer
+        model_hub = 'huggingface'
+
+    model_path = args.repo_id_or_model_path if args.repo_id_or_model_path else \
+        ("ZhipuAI/chatglm3-6b" if args.modelscope else "THUDM/chatglm3-6b")
+
    disable_stream = args.disable_stream

    # Load model in 4 bit,
@@ -44,7 +55,8 @@
    model = AutoModel.from_pretrained(model_path,
                                      load_in_4bit=True,
                                      trust_remote_code=True,
-                                      optimize_model=True)
+                                      optimize_model=True,
+                                      model_hub=model_hub)
    model.to('xpu')

    # Load tokenizer
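The rest of streamchat.py is likewise collapsed. A minimal sketch of a typical chat loop, assuming ChatGLM3's remote-code `stream_chat()`/`chat()` methods (an assumption; this commit does not touch that section):

```python
    # hypothetical continuation: tokenizer load plus the chat loop
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    if disable_stream:
        # chat() returns the full response in one shot
        response, history = model.chat(tokenizer, args.question, history=[])
        print(response)
    else:
        # stream_chat() yields cumulative responses; print only the new suffix
        printed = ""
        for response, history in model.stream_chat(tokenizer, args.question, history=[]):
            print(response[len(printed):], end="", flush=True)
            printed = response
```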
