diff --git a/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/README.md b/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/README.md
index d6297bda5f2..39b6e9f3902 100644
--- a/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/README.md
+++ b/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/README.md
@@ -41,7 +41,6 @@ Then, './AdvertiseGen' will be converted to './AdvertiseGen_fix'. Now, we have p
 
 Start the fine-tuning by:
 
-
 ```bash
 bash lora_finetuning_on_chatglm3_6b_with_1_arc_card.sh
 ```
diff --git a/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/lora_finetune_chatglm.py b/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/lora_finetune_chatglm.py
index 2e99b8e4406..7e080a35629 100644
--- a/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/lora_finetune_chatglm.py
+++ b/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/lora_finetune_chatglm.py
@@ -13,10 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# Some parts of this file is adapted from
-# https://github.com/tloen/alpaca-lora/blob/main/finetune.py
-#
-# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
 
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -30,7 +26,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # This example is ported from https://github.com/THUDM/ChatGLM3/blob/main/finetune_demo/finetune_hf.py
-
+# L415-L427 are modified to enable the example on Intel Arc
 
 import os
 import jieba
@@ -413,6 +409,8 @@ def load_tokenizer_and_model(
                 config=config,
             )
         if peft_config.peft_type.name == "LORA":
+            # Modified below to adapt to ipex-llm, which is different from
+            # https://github.com/THUDM/ChatGLM3/blob/main/finetune_demo/finetune_hf.py#L400-L406
             from ipex_llm.transformers import AutoModelForCausalLM
             from ipex_llm.transformers.qlora import get_peft_model
             import os
diff --git a/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/process_advertise_gen_dataset.py b/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/process_advertise_gen_dataset.py
index 18f1040ef01..097548b2a88 100644
--- a/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/process_advertise_gen_dataset.py
+++ b/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/process_advertise_gen_dataset.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 #
 # This is ported from https://github.com/THUDM/ChatGLM3/blob/main/finetune_demo/lora_finetune.ipynb
-
+# L60 is changed to enable users to finish all operations under one working directory
 
 import json
 from typing import Union
diff --git a/python/llm/example/GPU/LLM-Finetuning/README.md b/python/llm/example/GPU/LLM-Finetuning/README.md
index e1760a0c07b..cbd4d2695c2 100644
--- a/python/llm/example/GPU/LLM-Finetuning/README.md
+++ b/python/llm/example/GPU/LLM-Finetuning/README.md
@@ -17,7 +17,7 @@ This folder contains examples of running different training mode with IPEX-LLM o
 |------------|-----------------------------------------------------------------|-----------------------------------------------------------------|
 | LLaMA 2/3 | [LoRA](LoRA), [QLoRA](QLoRA), [QA-LoRA](QA-LoRA), [ReLora](ReLora) | [HF-PEFT](HF-PEFT), [axolotl](axolotl) |
 | Mistral | [LoRA](DPO), [QLoRA](DPO) | [DPO](DPO) |
-| ChatGLM 3 | [QLoRA](QLoRA/alpaca-qlora#3-qlora-finetune) | HF-PEFT |
+| ChatGLM 3 | [LoRA](LoRA/chatglm_finetune#lora-fine-tuning-on-chatglm3-6b-with-ipex-llm), [QLoRA](QLoRA/alpaca-qlora#3-qlora-finetune) | HF-PEFT |
 | Qwen-1.5 | [QLoRA](QLoRA/alpaca-qlora#3-qlora-finetune) | HF-PEFT |
 | Baichuan2 | [QLoRA](QLoRA/alpaca-qlora#3-qlora-finetune) | HF-PEFT |
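For context on the `lora_finetune_chatglm.py` hunk above, here is a minimal sketch of the ipex-llm LoRA pattern it switches to, assuming the `get_peft_model`/`LoraConfig` API used by the other finetuning examples in this folder; the model id, target modules, and hyperparameters below are illustrative placeholders, not values taken from this PR.

```python
# Minimal sketch (not part of this PR): wrapping ChatGLM3 with LoRA via
# ipex-llm rather than stock peft, as the lora_finetune_chatglm.py hunk does.
import torch
from ipex_llm.transformers import AutoModelForCausalLM
# LoraConfig here is assumed to be ipex-llm's variant, which adds a
# `training_mode` field, mirroring the alpaca-qlora example in this folder.
from ipex_llm.transformers.qlora import get_peft_model, LoraConfig

# Load the base model through ipex-llm; optimize_model=False keeps the
# weights trainable instead of applying inference-only optimizations.
model = AutoModelForCausalLM.from_pretrained(
    "THUDM/chatglm3-6b",          # placeholder model id
    trust_remote_code=True,
    optimize_model=False,
    torch_dtype=torch.bfloat16,
)
model = model.to("xpu")           # move to the Intel Arc GPU

lora_config = LoraConfig(
    r=8,                          # placeholder LoRA rank
    lora_alpha=32,
    target_modules=["query_key_value"],  # ChatGLM3 attention projection (assumed)
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    training_mode="lora",         # plain LoRA rather than QLoRA
)
# ipex-llm's get_peft_model injects LoRA adapters that run on the XPU device.
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```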