From 5f95054f979fca97f24195d2d7bd52887e3de4fc Mon Sep 17 00:00:00 2001
From: "Wang, Jian4" <61138589+hzjane@users.noreply.github.com>
Date: Mon, 22 Apr 2024 10:03:19 +0800
Subject: [PATCH] =?UTF-8?q?LLM=EF=BC=9AAdd=20qwen=20moe=20example=20libs?=
 =?UTF-8?q?=20md=20(#10828)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../CPU/HF-Transformers-AutoModels/Model/qwen1.5/README.md    | 4 ++++
 python/llm/example/CPU/PyTorch-Models/Model/qwen1.5/README.md | 4 ++++
 .../GPU/HF-Transformers-AutoModels/Model/qwen1.5/README.md    | 4 ++++
 python/llm/example/GPU/PyTorch-Models/Model/qwen1.5/README.md | 4 ++++
 4 files changed, 16 insertions(+)

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen1.5/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen1.5/README.md
index e4043709085..66040d40e24 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen1.5/README.md
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen1.5/README.md
@@ -15,6 +15,10 @@ conda activate llm
 
 pip install --pre --upgrade ipex-llm[all] # install ipex-llm with 'all' option
 pip install transformers==4.37.0 # install the transformers which support Qwen2
+
+# only for Qwen1.5-MoE-A2.7B
+pip install transformers==4.40.0
+pip install trl==0.8.1
 ```
 
 ### 2. Run
diff --git a/python/llm/example/CPU/PyTorch-Models/Model/qwen1.5/README.md b/python/llm/example/CPU/PyTorch-Models/Model/qwen1.5/README.md
index 095ee0011de..9a2b41b9ba6 100644
--- a/python/llm/example/CPU/PyTorch-Models/Model/qwen1.5/README.md
+++ b/python/llm/example/CPU/PyTorch-Models/Model/qwen1.5/README.md
@@ -16,6 +16,10 @@ conda activate llm
 
 pip install --pre --upgrade ipex-llm[all] # install the latest ipex-llm nightly build with 'all' option
 pip install transformers==4.37.0 # install transformers which supports Qwen2
+
+# only for Qwen1.5-MoE-A2.7B
+pip install transformers==4.40.0
+pip install trl==0.8.1
 ```
 
 ### 2. Run
diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen1.5/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen1.5/README.md
index 3c3d6233528..e0fadcc12fb 100644
--- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen1.5/README.md
+++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen1.5/README.md
@@ -16,6 +16,10 @@ conda activate llm
 
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 pip install transformers==4.37.0 # install transformers which supports Qwen2
+
+# only for Qwen1.5-MoE-A2.7B
+pip install transformers==4.40.0
+pip install trl==0.8.1
 ```
 
 #### 1.2 Installation on Windows
diff --git a/python/llm/example/GPU/PyTorch-Models/Model/qwen1.5/README.md b/python/llm/example/GPU/PyTorch-Models/Model/qwen1.5/README.md
index e8ca82f20fe..110b1f319d1 100644
--- a/python/llm/example/GPU/PyTorch-Models/Model/qwen1.5/README.md
+++ b/python/llm/example/GPU/PyTorch-Models/Model/qwen1.5/README.md
@@ -30,6 +30,10 @@ pip install dpcpp-cpp-rt==2024.0.2 mkl-dpcpp==2024.0.0 onednn==2024.0.0
 
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 pip install transformers==4.37.0 # install transformers which supports Qwen2
+
+# only for Qwen1.5-MoE-A2.7B
+pip install transformers==4.40.0
+pip install trl==0.8.1
 ```
 
 ### 2. Configures OneAPI environment variables for Linux
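
For reference, here is a minimal sketch of what running the qwen1.5 example with Qwen1.5-MoE-A2.7B looks like once the dependencies added above are installed. The model id, prompt, and generation settings below are placeholders and are not taken from the repository's generate.py; the GPU variants additionally move the model and inputs to `'xpu'`.

```python
# Minimal sketch (CPU variant), assuming ipex-llm is installed per the README
# plus the Qwen1.5-MoE-A2.7B-specific `transformers==4.40.0` and `trl==0.8.1`.
import torch
from transformers import AutoTokenizer
from ipex_llm.transformers import AutoModelForCausalLM  # ipex-llm drop-in AutoModel

model_path = "Qwen/Qwen1.5-MoE-A2.7B-Chat"  # assumed Hugging Face repo id

# load_in_4bit=True applies ipex-llm's INT4 optimization while loading
model = AutoModelForCausalLM.from_pretrained(model_path,
                                             load_in_4bit=True,
                                             trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

messages = [{"role": "user", "content": "What is AI?"}]
input_ids = tokenizer.apply_chat_template(messages,
                                          add_generation_prompt=True,
                                          return_tensors="pt")
with torch.inference_mode():
    output = model.generate(input_ids, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```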