From 9bb7422d52782fd45abaa319a80b6b3a3f8f500e Mon Sep 17 00:00:00 2001 From: binbin Deng <108676127+plusbang@users.noreply.github.com> Date: Tue, 2 Jan 2024 14:32:50 +0800 Subject: [PATCH] LLM: fix installation of codellama (#9813) --- .../CPU/HF-Transformers-AutoModels/Model/codellama/README.md | 1 + python/llm/example/CPU/PyTorch-Models/Model/codellama/README.md | 1 + .../GPU/HF-Transformers-AutoModels/Model/codellama/readme.md | 1 + python/llm/example/GPU/PyTorch-Models/Model/codellama/README.md | 1 + 4 files changed, 4 insertions(+) diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codellama/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codellama/README.md index 69eb1f3fbb0..e22b20acd28 100644 --- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codellama/README.md +++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codellama/README.md @@ -13,6 +13,7 @@ conda create -n llm python=3.9 conda activate llm pip install bigdl-llm[all] # install bigdl-llm with 'all' option +pip install transformers==4.34.1 # CodeLlamaTokenizer is supported in higher versions of transformers ``` ### 2. Run diff --git a/python/llm/example/CPU/PyTorch-Models/Model/codellama/README.md b/python/llm/example/CPU/PyTorch-Models/Model/codellama/README.md index 8a788ebc96d..a776a09a822 100644 --- a/python/llm/example/CPU/PyTorch-Models/Model/codellama/README.md +++ b/python/llm/example/CPU/PyTorch-Models/Model/codellama/README.md @@ -15,6 +15,7 @@ conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm pip install --pre --upgrade bigdl-llm[all] # install the latest bigdl-llm nightly build with 'all' option +pip install transformers==4.34.1 # CodeLlamaTokenizer is supported in higher versions of transformers ``` ### 2. 
Run diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codellama/readme.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codellama/readme.md index c9821a81077..84ff00406f4 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codellama/readme.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codellama/readme.md @@ -14,6 +14,7 @@ conda activate llm # below command will install intel_extension_for_pytorch==2.0.110+xpu as default # you can install specific ipex/torch version for your need pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu +pip install transformers==4.34.1 # CodeLlamaTokenizer is supported in higher versions of transformers ``` ### 2. Configures OneAPI environment variables diff --git a/python/llm/example/GPU/PyTorch-Models/Model/codellama/README.md b/python/llm/example/GPU/PyTorch-Models/Model/codellama/README.md index 0b571551e9d..1f83a680d97 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/codellama/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/codellama/README.md @@ -17,6 +17,7 @@ conda activate llm # below command will install intel_extension_for_pytorch==2.0.110+xpu as default # you can install specific ipex/torch version for your need pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu +pip install transformers==4.34.1 # CodeLlamaTokenizer is supported in higher versions of transformers ``` ### 2. Configures OneAPI environment variables