diff --git a/python/llm/example/GPU/Pipeline-Parallel-Inference/README.md b/python/llm/example/GPU/Pipeline-Parallel-Inference/README.md
index 3fac4d19d77..2005fe0f5d7 100644
--- a/python/llm/example/GPU/Pipeline-Parallel-Inference/README.md
+++ b/python/llm/example/GPU/Pipeline-Parallel-Inference/README.md
@@ -9,7 +9,8 @@ To run this example with IPEX-LLM on Intel GPUs, we have some recommended requir
 - [meta-llama/Llama-2-7b-chat-hf](./run_llama_arc_2_card.sh)
 - [meta-llama/Llama-2-13b-chat-hf](./run_llama_arc_2_card.sh)
 - [meta-llama/Meta-Llama-3-8B-Instruct](./run_llama_arc_2_card.sh)
-- [Qwen/Qwen1.5-14B-Chat](./run_qwen1.5_14b_arc_2_card.sh)
+- [Qwen/Qwen1.5-7B-Chat](./run_qwen1.5_arc_2_card.sh)
+- [Qwen/Qwen1.5-14B-Chat](./run_qwen1.5_arc_2_card.sh)
 
 ## Example: Run pipeline parallel inference on multiple GPUs
 
@@ -49,15 +50,15 @@ bash run_llama_arc_2_card.sh
- Show Qwen1.5-14B example
+ Show Qwen1.5 example
 
-#### Run Qwen1.5-14B-Chat on two Intel Arc A770
+#### Run Qwen1.5-7B-Chat / Qwen1.5-14B-Chat on two Intel Arc A770
 
 You could specify `--repo-id-or-model-path` in the test script to be the huggingface repo id for Qwen1.5 to be downloaded, or the path to the huggingface checkpoint folder. Besides, you could change `NUM_GPUS` to the number of GPUs you have on your machine.
 
 ```bash
 pip install transformers==4.37.0
-bash run_qwen1.5_14b_arc_2_card.sh
+bash run_qwen1.5_arc_2_card.sh
 ```
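The README paragraph above names two knobs in the test script: which Qwen1.5 checkpoint to load and how many GPUs to spread it across. A minimal sketch of what such an edit inside run_qwen1.5_arc_2_card.sh could look like, assuming a hypothetical local checkpoint path and a four-GPU machine (not part of this diff):

```bash
NUM_GPUS=4  # hypothetical: four GPUs instead of the default two
CCL_ZE_IPC_EXCHANGE=sockets torchrun --standalone --nnodes=1 --nproc-per-node $NUM_GPUS \
  generate.py --repo-id-or-model-path '/path/to/local/Qwen1.5-7B-Chat' --gpu-num $NUM_GPUS  # hypothetical local path
```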
diff --git a/python/llm/example/GPU/Pipeline-Parallel-Inference/run_qwen1.5_14b_arc_2_card.sh b/python/llm/example/GPU/Pipeline-Parallel-Inference/run_qwen1.5_arc_2_card.sh
similarity index 76%
rename from python/llm/example/GPU/Pipeline-Parallel-Inference/run_qwen1.5_14b_arc_2_card.sh
rename to python/llm/example/GPU/Pipeline-Parallel-Inference/run_qwen1.5_arc_2_card.sh
index 0e450d44cde..f3b49bbffc1 100644
--- a/python/llm/example/GPU/Pipeline-Parallel-Inference/run_qwen1.5_14b_arc_2_card.sh
+++ b/python/llm/example/GPU/Pipeline-Parallel-Inference/run_qwen1.5_arc_2_card.sh
@@ -26,5 +26,11 @@ fi
 export TORCH_LLM_ALLREDUCE=0
 
 NUM_GPUS=2 # number of used GPU
+
+# To run Qwen1.5-7B-Chat
 CCL_ZE_IPC_EXCHANGE=sockets torchrun --standalone --nnodes=1 --nproc-per-node $NUM_GPUS \
-  generate.py --repo-id-or-model-path 'Qwen/Qwen1.5-14B-Chat' --gpu-num $NUM_GPUS
+  generate.py --repo-id-or-model-path 'Qwen/Qwen1.5-7B-Chat' --gpu-num $NUM_GPUS
+
+# # To run Qwen1.5-14B-Chat
+# CCL_ZE_IPC_EXCHANGE=sockets torchrun --standalone --nnodes=1 --nproc-per-node $NUM_GPUS \
+#   generate.py --repo-id-or-model-path 'Qwen/Qwen1.5-14B-Chat' --gpu-num $NUM_GPUS
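Taken together, the renamed script launches Qwen1.5-7B-Chat by default and keeps the 14B command as a commented-out block. A usage sketch based only on the commands shown in this diff:

```bash
pip install transformers==4.37.0  # version pinned in the README above
bash run_qwen1.5_arc_2_card.sh    # runs Qwen1.5-7B-Chat across two Arc A770 GPUs by default

# For Qwen1.5-14B-Chat, uncomment the 14B block at the end of the script
# (and comment out the 7B command), then rerun:
bash run_qwen1.5_arc_2_card.sh
```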