From 2299698b4504b7c9b3c26f60be26ff93569ef646 Mon Sep 17 00:00:00 2001
From: Xiangyu Tian <109123695+xiangyuT@users.noreply.github.com>
Date: Wed, 29 May 2024 17:16:50 +0800
Subject: [PATCH] Refine Pipeline Parallel FastAPI example (#11168)

---
 python/llm/example/GPU/Pipeline-Parallel-FastAPI/README.md | 2 ++
 python/llm/example/GPU/Pipeline-Parallel-FastAPI/run.sh    | 3 ++-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/python/llm/example/GPU/Pipeline-Parallel-FastAPI/README.md b/python/llm/example/GPU/Pipeline-Parallel-FastAPI/README.md
index a3a8f109aa8..e4233e37ee1 100644
--- a/python/llm/example/GPU/Pipeline-Parallel-FastAPI/README.md
+++ b/python/llm/example/GPU/Pipeline-Parallel-FastAPI/README.md
@@ -20,6 +20,8 @@ pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.
 source /opt/intel/oneapi/setvars.sh
 pip install mpi4py fastapi uvicorn
 conda install -c conda-forge -y gperftools=2.10 # to enable tcmalloc
+
+pip install transformers==4.31.0 # for llama2 models
 ```
 
 ### 2. Run pipeline parallel serving on multiple GPUs
diff --git a/python/llm/example/GPU/Pipeline-Parallel-FastAPI/run.sh b/python/llm/example/GPU/Pipeline-Parallel-FastAPI/run.sh
index 1e55c9d80ed..a15b6c51ff5 100644
--- a/python/llm/example/GPU/Pipeline-Parallel-FastAPI/run.sh
+++ b/python/llm/example/GPU/Pipeline-Parallel-FastAPI/run.sh
@@ -8,4 +8,5 @@ export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=2
 export TORCH_LLM_ALLREDUCE=0
 export MODEL_PATH=YOUR_MODEL_PATH
 
-CCL_ZE_IPC_EXCHANGE=sockets torchrun --standalone --nnodes=1 --nproc-per-node 2 pipeline_serving.py --repo-id-or-model-path $MODEL_PATH --low-bit fp8
+export NUM_GPUS=2
+CCL_ZE_IPC_EXCHANGE=sockets torchrun --standalone --nnodes=1 --nproc-per-node $NUM_GPUS pipeline_serving.py --repo-id-or-model-path $MODEL_PATH --low-bit fp8