From 712ffff0e6891618b1f28e421f7f0b7e046b1daa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Daniele=20Trifir=C3=B2?=
Date: Thu, 13 Jun 2024 14:55:15 +0200
Subject: [PATCH] Dockerfile.ubi: use cuda-base as base for vllm-openai target

this adds the cuda runtime in order to fix missing libcudart.so.12 on
vLLM libraries
---
 Dockerfile.ubi | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile.ubi b/Dockerfile.ubi
index 294399be24c46..27271c83566e6 100644
--- a/Dockerfile.ubi
+++ b/Dockerfile.ubi
@@ -169,7 +169,7 @@ RUN --mount=type=cache,target=/root/.cache/ccache \
 # We used base cuda image because pytorch installs its own cuda libraries.
 # However pynccl depends on cuda libraries so we had to switch to the runtime image
 # In the future it would be nice to get a container with pytorch and cuda without duplicating cuda
-FROM python-install AS vllm-openai
+FROM cuda-base AS vllm-openai

 WORKDIR /workspace
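
For context, below is a minimal sketch of what the change amounts to. The cuda-base and vllm-openai stage names come from the diff, but the base image tag, the site-packages path in the ldd check, and the surrounding comments are illustrative assumptions, not the repository's actual Dockerfile.ubi.

# Hypothetical sketch only; the real cuda-base stage is defined earlier in
# Dockerfile.ubi and is not shown in this patch.

# A CUDA *runtime* image ships libcudart.so.12 (and the other CUDA runtime
# libraries), which a plain Python/UBI stage such as python-install does not.
FROM nvidia/cuda:12.4.1-runtime-ubi9 AS cuda-base

# Basing the serving image on cuda-base instead of python-install lets the
# compiled vLLM extensions resolve libcudart.so.12 at import time.
FROM cuda-base AS vllm-openai
WORKDIR /workspace

# One way to confirm the fix inside the built image (path is illustrative):
#   ldd /usr/local/lib/python3.11/site-packages/vllm/_C.abi3.so | grep libcudart
# should now report libcudart.so.12 as found rather than "not found".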