-
Notifications
You must be signed in to change notification settings - Fork 10k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge branch 'master' of github.com:ggerganov/llama.cpp into mfalcon_mamba_cuda
- Loading branch information
Showing
5 changed files
with
45 additions
and
54 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG CUDA_VERSION=12.6.0
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the CUDA runtime image
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} AS build

# CUDA architecture to build for (defaults to all supported archs)
ARG CUDA_DOCKER_ARCH=default

# Install the toolchain; clean the apt lists in the same layer so the
# cache never lands in an image layer (hadolint DL3009/DL3015).
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

COPY . .

# Use the default CUDA archs if not specified; otherwise forward the
# requested arch list to CMake via CMAKE_CUDA_ARCHITECTURES.
# --allow-shlib-undefined lets llama-cli link against libggml/libllama
# whose CUDA symbols resolve only at runtime inside the CUDA base image.
RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
        export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
    fi && \
    cmake -B build -DGGML_CUDA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
    cmake --build build --config Release --target llama-cli -j$(nproc)

FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

# libgomp1 is the only extra runtime dependency (OpenMP support in ggml).
RUN apt-get update && \
    apt-get install -y --no-install-recommends libgomp1 && \
    rm -rf /var/lib/apt/lists/*

# Shared-library build: ship the ggml/llama shared objects next to the
# binary so the runtime image needs nothing from the devel stage.
COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
COPY --from=build /app/build/src/libllama.so /libllama.so
COPY --from=build /app/build/bin/llama-cli /llama-cli

ENTRYPOINT [ "/llama-cli" ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters