Skip to content

Commit

Permalink
Focus on building for centos:stream9
Browse files Browse the repository at this point in the history
No acceleration; the first accelerated platform will be macOS.

Signed-off-by: Eric Curtin <[email protected]>
  • Loading branch information
ericcurtin committed Jul 25, 2024
1 parent 706fe4c commit fa0171e
Showing 1 changed file with 7 additions and 16 deletions.
23 changes: 7 additions & 16 deletions container-images/ramalama/latest/Containerfile
Original file line number Diff line number Diff line change
@@ -1,29 +1,20 @@
# NOTE(review): diff rendering — two consecutive FROM lines: fedora:39 is the
# deleted (pre-commit) base and centos:stream9 the added one; the live
# Containerfile contains only the centos:stream9 line. Consider pinning the
# base by digest for full reproducibility.
FROM fedora:39
FROM centos:stream9

RUN mkdir -p /models
# NOTE(review): diff rendering — this is the *deleted* (pre-commit, Fedora-era)
# variant of the dnf install step: it pulled clblast-devel and, on aarch64,
# enabled the slp/mesa-krunkit copr and versionlocked specific mesa builds.
# Its trailing '&& \' dangles straight into the replacement RUN that follows;
# do not treat this span as buildable Containerfile content.
RUN dnf install -y git jq procps-ng vim clblast-devel vulkan-headers \
vulkan-loader-devel glslc glslang python3-pip dnf-plugins-core \
python3-dnf-plugin-versionlock cmake gcc-c++ libcurl-devel && \
if [ "$(uname -m)" = "aarch64" ]; then \
dnf copr enable -y slp/mesa-krunkit && \
dnf install -y mesa-libOpenCL-23.3.5-102.aarch64 \
mesa-vulkan-drivers-23.3.5-102.aarch64 && \
dnf versionlock mesa-libOpenCL-23.3.5-102.aarch64 \
mesa-vulkan-drivers-23.3.5-102.aarch64; \
fi && \
dnf install -y vulkan-tools && \
# Build toolchain (cmake, gcc-c++, libcurl-devel), Vulkan headers/loader and
# shader compilers (glslc, glslang), Python tooling, and convenience utilities.
# Packages are listed one per line, sorted alphabetically, for diffability;
# the dnf metadata/package cache is purged in the same layer so the downloads
# never persist in the image.
RUN dnf install -y \
        cmake \
        dnf-plugins-core \
        gcc-c++ \
        git \
        glslang \
        glslc \
        jq \
        libcurl-devel \
        procps-ng \
        python3-dnf-plugin-versionlock \
        python3-pip \
        vim \
        vulkan-headers \
        vulkan-loader-devel \
        vulkan-tools && \
    dnf clean all && \
    rm -rf /var/cache/*dnf*

RUN pip install "huggingface_hub[cli]==0.24.2"

# NOTE(review): diff rendering — the three LLAMA_* lines are the *deleted*
# (pre-commit) settings; GGML_CCACHE=0 is the added replacement, matching the
# llama.cpp flag rename also visible in the cmake line below
# (-DLLAMA_CCACHE=0 -> -DGGML_CCACHE=0). The live file keeps only GGML_CCACHE.
ENV LLAMA_CCACHE=0
ENV LLAMA_CURL=1
ENV LLAMA_VULKAN=1
ENV GGML_CCACHE=0

RUN git clone -b ramalama https://github.com/ericcurtin/llama.cpp.git && \
cd llama.cpp && \
cmake -B build -DLLAMA_CCACHE=0 -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 && \
cmake -B build -DGGML_CCACHE=0 && \
cmake --build build --config Release -j $(nproc) && \
cd build/bin && \
for file in *; do \
Expand Down

0 comments on commit fa0171e

Please sign in to comment.