Start an Asahi version
Asahi has a forked version of mesa while it is being upstreamed.

Signed-off-by: Eric Curtin <[email protected]>
ericcurtin committed Nov 6, 2024
1 parent 0048ee3 commit c5e4d33
Showing 6 changed files with 49 additions and 14 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -92,6 +92,7 @@ curl -fsSL https://raw.githubusercontent.com/containers/ramalama/s/install.sh |
 | Hardware                           | Enabled |
 | ---------------------------------- | ------- |
 | CPU                                | :white_check_mark: |
+| Apple Silicon GPU (Linux / Asahi)  | :white_check_mark: |
 | Apple Silicon GPU (macOS)          | :white_check_mark: |
 | Apple Silicon GPU (podman-machine) | :x: |
 | Nvidia GPU (cuda)                  | :x: [Containerfile](https://github.com/containers/ramalama/blob/main/container-images/cuda/Containerfile) available but not published to quay.io |
29 changes: 29 additions & 0 deletions container-images/asahi/Containerfile
@@ -0,0 +1,29 @@
+FROM fedora:41
+
+# renovate: datasource=github-releases depName=containers/omlmd extractVersion=^v(?<version>.*)
+ARG OMLMD_VERSION=0.1.6
+ARG LLAMA_CPP_SHA=1329c0a75e6a7defc5c380eaf80d8e0f66d7da78
+# renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest
+ARG WHISPER_CPP_SHA=0377596b77a3602e36430320cbe45f8c305ef04a
+
+RUN dnf install -y dnf-plugins-core && \
+    dnf copr enable -y @asahi/fedora-remix-branding && \
+    dnf install -y asahi-repos && \
+    dnf install -y mesa-vulkan-drivers vulkan-headers vulkan-loader-devel \
+      vulkan-tools spirv-tools glslc glslang git procps-ng vim cmake gcc-c++ \
+      python3-pip python3-argcomplete clang && \
+    dnf clean all && \
+    rm -rf /var/cache/*dnf*
+
+RUN /usr/bin/python3 --version
+RUN pip install "omlmd==${OMLMD_VERSION}"
+
+COPY ../scripts /scripts
+RUN export CC=clang && \
+    export CXX=clang++ && \
+    chmod +x /scripts/*.sh && \
+    /scripts/build_llama_and_whisper.sh "$LLAMA_CPP_SHA" "$WHISPER_CPP_SHA" \
+      "/usr" "-DGGML_VULKAN=1"
+
+ENV WHISPER_CPP_SHA=${WHISPER_CPP_SHA}
+ENV LLAMA_CPP_SHA=${LLAMA_CPP_SHA}
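
The Copr repos enabled above are what pull in Asahi's patched graphics stack (notably mesa-vulkan-drivers), and llama.cpp/whisper.cpp are then built against it via -DGGML_VULKAN=1. A rough sketch of trying the image locally, assuming podman is available and a build invocation in which ../scripts resolves (the project's own build scripts may invoke this differently):

    # Hypothetical local build; the tag matches what model.py selects.
    podman build -t quay.io/ramalama/asahi:latest \
        -f container-images/asahi/Containerfile .

    # Smoke-test the Vulkan stack inside the image; assumes the Asahi
    # kernel exposes a render node under /dev/dri on the host.
    podman run --rm --device /dev/dri quay.io/ramalama/asahi:latest \
        vulkaninfo --summary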
5 changes: 1 addition & 4 deletions container-images/cuda/Containerfile
@@ -21,8 +21,6 @@ RUN chmod +x /scripts/*.sh && \
 # Final runtime image
 FROM docker.io/nvidia/cuda:12.6.2-runtime-ubi9
 
-# renovate: datasource=github-releases depName=huggingface/huggingface_hub extractVersion=^v(?<version>.*)
-ARG HUGGINGFACE_HUB_VERSION=0.26.2
 # renovate: datasource=github-releases depName=containers/omlmd extractVersion=^v(?<version>.*)
 ARG OMLMD_VERSION=0.1.6
 
@@ -32,8 +30,7 @@ RUN dnf install -y python3 python3-pip && \
     rm -rf /var/cache/*dnf*
 
 # Install Python packages in the runtime image
-RUN pip install "huggingface_hub==${HUGGINGFACE_HUB_VERSION}" \
-    "omlmd==${OMLMD_VERSION}"
+RUN pip install "omlmd==${OMLMD_VERSION}"
 
 # Copy the entire installation directory from the builder
 COPY --from=builder /tmp/install /usr
6 changes: 0 additions & 6 deletions container-images/ramalama/Containerfile
@@ -1,11 +1,7 @@
 FROM registry.access.redhat.com/ubi9/ubi:9.4-1214.1729773476
 
-# renovate: datasource=github-releases depName=huggingface/huggingface_hub extractVersion=^v(?<version>.*)
-ARG HUGGINGFACE_HUB_VERSION=0.26.2
 # renovate: datasource=github-releases depName=containers/omlmd extractVersion=^v(?<version>.*)
 ARG OMLMD_VERSION=0.1.6
-# renovate: datasource=github-releases depName=tqdm/tqdm extractVersion=^v(?<version>.*)
-ARG TQDM_VERSION=4.66.6
 ARG LLAMA_CPP_SHA=1329c0a75e6a7defc5c380eaf80d8e0f66d7da78
 # renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest
 ARG WHISPER_CPP_SHA=0377596b77a3602e36430320cbe45f8c305ef04a
@@ -24,9 +20,7 @@ RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.n
     rm -rf /var/cache/*dnf*
 
 RUN /usr/bin/python3 --version
-RUN pip install "huggingface_hub==${HUGGINGFACE_HUB_VERSION}"
 RUN pip install "omlmd==${OMLMD_VERSION}"
-RUN pip install "tqdm==${TQDM_VERSION}"
 
 RUN dnf config-manager --add-repo \
     https://mirror.stream.centos.org/9-stream/AppStream/$(uname -m)/os/
20 changes: 16 additions & 4 deletions ramalama/model.py
@@ -103,10 +103,16 @@ def model_path(self, args):
         raise NotImplementedError(f"model_path for {self.type} not implemented")
 
     def _image(self, args):
+        if args.image != default_image():
+            return args.image
+
         gpu_type, _ = get_gpu()
         if gpu_type == "HIP_VISIBLE_DEVICES":
-            if args.image == default_image():
-                return "quay.io/ramalama/rocm:latest"
+            return "quay.io/ramalama/rocm:latest"
+
+        if gpu_type == "ASAHI_VISIBLE_DEVICES":
+            return "quay.io/ramalama/asahi:latest"
+
         return args.image
 
     def setup_container(self, args):
@@ -147,7 +153,7 @@ def setup_container(self, args):
             conman_args += ["--device", "/dev/kfd"]
 
         gpu_type, gpu_num = get_gpu()
-        if gpu_type == "HIP_VISIBLE_DEVICES":
+        if gpu_type == "HIP_VISIBLE_DEVICES" or gpu_type == "ASAHI_VISIBLE_DEVICES":
             conman_args += ["-e", f"{gpu_type}={gpu_num}"]
         return conman_args
 
@@ -198,7 +204,7 @@ def gpu_args(self):
             # any additional arguments.
             pass
         elif sys.platform == "linux" and (
-            os.path.exists("/dev/dri") or os.getenv("HIP_VISIBLE_DEVICES") or os.getenv("CUDA_VISIBLE_DEVICES")
+            os.getenv("HIP_VISIBLE_DEVICES") or os.getenv("ASAHI_VISIBLE_DEVICES") or os.getenv("CUDA_VISIBLE_DEVICES")
         ):
             gpu_args = ["-ngl", "99"]
         else:
@@ -406,6 +412,12 @@ def get_gpu():
     if gpu_bytes:  # this is the ROCm/AMD case
         return "HIP_VISIBLE_DEVICES", gpu_num
 
+    if os.path.exists('/etc/os-release'):
+        with open('/etc/os-release', 'r') as file:
+            content = file.read()
+            if "asahi" in content.lower():
+                return "ASAHI_VISIBLE_DEVICES", 1
+
     return None, None
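
Note that the detection here is distro-based rather than hardware-based: any /etc/os-release containing "asahi" (case-insensitive) is treated as an Asahi system, and get_gpu() returns ASAHI_VISIBLE_DEVICES with a count of 1, which setup_container() forwards into the container as an environment variable and gpu_args() uses to enable -ngl 99. For illustration, a shell equivalent of the check (the exact os-release field that mentions Asahi varies, hence the substring match):

    # Mirrors the Python logic: case-insensitive substring match.
    if grep -qi asahi /etc/os-release 2>/dev/null; then
        export ASAHI_VISIBLE_DEVICES=1
    fi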


2 changes: 2 additions & 0 deletions test/ci.sh
@@ -32,6 +32,8 @@ main() {
   # verify llama.cpp version matches
   grep "$(grep "ARG LLAMA_CPP_SHA=" container-images/ramalama/Containerfile)" \
     container-images/cuda/Containerfile
+  grep "$(grep "ARG LLAMA_CPP_SHA=" container-images/ramalama/Containerfile)" \
+    container-images/asahi/Containerfile
 
   local os
   os="$(uname -s)"
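
The nested grep is a pin-consistency check rather than a content search: the inner grep expands to the full pinned ARG line from container-images/ramalama/Containerfile, and the outer grep then requires that exact line to appear in the asahi Containerfile, failing the script (and CI, assuming it runs with errexit) whenever the two SHAs drift apart. With the current pin, the new check is equivalent to:

    grep 'ARG LLAMA_CPP_SHA=1329c0a75e6a7defc5c380eaf80d8e0f66d7da78' \
        container-images/asahi/Containerfile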
