# gpu.Dockerfile (forked from domcyrus/lagoon-llama2)
ARG CUDA_IMAGE="12.1.1-devel-ubuntu22.04"
FROM nvidia/cuda:${CUDA_IMAGE}
ENV MODEL=WizardLM-13B-V1.2

# Install build tools, Python, and the OpenCL/CLBlast and OpenBLAS libraries,
# and register the NVIDIA OpenCL ICD so the GPU is visible to OpenCL
RUN apt-get update && apt-get upgrade -y \
&& apt-get install -y git build-essential \
python3 python3-pip gcc wget \
ocl-icd-opencl-dev opencl-headers clinfo \
libclblast-dev libopenblas-dev \
&& mkdir -p /etc/OpenCL/vendors && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd \
&& rm -rf /var/lib/apt/lists/*
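
# Build llama-cpp-python from source with the cuBLAS backend: CUDA_DOCKER_ARCH=all
# compiles CUDA kernels for all supported GPU architectures, and LLAMA_CUBLAS=1
# together with -DLLAMA_CUBLAS=on enables CUDA acceleration in the build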
ENV CUDA_DOCKER_ARCH=all
ENV LLAMA_CUBLAS=1
RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings requests
RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python

# Set the working directory and copy in the server start script and the model download helper
WORKDIR /app
COPY ./start-llama2.sh /app/start-llama2.sh
COPY ./hug_model.py /app/hug_model.py

# Offload 43 model layers to the GPU (these args are presumably expanded by start-llama2.sh)
ENV LLAMA_CPP_ARGS="--n_gpu_layers=43"
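# A minimal, hypothetical sketch of how the start script might use these settings
# (the real start-llama2.sh is copied above and not reproduced here; the model path is a placeholder):
#   python3 -m llama_cpp.server --model "/app/${MODEL}.bin" --host "$HOST" $LLAMA_CPP_ARGS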

# Listen on all network interfaces
ENV HOST=0.0.0.0
# Expose a port for the server
EXPOSE 8000
# Run the server start script
CMD ["/app/start-llama2.sh"]
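
# Example usage (image tag is arbitrary; requires the NVIDIA Container Toolkit on the host):
#   docker build -f gpu.Dockerfile -t lagoon-llama2-gpu .
#   docker run --gpus all -p 8000:8000 lagoon-llama2-gpu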