build(docker): upgrade to CUDA 12.8 and Debian 12

- Update base image from Debian 11 (bullseye) to Debian 12 (bookworm)
- Upgrade CUDA version from 12.4 to 12.8
- Update PyTorch from version 2.5.1 to 2.7.0 with CUDA 12.8 support
- Change triton dependency from exact version 3.1.0 to minimum version constraint

Signed-off-by: CHEN, CHUN <jim60105@gmail.com>
pull/3255/head
CHEN, CHUN 2025-05-27 00:00:16 +08:00
parent 342c79fe83
commit cff20de128
No known key found for this signature in database
GPG Key ID: A0C4B928E0DCA4CF
1 changed file with 12 additions and 12 deletions

View File

@@ -6,7 +6,7 @@ ARG RELEASE=0
 ########################################
 # Base stage
 ########################################
-FROM docker.io/library/python:3.11-slim-bullseye AS base
+FROM docker.io/library/python:3.11-slim-bookworm AS base

 # RUN mount cache for multi-arch: https://github.com/docker/buildx/issues/549#issuecomment-1788297892
 ARG TARGETARCH
@@ -22,22 +22,22 @@ ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
 # Installing the complete CUDA Toolkit system-wide usually adds around 8GB to the image size.
 # Since most CUDA packages already installed through pip, there's no need to download the entire toolkit.
 # Therefore, we opt to install only the essential libraries.
-# Here is the package list for your reference: https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64
-ADD https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb /tmp/cuda-keyring_x86_64.deb
+# Here is the package list for your reference: https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64
+ADD https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/cuda-keyring_1.1-1_all.deb /tmp/cuda-keyring_x86_64.deb

 RUN --mount=type=cache,id=apt-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/cache/apt \
     --mount=type=cache,id=aptlists-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/lib/apt/lists \
     dpkg -i cuda-keyring_x86_64.deb && \
     rm -f cuda-keyring_x86_64.deb && \
     apt-get update && \
     apt-get install -y --no-install-recommends \
-    # !If you experience any related issues, replace the following line with `cuda-12-4` to obtain the complete CUDA package.
-    cuda-nvcc-12-4
+    # !If you experience any related issues, replace the following line with `cuda-12-8` to obtain the complete CUDA package.
+    cuda-nvcc-12-8

 ENV PATH="/usr/local/cuda/bin${PATH:+:${PATH}}"
 ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64
-ENV CUDA_VERSION=12.4
-ENV NVIDIA_REQUIRE_CUDA=cuda>=12.4
+ENV CUDA_VERSION=12.8
+ENV NVIDIA_REQUIRE_CUDA=cuda>=12.8
 ENV CUDA_HOME=/usr/local/cuda

 ########################################
@@ -58,7 +58,7 @@ ENV UV_PROJECT_ENVIRONMENT=/venv
 ENV VIRTUAL_ENV=/venv
 ENV UV_LINK_MODE=copy
 ENV UV_PYTHON_DOWNLOADS=0
-ENV UV_INDEX=https://download.pytorch.org/whl/cu124
+ENV UV_INDEX=https://download.pytorch.org/whl/cu128

 # Install build dependencies
 RUN --mount=type=cache,id=apt-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/cache/apt \
@@ -73,10 +73,10 @@ RUN --mount=type=cache,id=apt-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/v
 RUN --mount=type=cache,id=uv-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/root/.cache/uv \
     uv venv --system-site-packages /venv && \
     uv pip install --no-deps \
-    # torch (866.2MiB)
-    torch==2.5.1+cu124 \
-    # triton (199.8MiB)
-    triton==3.1.0 \
+    # torch (1.0GiB)
+    torch==2.7.0+cu128 \
+    # triton (149.3MiB)
+    triton>=3.1.0 \
     # tensorflow (615.0MiB)
     tensorflow>=2.16.1 \
     # onnxruntime-gpu (215.7MiB)