docker gpu arm update
@@ -92,8 +92,16 @@ RUN mkdir -p build
 WORKDIR /build/Paddle/build
 
 # Configure CMake for ARM64 + CUDA build
-# Note: Adjust CUDA_ARCH_NAME based on your GPU architecture
-# Common values: Auto, Ampere, Ada, Hopper
+#
+# CUDA_ARCH is auto-detected from host GPU and passed via docker-compose.
+# To detect: nvidia-smi --query-gpu=compute_cap --format=csv,noheader
+# Example: 12.1 -> use "90" (Hopper, closest supported), 9.0 -> use "90"
+#
+# Build time: ~30-60 min with single arch vs 2-4 hours with all archs
+
+ARG CUDA_ARCH=90
+RUN echo "Building for CUDA architecture: sm_${CUDA_ARCH}"
+
 RUN cmake .. \
     -GNinja \
     -DCMAKE_BUILD_TYPE=Release \
@@ -107,7 +115,9 @@ RUN cmake .. \
     -DON_INFER=OFF \
     -DWITH_PYTHON=ON \
     -DWITH_AVX=OFF \
-    -DCUDA_ARCH_NAME=Auto \
+    -DCUDA_ARCH_NAME=Manual \
+    -DCUDA_ARCH_BIN="${CUDA_ARCH}" \
+    -DCMAKE_CUDA_ARCHITECTURES="${CUDA_ARCH}" \
     -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
 
 # Build PaddlePaddle (this takes 2-4 hours)
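
These two hunks thread a single target architecture through the builder image: the new comments describe turning the host GPU's compute capability into a CUDA_ARCH value, the ARG receives it as a build arg, and the cmake flags switch from automatic arch selection to that one architecture (per the comment, roughly 30-60 minutes instead of 2-4 hours). A minimal host-side sketch of the mapping the comments describe, assuming a single GPU and reading "closest supported" as capping anything newer than compute capability 9.0 at sm_90:

    # Sketch only; mirrors the Dockerfile comments above.
    cap=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | head -1)   # e.g. "9.0" or "12.1"
    arch=$(echo "$cap" | tr -d '.')                                             # -> "90" or "121"
    # Assumption: capabilities newer than 9.0 fall back to 90 (Hopper), per "closest supported".
    [ "$arch" -gt 90 ] && arch=90
    export CUDA_ARCH="$arch"
    # Outside compose, the same value can be passed directly (the image tag is illustrative):
    # docker build -f Dockerfile.build-paddle --build-arg CUDA_ARCH="$CUDA_ARCH" -t paddle-wheel-builder .
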
@@ -3,16 +3,22 @@
 # CPU: docker compose up ocr-cpu
 # GPU: docker compose up ocr-gpu
 # Test: docker compose run --rm test
-# Build: docker compose run --rm build-paddle (ARM64 GPU wheel, one-time)
+# Build: CUDA_ARCH=90 docker compose --profile build run --rm build-paddle
+#
+# Auto-detect CUDA arch before building:
+# export CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | head -1 | tr -d '.')
+# docker compose --profile build run --rm build-paddle
 
 services:
   # PaddlePaddle GPU wheel builder (ARM64 only, one-time build)
   # Creates ./wheels/paddlepaddle_gpu-*.whl for ARM64 GPU support
-  # Run once: docker compose run --rm build-paddle
+  # CUDA_ARCH env var controls target GPU architecture (default: 90 for Hopper)
   build-paddle:
     build:
       context: .
       dockerfile: Dockerfile.build-paddle
+      args:
+        CUDA_ARCH: ${CUDA_ARCH:-90}
     volumes:
       - ./wheels:/wheels
     profiles:
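
Putting the compose side together: the builder sits behind the build profile, so a plain docker compose up never triggers the multi-hour build, and CUDA_ARCH flows from the environment into the Dockerfile as a build arg. A usage sketch assembled from the comments and service definition above (the final ls only confirms the output path named in the "Creates" comment):

    # Detect the host GPU's compute capability and strip the dot.
    export CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | head -1 | tr -d '.')

    # One-time wheel build; the "build" profile keeps it out of normal docker compose up runs.
    docker compose --profile build run --rm build-paddle

    # The wheel lands in ./wheels/ via the bind mount.
    ls ./wheels/paddlepaddle_gpu-*.whl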