# docker-compose.tuning.paddle.yml - Ray Tune with PaddleOCR GPU
# Usage:
#   docker compose -f docker-compose.tuning.paddle.yml up -d paddle-ocr-gpu
#   docker compose -f docker-compose.tuning.paddle.yml run raytune --service paddle --samples 64
#   docker compose -f docker-compose.tuning.paddle.yml down

services:
  raytune:
    image: seryus.ddns.net/unir/raytune:latest
    command: ["--service", "paddle", "--host", "paddle-ocr-gpu", "--port", "8000", "--samples", "64"]
    volumes:
      - ./results:/app/results:rw
    environment:
      - PYTHONUNBUFFERED=1
    depends_on:
      paddle-ocr-gpu:
        condition: service_healthy

  paddle-ocr-gpu:
    image: seryus.ddns.net/unir/paddle-ocr-gpu:latest
    container_name: paddle-ocr-gpu-tuning
    ports:
      - "8002:8000"
    volumes:
      - ./dataset:/app/dataset:ro
      - ./debugset:/app/debugset:rw
      - paddlex-cache:/root/.paddlex
    environment:
      - PYTHONUNBUFFERED=1
      - CUDA_VISIBLE_DEVICES=0
      - PADDLE_DET_MODEL=PP-OCRv5_mobile_det
      - PADDLE_REC_MODEL=PP-OCRv5_mobile_rec
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s

volumes:
  paddlex-cache:
    name: paddlex-model-cache
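
# Optional sanity checks once paddle-ocr-gpu reports healthy (illustrative only;
# assumes nvidia-smi is available inside the image and curl on the host, while the
# /health route and host port 8002 are taken from the service definition above):
#   docker compose -f docker-compose.tuning.paddle.yml exec paddle-ocr-gpu nvidia-smi
#   curl -f http://localhost:8002/health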