mirror of
https://gitee.com/paddlepaddle/PaddleOCR.git
synced 2025-12-06 11:30:08 +08:00
Fix compose file for xpu and dockerfile for dcu (#17135)
* Fix compose file for xpu * Add privileged * Fix DCU dockerfile * Add --device * Fix dcu docker compose
This commit is contained in:
@@ -22,7 +22,7 @@ services:
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- VLM_BACKEND=${VLM_BACKEND:-vllm}
|
||||
command: /bin/bash -c "paddlex --serve --pipeline /home/paddleocr/pipeline_config_${VLM_BACKEND}.yaml"
|
||||
command: /bin/bash -c "source ~/.bashrc && paddlex --serve --pipeline /home/paddleocr/pipeline_config_${VLM_BACKEND}.yaml --device dcu"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
|
||||
volumes:
|
||||
|
||||
@@ -11,7 +11,7 @@ services:
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- VLM_BACKEND=${VLM_BACKEND:-vllm}
|
||||
command: /bin/bash -c "paddlex --serve --pipeline /home/paddleocr/pipeline_config_${VLM_BACKEND}.yaml"
|
||||
command: /bin/bash -c "paddlex --serve --pipeline /home/paddleocr/pipeline_config_${VLM_BACKEND}.yaml --device npu"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
|
||||
volumes:
|
||||
|
||||
@@ -7,35 +7,23 @@ services:
|
||||
depends_on:
|
||||
paddleocr-vlm-server:
|
||||
condition: service_healthy
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
device_ids: ["0"]
|
||||
capabilities: [gpu]
|
||||
user: root
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- VLM_BACKEND=${VLM_BACKEND:-vllm}
|
||||
command: /bin/bash -c "paddlex --serve --pipeline /home/paddleocr/pipeline_config_${VLM_BACKEND}.yaml"
|
||||
command: /bin/bash -c "paddlex --serve --pipeline /home/paddleocr/pipeline_config_${VLM_BACKEND}.yaml --device xpu"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
|
||||
privileged: true
|
||||
shm_size: 64G
|
||||
|
||||
paddleocr-vlm-server:
|
||||
image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleocr-genai-${VLM_BACKEND}-server:${VLM_IMAGE_TAG_SUFFIX}
|
||||
container_name: paddleocr-vlm-server
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
device_ids: ["0"]
|
||||
capabilities: [gpu]
|
||||
user: root
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
|
||||
start_period: 300s
|
||||
privileged: true
|
||||
shm_size: 64G
|
||||
|
||||
@@ -41,6 +41,10 @@ WORKDIR /home/paddleocr
|
||||
|
||||
USER paddleocr
|
||||
|
||||
RUN if [ "${DEVICE_TYPE}" = 'dcu' ]; then \
|
||||
echo 'source /opt/dtk-24.04.1/env.sh' >> "${HOME}/.bashrc"; \
|
||||
fi
|
||||
|
||||
ARG BUILD_FOR_OFFLINE=false
|
||||
RUN if [ "${BUILD_FOR_OFFLINE}" = 'true' ]; then \
|
||||
mkdir -p "${HOME}/.paddlex/official_models" \
|
||||
|
||||
Reference in New Issue
Block a user