fix #4419

parent d0f953bf5b
commit efb81b25ec
README.md
@@ -448,7 +448,8 @@ docker build -f ./docker/docker-cuda/Dockerfile \
     -t llamafactory:latest .
 
 docker run -dit --gpus=all \
-    -v ./hf_cache:/root/.cache/huggingface/ \
+    -v ./hf_cache:/root/.cache/huggingface \
+    -v ./ms_cache:/root/.cache/modelscope \
     -v ./data:/app/data \
     -v ./output:/app/output \
     -p 7860:7860 \
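The hunk above normalizes the HuggingFace cache target (no trailing slash, matching the `VOLUME` declarations patched further down) and bind-mounts the ModelScope cache so models downloaded via ModelScope persist across container restarts. A quick way to confirm the mounts resolved as intended, as a sketch: it assumes the container was started with the name `llamafactory`, which is taken from the compose files below, not from this `docker run` line.

    # List every mount of the running container; each of ./hf_cache,
    # ./ms_cache, ./data and ./output should appear as a bind mount,
    # not as an anonymous volume.
    docker inspect llamafactory --format '{{ json .Mounts }}' | python3 -m json.tool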
README.md
@@ -471,7 +472,8 @@ docker build -f ./docker/docker-npu/Dockerfile \
 
 # Change `device` upon your resources
 docker run -dit \
-    -v ./hf_cache:/root/.cache/huggingface/ \
+    -v ./hf_cache:/root/.cache/huggingface \
+    -v ./ms_cache:/root/.cache/modelscope \
     -v ./data:/app/data \
     -v ./output:/app/output \
     -v /usr/local/dcmi:/usr/local/dcmi \
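For the NPU variant, the "Change `device` upon your resources" comment refers to `--device` flags that sit below this hunk's context window. A hypothetical illustration of what adjusting them looks like; the `/dev/davinci*` node names follow the usual Ascend conventions and are assumed here, not quoted from this commit:

    # Pass NPU card 0 into a container; add /dev/davinci1, /dev/davinci2, ...
    # for multi-card setups. The dcmi mount above is what lets tools inside
    # the container talk to the Ascend driver.
    docker run --rm -it \
        --device /dev/davinci0 \
        --device /dev/davinci_manager \
        -v /usr/local/dcmi:/usr/local/dcmi \
        ubuntu:22.04 ls -l /dev/davinci0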
README_zh.md
@@ -448,7 +448,8 @@ docker build -f ./docker/docker-cuda/Dockerfile \
     -t llamafactory:latest .
 
 docker run -dit --gpus=all \
-    -v ./hf_cache:/root/.cache/huggingface/ \
+    -v ./hf_cache:/root/.cache/huggingface \
+    -v ./ms_cache:/root/.cache/modelscope \
     -v ./data:/app/data \
     -v ./output:/app/output \
     -p 7860:7860 \
README_zh.md
@@ -471,7 +472,8 @@ docker build -f ./docker/docker-npu/Dockerfile \
 
 # Change `device` upon your resources
 docker run -dit \
-    -v ./hf_cache:/root/.cache/huggingface/ \
+    -v ./hf_cache:/root/.cache/huggingface \
+    -v ./ms_cache:/root/.cache/modelscope \
     -v ./data:/app/data \
     -v ./output:/app/output \
     -v /usr/local/dcmi:/usr/local/dcmi \
docker/docker-cuda/Dockerfile
@@ -36,7 +36,7 @@ RUN EXTRA_PACKAGES="metrics"; \
     pip uninstall -y transformer-engine flash-attn
 
 # Set up volumes
-VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
+VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
 
 # Expose port 7860 for the LLaMA Board
 ENV GRADIO_SERVER_PORT 7860
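On the Dockerfile side, the commit makes the `VOLUME` list agree with the run commands: the trailing slash is dropped and `/root/.cache/modelscope` is declared. One consequence worth knowing: any declared `VOLUME` path that is not covered by a `-v` bind mount gets an auto-created anonymous volume at run time. A minimal sketch to observe this, assuming an image tagged `llamafactory:latest` built from this Dockerfile:

    # Start a throwaway container with no -v flags (overriding the image's
    # default entrypoint so it just idles), then print where Docker put each
    # declared volume; the paths map to anonymous volumes.
    docker run -d --name vol-check --entrypoint sleep llamafactory:latest 60
    docker inspect vol-check \
        --format '{{ range .Mounts }}{{ .Destination }} -> {{ .Name }}{{ "\n" }}{{ end }}'
    docker rm -f vol-check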
docker/docker-cuda/docker-compose.yml
@@ -10,7 +10,8 @@ services:
         PIP_INDEX: https://pypi.org/simple
     container_name: llamafactory
     volumes:
-      - ./hf_cache:/root/.cache/huggingface/
+      - ./hf_cache:/root/.cache/huggingface
+      - ./ms_cache:/root/.cache/modelscope
       - ./data:/app/data
       - ./output:/app/output
     ports:
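With the compose file patched, bringing the stack up is unchanged; only the cache mounts change. A usage sketch, assuming the service is named `llamafactory` as the `container_name` here suggests:

    cd docker/docker-cuda
    docker compose up -d
    # Both caches should now be bind-mounted under the compose directory.
    ls hf_cache ms_cache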
docker/docker-npu/Dockerfile
@@ -30,7 +30,7 @@ RUN EXTRA_PACKAGES="torch-npu,metrics"; \
     pip uninstall -y transformer-engine flash-attn
 
 # Set up volumes
-VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
+VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
 
 # Expose port 7860 for the LLaMA Board
 ENV GRADIO_SERVER_PORT 7860
docker/docker-npu/docker-compose.yml
@@ -8,7 +8,8 @@ services:
         PIP_INDEX: https://pypi.org/simple
     container_name: llamafactory
     volumes:
-      - ./hf_cache:/root/.cache/huggingface/
+      - ./hf_cache:/root/.cache/huggingface
+      - ./ms_cache:/root/.cache/modelscope
       - ./data:/app/data
       - ./output:/app/output
       - /usr/local/dcmi:/usr/local/dcmi
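A final hedged note on the relative bind mounts (`./hf_cache`, `./ms_cache`, and so on): if the host directories do not exist, the Docker engine creates them owned by root, which can make later non-root cleanup awkward. Pre-creating them as your own user avoids that:

    # Run from the directory holding the compose file before the first
    # `docker compose up`, so the cache and output dirs stay user-owned.
    mkdir -p hf_cache ms_cache data output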