# docker/docker-cuda/docker-compose.yml
# Compose service for running LLaMA-Factory in a CUDA-enabled container.
# Build context is the repository root (../..), so the Dockerfile path is
# given relative to that root.
services:
  llamafactory:
    build:
      dockerfile: ./docker/docker-cuda/Dockerfile
      context: ../..
      args:
        # Optional extras are disabled by default; quoted because Compose
        # build args are strings (unquoted false would parse as a boolean).
        INSTALL_BNB: "false"
        INSTALL_VLLM: "false"
        INSTALL_DEEPSPEED: "false"
        INSTALL_FLASHATTN: "false"
        PIP_INDEX: https://pypi.org/simple
    container_name: llamafactory
    volumes:
      # Persist HF/ModelScope caches and expose data/output dirs on the host.
      - ../../hf_cache:/root/.cache/huggingface
      - ../../ms_cache:/root/.cache/modelscope
      - ../../data:/app/data
      - ../../output:/app/output
    ports:
      - "7860:7860"  # Gradio web UI
      - "8000:8000"  # OpenAI-style API server
    ipc: host  # share host IPC namespace (needed for PyTorch shared memory)
    tty: true
    stdin_open: true
    command: bash
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all host NVIDIA GPUs for this container.
            - driver: nvidia
              count: "all"
              capabilities: [gpu]
    restart: unless-stopped