LLaMA-Factory-310P3/docker-compose.yml

version: '3.8'
services:
  llamafactory:
    build:
      dockerfile: Dockerfile
      context: .
      args:
        # optional extras are off by default; set to true to bake them into the image
        INSTALL_BNB: false
        INSTALL_VLLM: false
        INSTALL_DEEPSPEED: false
        PIP_INDEX: https://pypi.org/simple
    container_name: llamafactory
    volumes:
      - ./hf_cache:/root/.cache/huggingface/   # persist the Hugging Face model/dataset cache
      - ./data:/app/data
      - ./output:/app/output
    ports:
      - "7860:7860"   # LLaMA Board (Gradio web UI)
      - "8000:8000"   # API server
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: "all"              # reserve every visible NVIDIA GPU
              capabilities: [gpu]
    restart: unless-stopped
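
A usage note, not part of the original file: the deploy block above reserves every NVIDIA GPU on the host. If the container should only see a specific device, the Compose specification also accepts device_ids in place of count; a sketch of that variant of the deploy block follows (the GPU index "0" is an illustrative assumption).

# illustrative variant (not in the original file): expose only GPU 0 to the container
services:
  llamafactory:
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]        # assumption: pick GPU 0 instead of count: "all"
              capabilities: [gpu]

With either variant, the stack is typically started with docker compose up -d, which builds the image from the Dockerfile on first run.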