version: '3.8'

services:
  llama-factory:
    build:
      dockerfile: Dockerfile
      context: .
    container_name: llama_factory
    volumes:
      - ./hf_cache:/root/.cache/huggingface/
      - ./data:/app/data
      - ./output:/app/output
    ports:
      - "7860:7860"
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: "all"
              capabilities: [gpu]
    restart: unless-stopped
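
# Usage sketch, assuming Docker Compose v2 and the NVIDIA Container Toolkit
# are installed on the host (the toolkit is required for the `driver: nvidia`
# GPU reservation above):
#
#   docker compose up -d --build   # build the image and start the service
#   docker compose logs -f         # follow the container logs
#
# With the "7860:7860" port mapping above, the web UI should then be
# reachable at http://localhost:7860. The ./hf_cache, ./data, and ./output
# bind mounts persist the Hugging Face cache, datasets, and training output
# across container restarts.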