---
# Docker Compose stack for Tabby (self-hosted AI coding assistant):
# model download/init, API server, admin UI, Triton inference backend,
# and a Vector sidecar that ships server event logs.
#
# Required environment variables (e.g. via .env):
#   MODEL_NAME   — HuggingFace model id served by Triton
#   HF_VOLUME    — volume spec for the shared HuggingFace model cache
#   LOGS_VOLUME  — volume spec for the shared logs directory

# NOTE: "3.3" must be quoted — an unquoted 3.3 parses as a YAML float.
version: "3.3"

services:
  # One-shot job: downloads/prepares the model into the shared HF cache.
  # Other services gate on its successful completion via depends_on.
  init:
    image: tabbyml/tabby
    container_name: tabby-init
    command: dagu start --params=MODEL_NAME=${MODEL_NAME} ./tabby/tasks/init.yaml
    volumes:
      - ${HF_VOLUME}

  # HTTP API server; talks to the Triton backend for inference.
  server:
    image: tabbyml/tabby
    container_name: tabby-server
    command: uvicorn tabby.server:app --host 0.0.0.0 --port 5000
    environment:
      MODEL_NAME: ${MODEL_NAME}
      MODEL_BACKEND: triton
      EVENTS_LOG_DIR: /logs/tabby-server
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal parsing of host:container pairs.
      - "5000:5000"
    volumes:
      - ${HF_VOLUME}
      - ${LOGS_VOLUME}
    depends_on:
      init:
        condition: service_completed_successfully
      triton:
        condition: service_healthy

  # Streamlit-based admin dashboard.
  admin:
    image: tabbyml/tabby
    container_name: tabby-admin
    command: streamlit run tabby/admin/Home.py
    ports:
      - "8501:8501"

  # Triton inference server (FasterTransformer backend) on NVIDIA GPUs.
  triton:
    image: tabbyml/fastertransformer_backend
    container_name: tabby-triton
    command: /scripts/triton.sh
    # FasterTransformer needs more shared memory than Docker's 64 MB default.
    shm_size: 1gb
    volumes:
      - ./scripts:/scripts
      - ${HF_VOLUME}
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    environment:
      MODEL_NAME: ${MODEL_NAME}
    depends_on:
      init:
        condition: service_completed_successfully
    # Probe Triton's metrics endpoint; start_period gives the model
    # up to 120s to load before failures count against the service.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8002/metrics"]
      interval: 2s
      timeout: 2s
      start_period: 120s

  # Log shipper: tails the shared logs volume per config/vector.toml.
  vector:
    image: timberio/vector:0.28.1-alpine
    container_name: tabby-vector
    volumes:
      - ./config/vector.toml:/etc/vector/vector.toml:ro
      - ${LOGS_VOLUME}