# tabby/deployment/docker-compose.yml
# Tabby deployment stack: one-shot model download (init), the API server,
# a Streamlit admin UI, the Triton inference backend, and a Vector log shipper.
# NOTE(review): the `depends_on` long syntax (service_completed_successfully /
# service_healthy) requires the Docker Compose v2 CLI — confirm the target
# environment uses `docker compose`, not the legacy v3 `docker stack deploy`.
version: '3.3'

services:
  # One-shot job: downloads/initializes the model (dagu task), then exits.
  # Other services gate on its successful completion.
  init:
    image: tabbyml/tabby
    container_name: tabby-init
    command: dagu start --params=MODEL_NAME=${MODEL_NAME} ./tabby/tasks/init.yaml
    volumes:
      # Shared Hugging Face cache volume (host path supplied via .env).
      - ${HF_VOLUME}

  # Tabby API server (FastAPI via uvicorn) on port 5000; delegates inference
  # to the Triton backend and writes event logs for the vector shipper.
  server:
    image: tabbyml/tabby
    container_name: tabby-server
    command: uvicorn tabby.server:app --host 0.0.0.0 --port 5000
    environment:
      MODEL_NAME: ${MODEL_NAME}
      MODEL_BACKEND: triton
      EVENTS_LOG_DIR: /logs/tabby-server
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal parsing of host:container pairs.
      - "5000:5000"
    volumes:
      - ${HF_VOLUME}
      - ${LOGS_VOLUME}
    depends_on:
      init:
        condition: service_completed_successfully
      triton:
        condition: service_healthy

  # Streamlit admin dashboard on port 8501.
  admin:
    image: tabbyml/tabby
    container_name: tabby-admin
    command: streamlit run tabby/admin/Home.py
    ports:
      - "8501:8501"

  # Triton inference server (FasterTransformer backend) with GPU reservation.
  triton:
    image: tabbyml/fastertransformer_backend
    container_name: tabby-triton
    command: /scripts/triton.sh
    # Triton needs enlarged shared memory for inter-process tensor transfers.
    shm_size: 1gb
    volumes:
      - ./scripts:/scripts
      - ${HF_VOLUME}
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    environment:
      MODEL_NAME: ${MODEL_NAME}
    depends_on:
      init:
        condition: service_completed_successfully
    healthcheck:
      # Triton exposes Prometheus metrics on 8002; readiness is inferred from
      # that endpoint answering. start_period allows slow model loading.
      test: ["CMD", "curl", "-f", "http://localhost:8002/metrics"]
      interval: 2s
      timeout: 2s
      start_period: 120s

  # Log shipper: tails the shared logs volume per ./config/vector.toml.
  vector:
    image: timberio/vector:0.28.1-alpine
    container_name: tabby-vector
    volumes:
      - ./config/vector.toml:/etc/vector/vector.toml:ro
      - ${LOGS_VOLUME}