# AITools/docker-compose.yml
# Exported 2025-10-11 23:13:08 +02:00 — 52 lines, 1.2 KiB, YAML
# (header retained from the file-viewer export this was copied from)
services:
  # Ollama LLM server, GPU-accelerated via the nvidia device reservation below.
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    restart: unless-stopped
    ports:
      - "11434:11434"  # quoted: host:container port strings must not hit YAML's colon-scalar parsing
    volumes:
      - /mnt/e/volumes/ollama/data:/root/.ollama  # model store persisted on the E: mount
    environment:
      - OLLAMA_HOST=0.0.0.0:11434  # listen on all interfaces inside the container
      # Optional: Set GPU device if you have multiple GPUs
      # - NVIDIA_VISIBLE_DEVICES=0
    command: serve
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    networks:
      - app-network
    healthcheck:
      test: ["CMD", "ollama", "list"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s  # give the server time to start before health counts against it

  # Speech-to-text (faster-whisper) from linuxserver.io; gpu-legacy tag + `gpus: all`
  # for GPU transcription.
  faster-whisper:
    image: lscr.io/linuxserver/faster-whisper:gpu-legacy
    container_name: faster-whisper
    gpus: all
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Paris
      - WHISPER_MODEL=turbo  # good compromise for an RTX 4060 Ti
      - WHISPER_LANG=fr
      - WHISPER_BEAM=5  # accuracy vs. speed trade-off
    volumes:
      - /mnt/e/volumes/faster-whisper/audio:/app
      - /mnt/e/volumes/faster-whisper/models:/root/.cache/whisper  # cache models across restarts
    ports:
      - "10300:10300"  # quoted — unquoted `10300:10300` risks YAML colon-scalar misparsing
    restart: unless-stopped
    # FIX: join the shared network; without this the service sits on the compose
    # default network and cannot reach `ollama` by name.
    networks:
      - app-network

networks:
  app-network:
    driver: bridge