Enhance README.md with Docker installation instructions and make the Ollama API endpoint configurable via environment variable.

This commit is contained in:
Your Name
2025-07-17 00:05:23 -04:00
parent c2ee2394d2
commit dcf13c1423
7 changed files with 568 additions and 2 deletions
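The endpoint change means the app reads OLLAMA_API_URL from the environment instead of hard-coding the host. A minimal sketch of that pattern in Python, assuming the app calls Ollama's generate endpoint over HTTP (the summarize function and the default model name are illustrative, not the project's actual code):

import os
import requests

# Base URL for the Ollama API; falls back to the local default when the
# OLLAMA_API_URL environment variable is unset. The variable name matches
# the one set in docker-compose.yml below.
OLLAMA_API_URL = os.environ.get("OLLAMA_API_URL", "http://localhost:11434/api")

def summarize(prompt: str, model: str = "llama3") -> str:
    """Send a prompt to Ollama's /generate endpoint and return the completion."""
    resp = requests.post(
        f"{OLLAMA_API_URL}/generate",
        json={"model": model, "prompt": prompt, "stream": False},
        timeout=120,
    )
    resp.raise_for_status()
    return resp.json()["response"]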

docker-compose.yml Normal file

@@ -0,0 +1,51 @@
version: '3.8'

services:
  videotranscriber:
    build: .
    container_name: videotranscriber
    ports:
      - "8501:8501"
    volumes:
      # Mount your video files directory (change the left path to your actual videos folder)
      - "${VIDEO_PATH:-./videos}:/app/data/videos"
      # Mount output directory for transcripts and summaries
      - "${OUTPUT_PATH:-./outputs}:/app/data/outputs"
      # Mount cache directory for model caching (optional, improves performance)
      - "${CACHE_PATH:-./cache}:/app/data/cache"
      # Mount a config directory if needed
      - "${CONFIG_PATH:-./config}:/app/config"
    environment:
      # Ollama configuration for host access
      - OLLAMA_API_URL=${OLLAMA_API_URL:-http://host.docker.internal:11434/api}
      # Optional: HuggingFace token for advanced features
      - HF_TOKEN=${HF_TOKEN:-}
      # GPU configuration
      - CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-}
      # Cache settings
      - TRANSFORMERS_CACHE=/app/data/cache/transformers
      - WHISPER_CACHE=/app/data/cache/whisper
    # For GPU access (uncomment if you have an NVIDIA GPU and nvidia-docker)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
    restart: unless-stopped
    # For Linux hosts, you might prefer host networking for better Ollama access;
    # bridge networking with host.docker.internal works on Windows/Mac.
    # network_mode: host  # Uncomment for Linux hosts
    networks:
      - videotranscriber-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8501/_stcore/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s

networks:
  videotranscriber-network:
    driver: bridge
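The ${VAR:-default} fallbacks mean docker compose up works with no extra configuration, and each mount path or URL can be overridden from a .env file next to docker-compose.yml. An example with placeholder values to adapt to your setup:

# .env
VIDEO_PATH=./videos
OUTPUT_PATH=./outputs
CACHE_PATH=./cache
OLLAMA_API_URL=http://host.docker.internal:11434/api
# On Linux with network_mode: host, use http://localhost:11434/api instead
HF_TOKEN=

Then build and start the service, and verify it with the same endpoint the healthcheck polls:

docker compose up -d --build
curl -f http://localhost:8501/_stcore/health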