# VideoTranscriber Docker Configuration
# Copy this file to .env and modify the values as needed

# =============================================================================
# DOCKER VOLUME PATHS (Host Directories)
# =============================================================================

# Path to your video files directory on the host
# This directory will be mounted into the container at /app/data/videos
VIDEO_PATH=./videos

# Path where outputs (transcripts, summaries) will be saved on the host
# This directory will be mounted into the container at /app/data/outputs
OUTPUT_PATH=./outputs

# Path for caching ML models and processed files (improves performance)
# This directory will be mounted into the container at /app/data/cache
CACHE_PATH=./cache

# Optional: Configuration directory for custom settings
CONFIG_PATH=./config

# =============================================================================
# OLLAMA CONFIGURATION
# =============================================================================

# Ollama API URL - how the container accesses your host Ollama service
# For Windows/Mac with Docker Desktop: use host.docker.internal
# For Linux: use host networking or the actual host IP
OLLAMA_API_URL=http://host.docker.internal:11434/api

# =============================================================================
# ML MODEL CONFIGURATION
# =============================================================================

# HuggingFace token for advanced features (speaker diarization, etc.)
# Get your token at: https://huggingface.co/settings/tokens
# Leave empty if not using advanced features
HF_TOKEN=

# GPU Configuration
# Specify which GPU devices to use (leave empty for all available)
# Examples: "0" for first GPU, "0,1" for first two GPUs
CUDA_VISIBLE_DEVICES=

# =============================================================================
# DOCKER-SPECIFIC SETTINGS
# =============================================================================

# Container name (change if you want to run multiple instances)
CONTAINER_NAME=videotranscriber

# Port mapping (host:container)
HOST_PORT=8501

# =============================================================================
# EXAMPLE USAGE
# =============================================================================

# 1. Copy this file: cp docker.env.example .env
# 2. Edit the paths to match your system
# 3. Make sure Ollama is running on your host: ollama serve
# 4. Start the container: docker-compose up -d
# 5. Access the app at: http://localhost:8501