Files
TalkEdit/utils/validation.py
Your Name 70c5d32413 feat: Add streaming Ollama support, model caching, and UI improvements
- Add streaming summarization via Ollama API (stream_summarize_with_ollama)

- Cache ML models with @st.cache_resource (diarization, NER, translation, Whisper)

- Add temp file cleanup for extracted audio

- Add system capabilities detection (FFmpeg, GPU info)

- Add get_video_duration utility

- Improve validation with FFmpeg check

- Rewrite app.py with streaming support and UI enhancements

- Clean up redundant comments and unused imports across all utils
2026-02-18 10:26:09 -05:00

39 lines
1.1 KiB
Python

import logging
import shutil
from pathlib import Path
from typing import Optional
logger = logging.getLogger(__name__)
def validate_environment(obs_path: Optional[Path] = None):
    """Validate environment and prerequisites.

    Args:
        obs_path: Optional directory that must exist (e.g. the recordings
            folder). The check is skipped when None.

    Returns:
        list[str]: Human-readable error messages; empty when all checks pass.
    """
    errors = []
    # Only validate the directory when a path was actually supplied.
    if obs_path and not obs_path.exists():
        errors.append(f"Directory not found: {obs_path}")
    # FFmpeg is a hard prerequisite; which() searches the current PATH.
    if not shutil.which("ffmpeg"):
        errors.append("FFmpeg is not installed or not in PATH. Install it from https://ffmpeg.org/download.html")
    return errors
def get_system_capabilities():
    """Detect hardware/software capabilities for display in the UI.

    Returns:
        dict: Keys ``ffmpeg`` (bool), ``cuda`` (bool), ``mps`` (bool),
        ``gpu_name`` (str or None), and ``gpu_memory`` (total bytes,
        int or None). GPU fields are populated only when CUDA device 0
        is present.
    """
    # Imported lazily so merely importing this module stays cheap.
    import torch

    cuda_available = torch.cuda.is_available()
    mps_backend = getattr(torch.backends, "mps", None)
    capabilities = {
        "ffmpeg": shutil.which("ffmpeg") is not None,
        "cuda": cuda_available,
        "mps": mps_backend is not None and mps_backend.is_available(),
        "gpu_name": None,
        "gpu_memory": None,
    }
    if cuda_available and torch.cuda.device_count() > 0:
        device = torch.cuda.get_device_properties(0)
        capabilities["gpu_name"] = device.name
        capabilities["gpu_memory"] = device.total_memory
    return capabilities