From 33cca5f552d784ff79db07b2570737d487f9fba2 Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 3 Mar 2026 06:31:04 -0500 Subject: [PATCH] Initial CutScript release - Open-source AI-powered text-based video editor CutScript is a local-first, Descript-like video editor where you edit video by editing text. Delete a word from the transcript and it's cut from the video. Features: - Word-level transcription with WhisperX - Text-based video editing with undo/redo - AI filler word removal (Ollama/OpenAI/Claude) - AI clip creation for shorts - Waveform timeline with virtualized transcript - FFmpeg stream-copy (fast) and re-encode (4K) export - Caption burn-in and sidecar SRT generation - Studio Sound audio enhancement (DeepFilterNet) - Keyboard shortcuts (J/K/L, Space, Delete, Ctrl+Z/S/E) - Encrypted API key storage - Project save/load (.aive files) Architecture: - Electron + React + Tailwind (frontend) - FastAPI + Python (backend) - WhisperX for transcription - FFmpeg for video processing - Multi-provider AI support Performance optimizations: - RAF-throttled time updates - Zustand selectors for granular subscriptions - Dual-canvas waveform rendering - Virtualized transcript with react-virtuoso Built on top of DataAnts-AI/VideoTranscriber, completely rewritten as a desktop application. 
License: MIT --- .github/workflows/docker-build.yml | 97 - .gitignore | 24 +- DOCKER.md | 305 -- Dockerfile | 47 - Dockerfile.gpu | 56 - GEMINI_INSIGHTS.md | 105 - INSTALLATION.md | 141 - LICENSE | 2 +- QUICK-FIX.md | 63 - README.md | 266 +- app.py | 767 ----- backend/main.py | 117 + backend/requirements.txt | 33 + backend/routers/__init__.py | 0 backend/routers/ai.py | 83 + backend/routers/audio.py | 38 + backend/routers/captions.py | 65 + backend/routers/export.py | 156 + backend/routers/transcribe.py | 53 + backend/services/__init__.py | 0 backend/services/ai_provider.py | 211 ++ backend/services/audio_cleaner.py | 79 + backend/services/background_removal.py | 59 + backend/services/caption_generator.py | 148 + backend/services/diarization.py | 98 + backend/services/transcription.py | 205 ++ backend/services/video_editor.py | 271 ++ backend/utils/__init__.py | 0 {utils => backend/utils}/audio_processing.py | 0 {utils => backend/utils}/cache.py | 0 {utils => backend/utils}/gpu_utils.py | 0 docker-compose.prebuilt.yml | 70 - docker-compose.yml | 51 - docker.env.example | 63 - electron/main.js | 131 + electron/preload.js | 12 + electron/python-bridge.js | 105 + frontend/index.html | 16 + frontend/package-lock.json | 2817 ++++++++++++++++++ frontend/package.json | 31 + frontend/postcss.config.js | 6 + frontend/src/App.tsx | 310 ++ frontend/src/components/AIPanel.tsx | 332 +++ frontend/src/components/ExportDialog.tsx | 229 ++ frontend/src/components/SettingsPanel.tsx | 192 ++ frontend/src/components/TranscriptEditor.tsx | 204 ++ frontend/src/components/VideoPlayer.tsx | 133 + frontend/src/components/WaveformTimeline.tsx | 220 ++ frontend/src/hooks/useKeyboardShortcuts.ts | 236 ++ frontend/src/hooks/useVideoSync.ts | 69 + frontend/src/index.css | 37 + frontend/src/main.tsx | 10 + frontend/src/store/aiStore.ts | 129 + frontend/src/store/editorStore.ts | 232 ++ frontend/src/types/project.ts | 86 + frontend/src/vite-env.d.ts | 16 + frontend/tailwind.config.js | 30 + 
frontend/tsconfig.json | 23 + frontend/vite.config.ts | 15 + install.bat | 25 - install.py | 307 -- install.sh | 26 - package.json | 49 + requirements.txt | 54 - shared/project-schema.json | 55 + utils/diarization.py | 226 -- utils/export.py | 284 -- utils/keyword_extraction.py | 334 --- utils/ollama_integration.py | 201 -- utils/summarization.py | 111 - utils/transcription.py | 103 - utils/translation.py | 262 -- utils/validation.py | 38 - 73 files changed, 7463 insertions(+), 3906 deletions(-) delete mode 100644 .github/workflows/docker-build.yml delete mode 100644 DOCKER.md delete mode 100644 Dockerfile delete mode 100644 Dockerfile.gpu delete mode 100644 GEMINI_INSIGHTS.md delete mode 100644 INSTALLATION.md delete mode 100644 QUICK-FIX.md delete mode 100644 app.py create mode 100644 backend/main.py create mode 100644 backend/requirements.txt create mode 100644 backend/routers/__init__.py create mode 100644 backend/routers/ai.py create mode 100644 backend/routers/audio.py create mode 100644 backend/routers/captions.py create mode 100644 backend/routers/export.py create mode 100644 backend/routers/transcribe.py create mode 100644 backend/services/__init__.py create mode 100644 backend/services/ai_provider.py create mode 100644 backend/services/audio_cleaner.py create mode 100644 backend/services/background_removal.py create mode 100644 backend/services/caption_generator.py create mode 100644 backend/services/diarization.py create mode 100644 backend/services/transcription.py create mode 100644 backend/services/video_editor.py create mode 100644 backend/utils/__init__.py rename {utils => backend/utils}/audio_processing.py (100%) rename {utils => backend/utils}/cache.py (100%) rename {utils => backend/utils}/gpu_utils.py (100%) delete mode 100644 docker-compose.prebuilt.yml delete mode 100644 docker-compose.yml delete mode 100644 docker.env.example create mode 100644 electron/main.js create mode 100644 electron/preload.js create mode 100644 
electron/python-bridge.js create mode 100644 frontend/index.html create mode 100644 frontend/package-lock.json create mode 100644 frontend/package.json create mode 100644 frontend/postcss.config.js create mode 100644 frontend/src/App.tsx create mode 100644 frontend/src/components/AIPanel.tsx create mode 100644 frontend/src/components/ExportDialog.tsx create mode 100644 frontend/src/components/SettingsPanel.tsx create mode 100644 frontend/src/components/TranscriptEditor.tsx create mode 100644 frontend/src/components/VideoPlayer.tsx create mode 100644 frontend/src/components/WaveformTimeline.tsx create mode 100644 frontend/src/hooks/useKeyboardShortcuts.ts create mode 100644 frontend/src/hooks/useVideoSync.ts create mode 100644 frontend/src/index.css create mode 100644 frontend/src/main.tsx create mode 100644 frontend/src/store/aiStore.ts create mode 100644 frontend/src/store/editorStore.ts create mode 100644 frontend/src/types/project.ts create mode 100644 frontend/src/vite-env.d.ts create mode 100644 frontend/tailwind.config.js create mode 100644 frontend/tsconfig.json create mode 100644 frontend/vite.config.ts delete mode 100644 install.bat delete mode 100644 install.py delete mode 100644 install.sh create mode 100644 package.json delete mode 100644 requirements.txt create mode 100644 shared/project-schema.json delete mode 100644 utils/diarization.py delete mode 100644 utils/export.py delete mode 100644 utils/keyword_extraction.py delete mode 100644 utils/ollama_integration.py delete mode 100644 utils/summarization.py delete mode 100644 utils/transcription.py delete mode 100644 utils/translation.py delete mode 100644 utils/validation.py diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml deleted file mode 100644 index c5dd578..0000000 --- a/.github/workflows/docker-build.yml +++ /dev/null @@ -1,97 +0,0 @@ -name: Build and Push Docker Images - -on: - push: - branches: [ main, develop ] - tags: [ 'v*' ] - pull_request: - branches: [ 
main ] - release: - types: [published] - -env: - REGISTRY: ghcr.io - -jobs: - build: - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - - steps: - - name: Free up disk space - run: | - echo "Disk space before cleanup:" - df -h - # Remove unnecessary large packages - sudo rm -rf /usr/share/dotnet - sudo rm -rf /usr/local/lib/android - sudo rm -rf /opt/ghc - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /usr/local/share/boost - sudo rm -rf /usr/share/swift - # Clean apt cache - sudo apt-get clean - # Clean Docker - docker system prune -af - echo "Disk space after cleanup:" - df -h - - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Log in to Container Registry - uses: docker/login-action@v3 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Convert repository name to lowercase - id: lowercase-repo - run: echo "repository=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT - - - name: Extract metadata - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY }}/${{ steps.lowercase-repo.outputs.repository }} - tags: | - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=raw,value=latest,enable={{is_default_branch}} - - - name: Build and push Docker image (CPU) - uses: docker/build-push-action@v5 - with: - context: . - platforms: linux/amd64 - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - cache-from: type=gha - cache-to: type=gha,mode=max - - - name: Clean up Docker to free space for GPU build - run: docker system prune -af - - - name: Build and push GPU-enabled image - uses: docker/build-push-action@v5 - with: - context: . 
- file: Dockerfile.gpu - platforms: linux/amd64 - push: true - tags: | - ${{ env.REGISTRY }}/${{ steps.lowercase-repo.outputs.repository }}:latest-gpu - ${{ env.REGISTRY }}/${{ steps.lowercase-repo.outputs.repository }}:${{ github.sha }}-gpu - labels: ${{ steps.meta.outputs.labels }} - cache-from: type=gha - cache-to: type=gha,mode=max \ No newline at end of file diff --git a/.gitignore b/.gitignore index 0bb6ebd..9f38f14 100644 --- a/.gitignore +++ b/.gitignore @@ -1,13 +1,33 @@ -# Python virtual environment +# Dependencies +node_modules/ + +# Build output +frontend/dist/ + +# Python venv/ __pycache__/ *.pyc +*.pyo +*.egg-info/ -# IDE files +# IDE / Editor .vscode/ .idea/ +.cursor/ # OS files .env .DS_Store Thumbs.db + +# Logs +*.log + +# Lock files (root only — frontend lock is committed) +/package-lock.json + +# Electron build output +dist/ +build/ +*.asar diff --git a/DOCKER.md b/DOCKER.md deleted file mode 100644 index 4fe009d..0000000 --- a/DOCKER.md +++ /dev/null @@ -1,305 +0,0 @@ -# Docker Deployment Guide for VideoTranscriber - -This guide explains how to run VideoTranscriber in a Docker container while using Ollama models on your host system. - -## Architecture Overview - -``` -┌─────────────────────────────────────────┐ -│ Host System │ -│ ┌─────────────────┐ ┌──────────────────│ -│ │ Ollama Service │ │ Video Files │ -│ │ (port 11434) │ │ Directory │ -│ └─────────────────┘ └──────────────────│ -│ ▲ ▲ │ -│ │ │ │ -│ ┌───────┼─────────────────────┼─────────│ -│ │ Docker Container │ │ -│ │ ┌─────▼─────────┐ │ │ -│ │ │ VideoTranscriber │ │ -│ │ │ - Streamlit App │ │ -│ │ │ - Whisper Models │ │ -│ │ │ - ML Dependencies │ │ -│ │ └───────────────┘ │ │ -│ └────────────────────────────┼─────────│ -│ │ │ -│ Mounted Volumes ─────┘ │ -└─────────────────────────────────────────┘ -``` - -## Quick Start - -### Prerequisites - -1. **Docker & Docker Compose** installed -2. 
**Ollama running on host**: - ```bash - # Install Ollama (if not already installed) - curl -fsSL https://ollama.ai/install.sh | sh - - # Start Ollama service - ollama serve - - # Pull a model (in another terminal) - ollama pull llama3 - ``` - -### 1. Setup Environment - -```bash -# Copy environment template -cp docker.env.example .env - -# Edit .env file with your paths -# Key settings to update: -VIDEO_PATH=/path/to/your/videos -OUTPUT_PATH=/path/to/save/outputs -HF_TOKEN=your_huggingface_token_if_needed -``` - -### 2. Create Required Directories - -```bash -# Create directories for mounting -mkdir -p videos outputs cache config -``` - -### 3. Build and Run - -```bash -# Build and start the container -docker-compose up -d - -# View logs -docker-compose logs -f - -# Access the application -# Open browser to: http://localhost:8501 -``` - -## Configuration Options - -### Environment Variables - -| Variable | Description | Default | Required | -|----------|-------------|---------|----------| -| `VIDEO_PATH` | Host directory containing video files | `./videos` | Yes | -| `OUTPUT_PATH` | Host directory for outputs | `./outputs` | Yes | -| `CACHE_PATH` | Host directory for model cache | `./cache` | No | -| `OLLAMA_API_URL` | Ollama API endpoint | `http://host.docker.internal:11434/api` | No | -| `HF_TOKEN` | HuggingFace token for advanced features | - | No | -| `CUDA_VISIBLE_DEVICES` | GPU devices to use | - | No | - -### Volume Mounts - -| Host Path | Container Path | Purpose | -|-----------|----------------|---------| -| `${VIDEO_PATH}` | `/app/data/videos` | Input video files | -| `${OUTPUT_PATH}` | `/app/data/outputs` | Generated transcripts/summaries | -| `${CACHE_PATH}` | `/app/data/cache` | Model and processing cache | -| `${CONFIG_PATH}` | `/app/config` | Configuration files | - -## Platform-Specific Setup - -### Windows (Docker Desktop) - -```yaml -# In docker-compose.yml - use bridge networking -networks: - - videotranscriber-network - -environment: - - 
OLLAMA_API_URL=http://host.docker.internal:11434/api -``` - -### macOS (Docker Desktop) - -Same as Windows - uses `host.docker.internal` to access host services. - -### Linux - -Option 1 - Host Networking (Recommended): -```yaml -# In docker-compose.yml -network_mode: host - -environment: - - OLLAMA_API_URL=http://localhost:11434/api -``` - -Option 2 - Bridge Networking: -```yaml -environment: - - OLLAMA_API_URL=http://172.17.0.1:11434/api # Docker bridge IP -``` - -## GPU Support - -### NVIDIA GPU Setup - -1. **Install NVIDIA Container Toolkit**: - ```bash - # Ubuntu/Debian - curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg - curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ - sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ - sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list - sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit - sudo systemctl restart docker - ``` - -2. **Enable in docker-compose.yml**: - ```yaml - deploy: - resources: - reservations: - devices: - - driver: nvidia - count: 1 - capabilities: [gpu] - ``` - -## Usage in Container - -### Application Settings - -When running in Docker, update these settings in the VideoTranscriber UI: - -1. **Base Folder**: Set to `/app/data/videos` -2. **Ollama Models**: Should auto-detect from host -3. **GPU Settings**: Will use container GPU if configured - -### File Access - -- **Input Videos**: Place in your `${VIDEO_PATH}` directory on host -- **Outputs**: Generated files appear in `${OUTPUT_PATH}` on host -- **Cache**: Models cached in `${CACHE_PATH}` for faster subsequent runs - -## Troubleshooting - -### Common Issues - -#### 1. 
Can't Connect to Ollama - -**Symptoms**: "Ollama service is not available" message - -**Solutions**: -- Verify Ollama is running: `curl http://localhost:11434/api/tags` -- Check firewall settings -- For Linux, try host networking mode -- Verify OLLAMA_API_URL in environment - -#### 2. No Video Files Detected - -**Symptoms**: "No recordings found" message - -**Solutions**: -- Check VIDEO_PATH points to correct directory -- Ensure directory contains supported formats (.mp4, .avi, .mov, .mkv) -- Check file permissions - -#### 3. GPU Not Detected - -**Symptoms**: Processing is slow, no GPU utilization - -**Solutions**: -- Install NVIDIA Container Toolkit -- Uncomment GPU section in docker-compose.yml -- Verify: `docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi` - -#### 4. Permission Issues - -**Symptoms**: Cannot write to output directory - -**Solutions**: -```bash -# Fix permissions -sudo chown -R $(id -u):$(id -g) outputs cache config -chmod -R 755 outputs cache config -``` - -### Debugging - -```bash -# View container logs -docker-compose logs -f videotranscriber - -# Execute shell in container -docker-compose exec videotranscriber bash - -# Check Ollama connectivity from container -docker-compose exec videotranscriber curl -f $OLLAMA_API_URL/tags - -# Monitor resource usage -docker stats videotranscriber -``` - -## Advanced Configuration - -### Custom Dockerfile - -For specialized requirements, modify the Dockerfile: - -```dockerfile -# Add custom dependencies -RUN pip install your-custom-package - -# Set custom environment variables -ENV YOUR_CUSTOM_VAR=value - -# Copy custom configuration -COPY custom-config.yaml /app/config/ -``` - -### Multi-Instance Deployment - -Run multiple instances for different use cases: - -```bash -# Copy docker-compose.yml to docker-compose.prod.yml -# Modify ports and paths -docker-compose -f docker-compose.prod.yml up -d -``` - -### CI/CD Integration - -```yaml -# .github/workflows/docker.yml -name: Build and Deploy -on: - 
push: - branches: [main] -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Build Docker image - run: docker build -t videotranscriber . -``` - -## Performance Optimization - -### Memory Management - -```yaml -# In docker-compose.yml -deploy: - resources: - limits: - memory: 8G - reservations: - memory: 4G -``` - -### Model Caching - -- Use persistent volumes for `/app/data/cache` -- Pre-download models to reduce startup time -- Configure appropriate cache size limits - -### Network Optimization - -- Use host networking on Linux for better performance -- Consider running Ollama and VideoTranscriber on same machine -- Use SSD storage for cache directories \ No newline at end of file diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 9a8398e..0000000 --- a/Dockerfile +++ /dev/null @@ -1,47 +0,0 @@ -FROM python:3.11-slim - -# Set working directory -WORKDIR /app - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - ffmpeg \ - git \ - wget \ - curl \ - build-essential \ - && rm -rf /var/lib/apt/lists/* - -# Copy requirements first for better Docker layer caching -COPY requirements.txt . - -# Upgrade pip and install build tools -RUN pip install --upgrade pip setuptools wheel - -# Install PyTorch CPU version first (for non-GPU builds) -RUN pip install --no-cache-dir torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu - -# Install remaining Python dependencies -RUN pip install --no-cache-dir -r requirements.txt - -# Copy application code -COPY . . 
- -# Create directories for mounted volumes -RUN mkdir -p /app/data/videos /app/data/outputs /app/data/cache - -# Set environment variables -ENV STREAMLIT_SERVER_PORT=8501 -ENV STREAMLIT_SERVER_ADDRESS=0.0.0.0 -ENV STREAMLIT_SERVER_HEADLESS=true -ENV STREAMLIT_BROWSER_GATHER_USAGE_STATS=false - -# Expose Streamlit port -EXPOSE 8501 - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \ - CMD curl -f http://localhost:8501/_stcore/health || exit 1 - -# Start the application -CMD ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"] \ No newline at end of file diff --git a/Dockerfile.gpu b/Dockerfile.gpu deleted file mode 100644 index ab562af..0000000 --- a/Dockerfile.gpu +++ /dev/null @@ -1,56 +0,0 @@ -FROM python:3.11-slim - -# Set working directory -WORKDIR /app - -# Install system dependencies including CUDA-related packages -RUN apt-get update && apt-get install -y \ - ffmpeg \ - git \ - wget \ - curl \ - build-essential \ - && rm -rf /var/lib/apt/lists/* - -# Copy requirements first for better Docker layer caching -COPY requirements.txt . - -# Upgrade pip and install build tools -RUN pip install --upgrade pip setuptools wheel - -# Install CUDA-optimized PyTorch FIRST (before other requirements) -# Using latest versions from cu118 index for SpeechBrain 1.0 / pyannote diarization compatibility -RUN pip install --no-cache-dir torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 - -# Install numpy first as many packages depend on it -RUN pip install --no-cache-dir "numpy>=1.24.0" - -# Install remaining dependencies from requirements.txt -RUN pip install --no-cache-dir -r requirements.txt - -# Copy application code -COPY . . 
- -# Create directories for mounted volumes -RUN mkdir -p /app/data/videos /app/data/outputs /app/data/cache - -# Set environment variables -ENV STREAMLIT_SERVER_PORT=8501 -ENV STREAMLIT_SERVER_ADDRESS=0.0.0.0 -ENV STREAMLIT_SERVER_HEADLESS=true -ENV STREAMLIT_BROWSER_GATHER_USAGE_STATS=false - -# GPU-specific environment variables -ENV CUDA_VISIBLE_DEVICES=0 -ENV NVIDIA_VISIBLE_DEVICES=all -ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility - -# Expose Streamlit port -EXPOSE 8501 - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \ - CMD curl -f http://localhost:8501/_stcore/health || exit 1 - -# Start the application -CMD ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"] \ No newline at end of file diff --git a/GEMINI_INSIGHTS.md b/GEMINI_INSIGHTS.md deleted file mode 100644 index 0f16264..0000000 --- a/GEMINI_INSIGHTS.md +++ /dev/null @@ -1,105 +0,0 @@ -# Gemini Insights: OBS Recording Transcriber - -## Project Overview -The OBS Recording Transcriber is a Python application built with Streamlit that processes video recordings (particularly from OBS Studio) to generate transcripts and summaries using AI models. The application uses Whisper for transcription and Hugging Face Transformers for summarization. - -## Key Improvement Areas - -### 1. UI Enhancements -- **Implemented:** - - Responsive layout with columns for better organization - - Expanded sidebar with categorized settings - - Custom CSS for improved button styling - - Spinner for long-running operations - - Expanded transcript view by default - -- **Additional Recommendations:** - - Add a dark mode toggle - - Implement progress bars for each processing step - - Add tooltips for complex options - - Create a dashboard view for batch processing results - - Add visualization of transcript segments with timestamps - -### 2. 
Ollama Local API Integration -- **Implemented:** - - Local API integration for offline summarization - - Model selection from available Ollama models - - Chunking for long texts - - Fallback to online models when Ollama fails - -- **Additional Recommendations:** - - Add temperature and other generation parameters as advanced options - - Implement streaming responses for real-time feedback - - Cache results to avoid reprocessing - - Add support for custom Ollama model creation with specific instructions - - Implement parallel processing for multiple chunks - -### 3. Subtitle Export Formats -- **Implemented:** - - SRT export with proper formatting - - ASS export with basic styling - - Multi-format export options - - Automatic segment creation from plain text - -- **Additional Recommendations:** - - Add customizable styling options for ASS subtitles - - Implement subtitle editing before export - - Add support for VTT format for web videos - - Implement subtitle timing adjustment - - Add batch export for multiple files - -### 4. Architecture and Code Quality -- **Recommendations:** - - Implement proper error handling and logging throughout - - Add unit tests for critical components - - Create a configuration file for default settings - - Implement caching for processed files - - Add type hints throughout the codebase - - Document API endpoints for potential future web service - -### 5. Performance Optimizations -- **Recommendations:** - - Implement parallel processing for batch operations - - Add GPU acceleration configuration options - - Optimize memory usage for large files - - Implement incremental processing for very long recordings - - Add compression options for exported files - -### 6. 
Additional Features -- **Recommendations:** - - Speaker diarization (identifying different speakers) - - Language detection and translation - - Keyword extraction and timestamp linking - - Integration with video editing software - - Batch processing queue with email notifications - - Custom vocabulary for domain-specific terminology - -## Implementation Roadmap -1. **Phase 1 (Completed):** Basic UI improvements, Ollama integration, and subtitle export -2. **Phase 2 (Completed):** Performance optimizations and additional export formats - - Added WebVTT export format for web videos - - Implemented GPU acceleration with automatic device selection - - Added caching system for faster processing of previously transcribed files - - Optimized memory usage with configurable memory limits - - Added compression options for exported files - - Enhanced ASS subtitle styling options - - Added progress indicators for better user feedback -3. **Phase 3 (Completed):** Advanced features like speaker diarization and translation - - Implemented speaker diarization to identify different speakers in recordings - - Added language detection and translation capabilities - - Integrated keyword extraction with timestamp linking - - Created interactive transcript with keyword highlighting - - Added named entity recognition for better content analysis - - Generated keyword index with timestamp references - - Provided speaker statistics and word count analysis -4. **Phase 4:** Integration with other tools and services - -## Technical Considerations -- Ensure compatibility with different Whisper model sizes -- Handle large files efficiently to prevent memory issues -- Provide graceful degradation when optional dependencies are missing -- Maintain backward compatibility with existing workflows -- Consider containerization for easier deployment - -## Conclusion -The OBS Recording Transcriber has a solid foundation but can be significantly enhanced with the suggested improvements. 
The focus should be on improving user experience, adding offline processing capabilities, and expanding export options to make the tool more versatile for different use cases. \ No newline at end of file diff --git a/INSTALLATION.md b/INSTALLATION.md deleted file mode 100644 index 5d90c44..0000000 --- a/INSTALLATION.md +++ /dev/null @@ -1,141 +0,0 @@ -# Installation Guide for OBS Recording Transcriber - -This guide will help you install all the necessary dependencies for the OBS Recording Transcriber application, including the advanced features from Phase 3. - -## Prerequisites - -Before installing the Python packages, you need to set up some prerequisites: - -### 1. Python 3.8 or higher - -Make sure you have Python 3.8 or higher installed. You can download it from [python.org](https://www.python.org/downloads/). - -### 2. FFmpeg - -FFmpeg is required for audio processing: - -- **Windows**: - - Download from [gyan.dev/ffmpeg/builds](https://www.gyan.dev/ffmpeg/builds/) - - Extract the ZIP file - - Add the `bin` folder to your system PATH - -- **macOS**: - ```bash - brew install ffmpeg - ``` - -- **Linux**: - ```bash - sudo apt update - sudo apt install ffmpeg - ``` - -### 3. Visual C++ Build Tools (Windows only) - -Some packages like `tokenizers` require C++ build tools: - -1. Download and install [Visual C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/) -2. During installation, select "Desktop development with C++" - -## Installation Steps - -### 1. Create a Virtual Environment (Recommended) - -```bash -# Create a virtual environment -python -m venv venv - -# Activate the virtual environment -# Windows -venv\Scripts\activate -# macOS/Linux -source venv/bin/activate -``` - -### 2. 
Install PyTorch - -For better performance, install PyTorch with CUDA support if you have an NVIDIA GPU: - -```bash -# Windows/Linux with CUDA -pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 - -# macOS or CPU-only -pip install torch torchvision torchaudio -``` - -### 3. Install Dependencies - -```bash -# Install all dependencies from requirements.txt -pip install -r requirements.txt -``` - -### 4. Troubleshooting Common Issues - -#### Tokenizers Installation Issues - -If you encounter issues with `tokenizers` installation: - -1. Make sure you have Visual C++ Build Tools installed (Windows) -2. Try installing Rust: [rustup.rs](https://rustup.rs/) -3. Install tokenizers separately: - ```bash - pip install tokenizers --no-binary tokenizers - ``` - -#### PyAnnote.Audio Access - -To use speaker diarization, you need a HuggingFace token with access to the pyannote models: - -1. Create an account on [HuggingFace](https://huggingface.co/) -2. Generate an access token at [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) -3. Request access to [pyannote/speaker-diarization-3.0](https://huggingface.co/pyannote/speaker-diarization-3.0) -4. Set the token in the application when prompted or as an environment variable: - ```bash - # Windows - set HF_TOKEN=your_token_here - # macOS/Linux - export HF_TOKEN=your_token_here - ``` - -#### Memory Issues with Large Files - -If you encounter memory issues with large files: - -1. Use a smaller Whisper model (e.g., "base" instead of "large") -2. Reduce the GPU memory fraction in the application settings -3. Increase your system's swap space/virtual memory - -## Running the Application - -After installation, run the application with: - -```bash -streamlit run app.py -``` - -## Optional: Ollama Setup for Local Summarization - -To use Ollama for local summarization: - -1. Install Ollama from [ollama.ai](https://ollama.ai/) -2. 
Pull a model: - ```bash - ollama pull llama3 - ``` -3. Uncomment the Ollama line in requirements.txt and install: - ```bash - pip install ollama - ``` - -## Verifying Installation - -To verify that all components are working correctly: - -1. Run the application -2. Check that GPU acceleration is available (if applicable) -3. Test a small video file with basic transcription -4. Gradually enable advanced features like diarization and translation - -If you encounter any issues, check the application logs for specific error messages. \ No newline at end of file diff --git a/LICENSE b/LICENSE index 83db24d..0989410 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2025 DataAnts-AI +Copyright (c) 2026 DataAnts AI Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/QUICK-FIX.md b/QUICK-FIX.md deleted file mode 100644 index e085af5..0000000 --- a/QUICK-FIX.md +++ /dev/null @@ -1,63 +0,0 @@ -# 🚨 Quick Fix for PyTorch Compatibility Error - -If you're seeing the `torch.compiler.disable` error, here's how to fix it: - -## Immediate Fix - -```bash -# Stop the current container -docker-compose down - -# Remove the old image to force rebuild with fixed versions -docker rmi $(docker images | grep videotranscriber | awk '{print $3}') - -# Rebuild with fixed dependencies -docker-compose up -d --build -``` - -## Better Solution: Use Prebuilt Images - -⚠️ **Note**: GitHub Actions had a naming issue that's now fixed. See [FIX-GITHUB-ACTIONS.md](FIX-GITHUB-ACTIONS.md) for details. - -Once prebuilt images are available, use them instead: - -```bash -# Check if images are ready -docker pull ghcr.io/dataants-ai/videotranscriber:latest - -# If successful, stop current container and use prebuilt image -docker-compose down -docker-compose -f docker-compose.prebuilt.yml up -d -``` - -## What Was Fixed - -1. 
**Version Pinning**: Updated `requirements.txt` with compatible versions: - - `torch==2.0.1` (was `>=1.7.0`) - - `pytorch-lightning==2.0.6` (compatible with torch 2.0.1) - - `pyannote.audio==3.1.1` (updated to compatible version) - -2. **Build Process**: Removed duplicate PyTorch installation that could cause conflicts - -3. **Prebuilt Images**: Created GitHub Actions to build reliable, tested images - -## Verification - -After fixing, you should see the Streamlit app load without errors at `http://localhost:8501` - -## If Still Having Issues - -1. **Clear Docker cache**: - ```bash - docker system prune -a - ``` - -2. **Check logs**: - ```bash - docker-compose logs -f - ``` - -3. **Manual rebuild**: - ```bash - docker build --no-cache -t videotranscriber . - ``` \ No newline at end of file diff --git a/README.md b/README.md index a92ab03..6164363 100644 --- a/README.md +++ b/README.md @@ -1,198 +1,130 @@ -# Video Transcriber +# CutScript -## Project Overview -The Video Recording Transcriber is a Python application built with Streamlit that processes video and audio recordings to generate transcripts and summaries using AI models. The application uses Whisper for transcription and Hugging Face Transformers for summarization. +An open-source, local-first, Descript-like text-based video editor powered by AI. Edit video by editing text — delete a word from the transcript and it's cut from the video. 
-**Supported Formats**: MP4, AVI, MOV, MKV (video) and M4A (audio) +## Architecture +- **Electron + React** desktop app with Tailwind CSS +- **FastAPI** Python backend (spawned as child process) +- **WhisperX** for word-level transcription with alignment +- **FFmpeg** for video processing (stream-copy and re-encode) +- **Ollama / OpenAI / Claude** for AI features (filler removal, clip creation) -![SuiteQL_query_UI-1-Thumbnail](https://github.com/user-attachments/assets/72aaf238-6615-4739-b77f-c4eb9ff96996) +## Quick Start -Demo here +### Prerequisites -https://github.com/user-attachments/assets/990e63fc-232e-46a0-afdf-ca8836d46a13 +- Node.js 18+ +- Python 3.10+ +- FFmpeg (in PATH) +- (Optional) Ollama for local AI features +### Install -## Installation - -### 🐳 Docker Installation (Recommended) - -**Benefits**: Isolated environment, no dependency conflicts, easy deployment - -#### Option A: Prebuilt Images (Fastest & Most Reliable) ```bash -# 1. Clone repository for config files -git clone https://github.com/DataAnts-AI/VideoTranscriber.git -cd VideoTranscriber +# Root dependencies (Electron, concurrently) +npm install -# 2. Setup environment -cp docker.env.example .env -# Edit .env with your video directory paths +# Frontend dependencies (React, Tailwind, Zustand) +cd frontend && npm install && cd .. -# 3. Ensure Ollama is running on host -ollama serve # In separate terminal -ollama pull llama3 - -# 4. Start with prebuilt image -docker-compose -f docker-compose.prebuilt.yml up -d - -# 5. Access application -# Open browser to: http://localhost:8501 +# Backend dependencies +cd backend && pip install -r requirements.txt && cd .. ``` -#### Option B: Build from Source (Development) +### Run (Development) + ```bash -# Use the local build approach -docker-compose up -d +# Start all three (backend + frontend + electron) +npm run dev ``` -See [DOCKER.md](DOCKER.md) for complete Docker setup guide. 
+Or run them separately: -### Easy Installation (Recommended) +```bash +# Terminal 1: Backend +cd backend && python -m uvicorn main:app --reload --port 8642 -#### Windows -1. Download or clone the repository -2. Run `install.bat` by double-clicking it -3. Follow the on-screen instructions +# Terminal 2: Frontend +cd frontend && npm run dev -#### Linux/macOS -1. Download or clone the repository -2. Open a terminal in the project directory -3. Make the install script executable: `chmod +x install.sh` -4. Run the script: `./install.sh` -5. Follow the on-screen instructions - -### Manual Installation -1. Clone the repo. -``` -git clone https://github.com/DataAnts-AI/VideoTranscriber.git -cd VideoTranscriber +# Terminal 3: Electron +npx electron . ``` -2. Install dependencies: +## Project Structure + ``` -pip install -r requirements.txt +cutscript/ +├── electron/ # Electron main process +│ ├── main.js # App entry, spawns Python backend +│ ├── preload.js # Secure IPC bridge +│ └── python-bridge.js +├── frontend/ # React + Vite + Tailwind +│ └── src/ +│ ├── components/ # VideoPlayer, TranscriptEditor, etc. +│ ├── store/ # Zustand state (editorStore, aiStore) +│ ├── hooks/ # useVideoSync, useKeyboardShortcuts +│ └── types/ # TypeScript interfaces +├── backend/ # FastAPI Python backend +│ ├── main.py +│ ├── routers/ # API endpoints +│ ├── services/ # Core logic (transcription, editing, AI) +│ └── utils/ # GPU, cache, audio helpers +└── shared/ # Project schema ``` -Notes: -- Ensure that the versions align with the features you use and your system compatibility. -- torch version should match the capabilities of your hardware (e.g., CUDA support for GPUs). -- For advanced features like speaker diarization, you'll need a HuggingFace token. -- See `INSTALLATION.md` for detailed instructions and troubleshooting. +## Features -3. 
Run the application: -``` -streamlit run app.py -``` +| Feature | Status | +|---------|--------| +| Word-level transcription (WhisperX) | Done | +| Text-based video editing | Done | +| Undo/redo | Done | +| Waveform timeline | Done | +| FFmpeg stream-copy export | Done | +| FFmpeg re-encode (up to 4K) | Done | +| AI filler word removal | Done | +| AI clip creation (Shorts) | Done | +| Ollama + OpenAI + Claude | Done | +| Word-level captions (SRT/VTT/ASS) | Done | +| Caption burn-in on export | Done | +| Studio Sound (DeepFilterNet) | Done | +| Keyboard shortcuts (J/K/L) | Done | +| Speaker diarization | Done | +| Virtualized transcript (react-virtuoso) | Done | +| Encrypted API key storage | Done | +| Project save/load (.cutscript) | Done | +| AI background removal | Planned | -## Usage -1. Set your base folder where video/audio recordings are stored -2. Select a recording from the dropdown (supports MP4, AVI, MOV, MKV, M4A) -3. Choose transcription and summarization models -4. Configure performance settings (GPU acceleration, caching) -5. Select export formats and compression options -6. Click "Process Recording" to start +## Keyboard Shortcuts -## Advanced Features -- **Speaker Diarization**: Identify and label different speakers in your recordings -- **Translation**: Automatically detect language and translate to multiple languages -- **Keyword Extraction**: Extract important keywords with timestamp links -- **Interactive Transcript**: Navigate through the transcript with keyword highlighting -- **GPU Acceleration**: Utilize your GPU for faster processing -- **Caching**: Save processing time by caching results +| Key | Action | +|-----|--------| +| Space | Play / Pause | +| J / K / L | Reverse / Pause / Forward | +| ← / → | Seek ±5 seconds | +| Delete | Delete selected words | +| Ctrl+Z | Undo | +| Ctrl+Shift+Z | Redo | +| Ctrl+S | Save project | +| Ctrl+E | Export | +| ? 
| Shortcut cheatsheet | +## API Endpoints +| Method | Endpoint | Description | +|--------|----------|-------------| +| GET | /health | Health check | +| POST | /transcribe | Transcribe video with WhisperX | +| POST | /export | Export edited video (stream copy or re-encode) | +| POST | /ai/filler-removal | Detect filler words via LLM | +| POST | /ai/create-clip | AI-suggested clips for shorts | +| GET | /ai/ollama-models | List local Ollama models | +| POST | /captions | Generate SRT/VTT/ASS captions | +| POST | /audio/clean | Noise reduction (DeepFilterNet) | +| GET | /audio/capabilities | Check audio processing availability | -## Key Improvement Areas +## License -### 1. UI Enhancements -- **Implemented:** - - Responsive layout with columns for better organization - - Expanded sidebar with categorized settings - - Custom CSS for improved button styling - - Spinner for long-running operations - - Expanded transcript view by default - -- **Additional Recommendations:** - - Add a dark mode toggle - - Implement progress bars for each processing step - - Add tooltips for complex options - - Create a dashboard view for batch processing results - - Add visualization of transcript segments with timestamps - -### 2. Ollama Local API Integration -- **Implemented:** - - Local API integration for offline summarization - - Model selection from available Ollama models - - Chunking for long texts - - Fallback to online models when Ollama fails - -- **Additional Recommendations:** - - Add temperature and other generation parameters as advanced options - - Implement streaming responses for real-time feedback - - Cache results to avoid reprocessing - - Add support for custom Ollama model creation with specific instructions - - Implement parallel processing for multiple chunks - -### 3. 
Subtitle Export Formats -- **Implemented:** - - SRT export with proper formatting - - ASS export with basic styling - - Multi-format export options - - Automatic segment creation from plain text - -- **Additional Recommendations:** - - Add customizable styling options for ASS subtitles - - Implement subtitle editing before export - - Add support for VTT format for web videos - - Implement subtitle timing adjustment - - Add batch export for multiple files - -### 4. Architecture and Code Quality -- **Recommendations:** - - Implement proper error handling and logging throughout - - Add unit tests for critical components - - Create a configuration file for default settings - - Implement caching for processed files - - Add type hints throughout the codebase - - Document API endpoints for potential future web service - -### 5. Performance Optimizations -- **Recommendations:** - - Implement parallel processing for batch operations - - Add GPU acceleration configuration options - - Optimize memory usage for large files - - Implement incremental processing for very long recordings - - Add compression options for exported files - -### 6. Additional Features -- **Recommendations:** - - Speaker diarization (identifying different speakers) - - Language detection and translation - - Keyword extraction and timestamp linking - - Integration with video editing software - - Batch processing queue with email notifications - - Custom vocabulary for domain-specific terminology - -## Implementation Roadmap -1. **Phase 1 (Completed):** Basic UI improvements, Ollama integration, and subtitle export -2. 
**Phase 2 (Completed):** Performance optimizations and additional export formats - - Added WebVTT export format for web videos - - Implemented GPU acceleration with automatic device selection - - Added caching system for faster processing of previously transcribed files - - Optimized memory usage with configurable memory limits - - Added compression options for exported files - - Enhanced ASS subtitle styling options - - Added progress indicators for better user feedback -3. **Phase 3 (Completed):** Advanced features like speaker diarization and translation - - Implemented speaker diarization to identify different speakers in recordings - - Added language detection and translation capabilities - - Integrated keyword extraction with timestamp linking - - Created interactive transcript with keyword highlighting - - Added named entity recognition for better content analysis - - Generated keyword index with timestamp references - - Provided speaker statistics and word count analysis -4. **Phase 4:** Integration with other tools and services (In progess) - - -Reach out to support@dataants.org if you need assistance with any AI solutions - we offer support for n8n workflows, local RAG chatbots, and ERP and Financial reporting. +MIT License — see [LICENSE](LICENSE) for details. 
diff --git a/app.py b/app.py deleted file mode 100644 index 3e71331..0000000 --- a/app.py +++ /dev/null @@ -1,767 +0,0 @@ -import streamlit as st -from utils.audio_processing import extract_audio, cleanup_temp_audio, get_video_duration -from utils.transcription import transcribe_audio -from utils.summarization import summarize_text -from utils.validation import validate_environment, get_system_capabilities -from utils.export import export_transcript -from pathlib import Path -import os -import logging -import humanize -import time -import tempfile - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -try: - from utils.ollama_integration import ( - check_ollama_available, list_available_models, - chunk_and_summarize, stream_chunk_and_summarize - ) - OLLAMA_AVAILABLE = check_ollama_available() -except ImportError: - OLLAMA_AVAILABLE = False - -try: - from utils.gpu_utils import get_gpu_info, configure_gpu, optimize_for_inference - GPU_UTILS_AVAILABLE = True - optimize_for_inference() -except ImportError: - GPU_UTILS_AVAILABLE = False - -try: - from utils.cache import get_cache_size, clear_cache - CACHE_AVAILABLE = True -except ImportError: - CACHE_AVAILABLE = False - -try: - from utils.diarization import transcribe_with_diarization - DIARIZATION_AVAILABLE = True -except ImportError: - DIARIZATION_AVAILABLE = False - -try: - from utils.translation import transcribe_and_translate, get_language_name - TRANSLATION_AVAILABLE = True -except ImportError: - TRANSLATION_AVAILABLE = False - -try: - from utils.keyword_extraction import ( - extract_keywords_from_transcript, generate_keyword_index, - generate_interactive_transcript - ) - KEYWORD_EXTRACTION_AVAILABLE = True -except ImportError: - KEYWORD_EXTRACTION_AVAILABLE = False - - -def init_session_state(): - """Initialize session state with defaults for persistence across reruns.""" - defaults = { - "transcription_model": "base", - "summarization_method": "Hugging Face (Online)", - 
"use_diarization": False, - "use_translation": False, - "use_keywords": False, - "use_gpu": GPU_UTILS_AVAILABLE, - "use_cache": CACHE_AVAILABLE, - "memory_fraction": 0.8, - "export_formats": ["TXT"], - "compress_exports": False, - "base_folder": str(Path.home()), - "recursive_search": False, - "results": None, - "processing": False, - } - for key, val in defaults.items(): - if key not in st.session_state: - st.session_state[key] = val - - -def format_duration(seconds): - """Format seconds into MM:SS or HH:MM:SS.""" - if seconds is None: - return "Unknown" - m, s = divmod(int(seconds), 60) - h, m = divmod(m, 60) - if h > 0: - return f"{h}:{m:02d}:{s:02d}" - return f"{m}:{s:02d}" - - -def save_uploaded_file(uploaded_file): - """Save an uploaded file to a temp directory and return its path.""" - temp_dir = tempfile.mkdtemp(prefix="vt_upload_") - file_path = Path(temp_dir) / uploaded_file.name - with open(file_path, "wb") as f: - f.write(uploaded_file.getbuffer()) - return file_path - - -def render_sidebar(): - """Render the sidebar with collapsible settings groups.""" - st.sidebar.markdown("### Settings") - - # -- Model Settings (expanded by default) -- - with st.sidebar.expander("Model Settings", expanded=True): - st.session_state.transcription_model = st.selectbox( - "Whisper Model", - ["tiny", "base", "small", "medium", "large"], - index=["tiny", "base", "small", "medium", "large"].index( - st.session_state.transcription_model - ), - help="Larger models are more accurate but slower. " - "Memory: tiny ~75MB, base ~140MB, small ~460MB, medium ~1.5GB, large ~2.9GB", - key="sb_whisper_model", - ) - if st.session_state.transcription_model in ("large", "large-v2", "large-v3") and not st.session_state.get("use_gpu", False): - st.warning( - "The **large** Whisper model requires ~2.9GB of memory. " - "Without GPU, this may crash the application. Consider using " - "**medium** or smaller, or enable GPU acceleration." 
- ) - - summarization_options = ( - ["Hugging Face (Online)", "Ollama (Local)"] - if OLLAMA_AVAILABLE - else ["Hugging Face (Online)"] - ) - st.session_state.summarization_method = st.selectbox( - "Summarization", - summarization_options, - index=0, - help="Ollama runs locally but requires installation.", - key="sb_summarization", - ) - - ollama_model = None - if OLLAMA_AVAILABLE and st.session_state.summarization_method == "Ollama (Local)": - available_models = list_available_models() - if available_models: - ollama_model = st.selectbox( - "Ollama Model", - available_models, - index=0, - key="sb_ollama_model", - ) - else: - st.warning("No Ollama models found. Run `ollama pull `.") - - # -- Advanced Features (collapsed) -- - with st.sidebar.expander("Advanced Features"): - st.session_state.use_diarization = st.checkbox( - "Speaker Diarization", - value=st.session_state.use_diarization, - disabled=not DIARIZATION_AVAILABLE, - help="Identify different speakers in the recording.", - key="sb_diarization", - ) - - hf_token = None - num_speakers = 2 - if st.session_state.use_diarization and DIARIZATION_AVAILABLE: - hf_token = st.text_input( - "HuggingFace Token", - type="password", - help="Required for diarization. 
Get token at huggingface.co/settings/tokens", - key="sb_hf_token", - ) - num_speakers = st.number_input( - "Number of Speakers", min_value=1, max_value=10, value=2, - key="sb_num_speakers", - ) - - st.session_state.use_translation = st.checkbox( - "Translation", - value=st.session_state.use_translation, - disabled=not TRANSLATION_AVAILABLE, - help="Translate the transcript to another language.", - key="sb_translation", - ) - - target_lang = None - if st.session_state.use_translation and TRANSLATION_AVAILABLE: - target_lang = st.selectbox( - "Target Language", - ["en", "es", "fr", "de", "it", "pt", "nl", "ru", "zh", "ja", "ko", "ar"], - format_func=lambda x: f"{get_language_name(x)} ({x})", - key="sb_target_lang", - ) - - st.session_state.use_keywords = st.checkbox( - "Keyword Extraction", - value=st.session_state.use_keywords, - disabled=not KEYWORD_EXTRACTION_AVAILABLE, - help="Extract keywords and link them to timestamps.", - key="sb_keywords", - ) - - max_keywords = 15 - if st.session_state.use_keywords and KEYWORD_EXTRACTION_AVAILABLE: - max_keywords = st.slider( - "Max Keywords", min_value=5, max_value=30, value=15, - key="sb_max_keywords", - ) - - # -- Performance (collapsed) -- - with st.sidebar.expander("Performance"): - st.session_state.use_gpu = st.checkbox( - "GPU Acceleration", - value=st.session_state.use_gpu, - disabled=not GPU_UTILS_AVAILABLE, - help="Use GPU for faster processing if available.", - key="sb_gpu", - ) - - if GPU_UTILS_AVAILABLE and st.session_state.use_gpu: - gpu_info = get_gpu_info() - if gpu_info["cuda_available"]: - gpu_devices = [ - f"{d['name']} ({humanize.naturalsize(d['total_memory'])})" - for d in gpu_info["cuda_devices"] - ] - st.info(f"GPU: {', '.join(gpu_devices)}") - elif gpu_info["mps_available"]: - st.info("Apple Silicon GPU (MPS)") - else: - st.warning("No GPU detected. 
Using CPU.") - - st.session_state.memory_fraction = st.slider( - "GPU Memory %", - min_value=0.1, max_value=1.0, - value=st.session_state.memory_fraction, step=0.1, - disabled=not (GPU_UTILS_AVAILABLE and st.session_state.use_gpu), - key="sb_memory", - ) - - st.session_state.use_cache = st.checkbox( - "Cache Results", - value=st.session_state.use_cache, - disabled=not CACHE_AVAILABLE, - help="Cache transcriptions to avoid reprocessing.", - key="sb_cache", - ) - - if CACHE_AVAILABLE and st.session_state.use_cache: - cache_size, cache_files = get_cache_size() - if cache_size > 0: - st.caption(f"Cache: {humanize.naturalsize(cache_size)} ({cache_files} files)") - if st.button("Clear Cache", key="sb_clear_cache"): - cleared = clear_cache() - st.success(f"Cleared {cleared} files") - - # -- Export (collapsed) -- - with st.sidebar.expander("Export Options"): - st.session_state.export_formats = st.multiselect( - "Formats", - ["TXT", "SRT", "VTT", "ASS"], - default=st.session_state.export_formats, - key="sb_export_formats", - ) - - st.session_state.compress_exports = st.checkbox( - "Compress Exports", - value=st.session_state.compress_exports, - key="sb_compress", - ) - - compression_type = None - if st.session_state.compress_exports: - compression_type = st.radio( - "Compression", ["gzip", "zip"], index=0, - key="sb_compression_type", - ) - - ass_style = None - if "ASS" in st.session_state.export_formats: - if st.checkbox("Customize ASS Style", value=False, key="sb_ass_custom"): - ass_style = { - "fontname": st.selectbox( - "Font", - ["Arial", "Helvetica", "Times New Roman", "Courier New"], - key="sb_ass_font", - ), - "fontsize": str(st.slider("Font Size", 12, 72, 48, key="sb_ass_size")), - "alignment": st.selectbox( - "Alignment", - ["2 (Bottom Center)", "1 (Bottom Left)", "3 (Bottom Right)", "8 (Top Center)"], - key="sb_ass_align", - ).split()[0], - "bold": "-1" if st.checkbox("Bold", value=True, key="sb_ass_bold") else "0", - "italic": "-1" if st.checkbox("Italic", 
value=False, key="sb_ass_italic") else "0", - } - - # -- System Info (collapsed) -- - with st.sidebar.expander("System Info"): - caps = get_system_capabilities() - st.markdown(f"- **FFmpeg:** {'Installed' if caps['ffmpeg'] else 'Not found'}") - st.markdown(f"- **CUDA:** {'Available' if caps['cuda'] else 'Not available'}") - st.markdown(f"- **MPS:** {'Available' if caps['mps'] else 'Not available'}") - if caps["gpu_name"]: - st.markdown(f"- **GPU:** {caps['gpu_name']} ({humanize.naturalsize(caps['gpu_memory'])})") - st.markdown(f"- **Ollama:** {'Connected' if OLLAMA_AVAILABLE else 'Not available'}") - st.markdown(f"- **Diarization:** {'Ready' if DIARIZATION_AVAILABLE else 'Not available'}") - - return { - "ollama_model": ollama_model, - "hf_token": hf_token, - "num_speakers": num_speakers, - "target_lang": target_lang, - "max_keywords": max_keywords, - "compression_type": compression_type, - "ass_style": ass_style, - } - - -def render_file_input(): - """Render the file input section with upload + folder browse tabs.""" - upload_tab, browse_tab = st.tabs(["Upload Files", "Browse Folder"]) - - selected_file = None - - with upload_tab: - uploaded_files = st.file_uploader( - "Drag and drop your recordings here", - type=["mp4", "avi", "mov", "mkv", "m4a"], - accept_multiple_files=True, - key="file_uploader", - ) - if uploaded_files: - if len(uploaded_files) == 1: - selected_file = ("upload", uploaded_files[0]) - else: - file_names = [f.name for f in uploaded_files] - chosen = st.selectbox("Choose a recording", file_names, key="upload_select") - idx = file_names.index(chosen) - selected_file = ("upload", uploaded_files[idx]) - - with browse_tab: - col1, col2 = st.columns([4, 1]) - with col1: - st.session_state.base_folder = st.text_input( - "Folder path", - value=st.session_state.base_folder, - key="folder_input", - ) - with col2: - st.session_state.recursive_search = st.checkbox( - "Recursive", value=st.session_state.recursive_search, - key="recursive_check", - ) - - 
base_path = Path(st.session_state.base_folder) - env_errors = validate_environment(base_path) - if env_errors: - for error in env_errors: - st.warning(error) - else: - extensions = ["*.mp4", "*.avi", "*.mov", "*.mkv", "*.m4a"] - recordings = [] - glob_fn = base_path.rglob if st.session_state.recursive_search else base_path.glob - for ext in extensions: - recordings.extend(glob_fn(ext)) - - if recordings: - chosen = st.selectbox( - "Choose a recording", - recordings, - format_func=lambda p: str(p.relative_to(base_path)) if str(p).startswith(str(base_path)) else str(p), - key="folder_select", - ) - selected_file = ("path", chosen) - else: - st.info("No recordings found. Supported formats: MP4, AVI, MOV, MKV, M4A") - - return selected_file - - -def render_file_preview(selected_file): - """Show file metadata before processing.""" - if selected_file is None: - return - - source_type, file_ref = selected_file - - if source_type == "upload": - file_size = file_ref.size - file_name = file_ref.name - duration = None - else: - file_size = file_ref.stat().st_size - file_name = file_ref.name - duration = get_video_duration(file_ref) - - cols = st.columns(4) - cols[0].metric("File", file_name) - cols[1].metric("Size", humanize.naturalsize(file_size)) - cols[2].metric("Format", Path(file_name).suffix.upper().lstrip(".")) - cols[3].metric("Duration", format_duration(duration)) - - -def resolve_file_path(selected_file): - """Convert the selected file reference to an actual file path.""" - source_type, file_ref = selected_file - if source_type == "upload": - return save_uploaded_file(file_ref) - return file_ref - - -def process_recording(file_path, sidebar_opts): - """Run the full processing pipeline with granular status updates.""" - results = {} - start_time = time.time() - - try: - with st.status("Processing recording...", expanded=True) as status: - - # Step 1: Transcription - st.write(f"Transcribing with Whisper ({st.session_state.transcription_model} model)...") - t0 = 
time.time() - - if st.session_state.use_diarization and DIARIZATION_AVAILABLE and sidebar_opts["hf_token"]: - num_spk = int(sidebar_opts["num_speakers"]) if sidebar_opts["num_speakers"] > 0 else None - segments, transcript = transcribe_with_diarization( - file_path, - whisper_model=st.session_state.transcription_model, - num_speakers=num_spk, - use_gpu=st.session_state.use_gpu, - hf_token=sidebar_opts["hf_token"], - ) - results["diarized"] = True - elif st.session_state.use_translation and TRANSLATION_AVAILABLE: - st.write("Transcribing and translating...") - orig_seg, trans_seg, orig_text, trans_text = transcribe_and_translate( - file_path, - whisper_model=st.session_state.transcription_model, - target_lang=sidebar_opts["target_lang"], - use_gpu=st.session_state.use_gpu, - ) - segments = trans_seg - transcript = trans_text - results["original_text"] = orig_text - results["original_segments"] = orig_seg - results["translated"] = True - else: - segments, transcript = transcribe_audio( - file_path, - model=st.session_state.transcription_model, - use_cache=st.session_state.use_cache, - use_gpu=st.session_state.use_gpu, - memory_fraction=st.session_state.memory_fraction, - ) - - transcription_time = time.time() - t0 - st.write(f"Transcription complete ({transcription_time:.1f}s)") - - if not transcript: - status.update(label="Processing failed", state="error") - return None - - results["segments"] = segments - results["transcript"] = transcript - - # Step 2: Keyword extraction - if st.session_state.use_keywords and KEYWORD_EXTRACTION_AVAILABLE: - st.write("Extracting keywords...") - t0 = time.time() - kw_ts, ent_ts = extract_keywords_from_transcript( - transcript, segments, - max_keywords=sidebar_opts["max_keywords"], - use_gpu=st.session_state.use_gpu, - ) - results["keyword_timestamps"] = kw_ts - results["entity_timestamps"] = ent_ts - results["keyword_index"] = generate_keyword_index(kw_ts, ent_ts) - results["interactive_transcript"] = 
generate_interactive_transcript(segments, kw_ts, ent_ts) - st.write(f"Keywords extracted ({time.time() - t0:.1f}s)") - - # Step 3: Summarization - st.write("Generating summary...") - t0 = time.time() - - use_ollama = ( - OLLAMA_AVAILABLE - and st.session_state.summarization_method == "Ollama (Local)" - and sidebar_opts["ollama_model"] - ) - - if use_ollama: - summary = chunk_and_summarize(transcript, model=sidebar_opts["ollama_model"]) - if not summary: - st.write("Ollama failed, falling back to Hugging Face...") - summary = summarize_text( - transcript, - use_gpu=st.session_state.use_gpu, - memory_fraction=st.session_state.memory_fraction, - ) - results["ollama_streaming"] = True - else: - summary = summarize_text( - transcript, - use_gpu=st.session_state.use_gpu, - memory_fraction=st.session_state.memory_fraction, - ) - - results["summary"] = summary - st.write(f"Summary generated ({time.time() - t0:.1f}s)") - - # Cleanup temp audio files - cleanup_temp_audio() - - total_time = time.time() - start_time - results["processing_time"] = total_time - results["word_count"] = len(transcript.split()) - - status.update(label=f"Complete in {total_time:.1f}s", state="complete") - - return results - - except MemoryError as e: - st.error(str(e)) - logger.error(f"Out of memory: {e}") - return None - except Exception as e: - st.error(f"Processing error: {e}") - logger.error(f"Processing error: {e}", exc_info=True) - return None - - -def render_results(results, sidebar_opts): - """Display processing results with metrics, tabs, and export options.""" - if results is None: - st.error("Processing failed. 
Check logs for details.") - return - - # Metric cards - st.markdown("---") - metric_cols = st.columns(4) - metric_cols[0].metric("Words", f"{results['word_count']:,}") - metric_cols[1].metric("Segments", str(len(results.get("segments", [])))) - metric_cols[2].metric("Processing Time", f"{results['processing_time']:.1f}s") - - if results.get("diarized"): - speakers = set(seg.get("speaker", "UNKNOWN") for seg in results["segments"]) - metric_cols[3].metric("Speakers", str(len(speakers))) - elif results.get("translated"): - metric_cols[3].metric("Translated", "Yes") - else: - metric_cols[3].metric("Model", st.session_state.transcription_model.capitalize()) - - # Results tabs - tab_names = ["Summary", "Transcript", "Advanced"] - tab1, tab2, tab3 = st.tabs(tab_names) - - with tab1: - st.subheader("Summary") - if results.get("ollama_streaming") and OLLAMA_AVAILABLE and sidebar_opts["ollama_model"]: - st.write(results["summary"]) - with st.expander("Re-generate with streaming"): - if st.button("Stream Summary", key="stream_btn"): - st.write_stream( - stream_chunk_and_summarize( - results["transcript"], - model=sidebar_opts["ollama_model"], - ) - ) - else: - st.write(results["summary"]) - - if results.get("original_text"): - with st.expander("Original Language Summary"): - original_summary = summarize_text( - results["original_text"], - use_gpu=st.session_state.use_gpu, - memory_fraction=st.session_state.memory_fraction, - ) - st.write(original_summary) - - with tab2: - st.subheader("Full Transcript") - - if results.get("interactive_transcript"): - st.markdown(results["interactive_transcript"], unsafe_allow_html=True) - else: - st.markdown( - f"
{_format_segments_html(results['segments'])}
", - unsafe_allow_html=True, - ) - - st.download_button( - "Copy Transcript (Download TXT)", - data=results["transcript"], - file_name="transcript.txt", - mime="text/plain", - key="copy_transcript", - ) - - if results.get("original_text"): - with st.expander("Original Language Transcript"): - st.text(results["original_text"]) - - with tab3: - if results.get("keyword_index"): - st.subheader("Keyword Index") - st.markdown(results["keyword_index"]) - - if results.get("diarized"): - st.subheader("Speaker Information") - speakers = set(seg.get("speaker", "UNKNOWN") for seg in results["segments"]) - st.write(f"Detected {len(speakers)} speakers: {', '.join(speakers)}") - - speaker_words = {} - for seg in results["segments"]: - spk = seg.get("speaker", "UNKNOWN") - speaker_words[spk] = speaker_words.get(spk, 0) + len(seg["text"].split()) - - for spk, words in speaker_words.items(): - st.write(f"- **{spk}**: {words} words") - - # Export section - export_formats = st.session_state.export_formats - if export_formats: - st.markdown("---") - st.subheader("Export") - export_cols = st.columns(len(export_formats)) - - output_base = Path(results.get("file_name", "transcript")).stem - - for i, fmt in enumerate(export_formats): - with export_cols[i]: - if fmt == "TXT": - st.download_button( - label=f"Download {fmt}", - data=results["transcript"], - file_name=f"{output_base}_transcript.txt", - mime="text/plain", - key=f"dl_{fmt}", - ) - elif fmt in ["SRT", "VTT", "ASS"]: - output_path = export_transcript( - results["transcript"], - output_base, - fmt.lower(), - segments=results["segments"], - compress=st.session_state.compress_exports, - compression_type=sidebar_opts["compression_type"], - style=sidebar_opts["ass_style"] if fmt == "ASS" else None, - ) - - with open(output_path, "rb") as f: - content = f.read() - - file_ext = f".{fmt.lower()}" - if st.session_state.compress_exports: - file_ext += ".gz" if sidebar_opts["compression_type"] == "gzip" else ".zip" - - st.download_button( - 
label=f"Download {fmt}", - data=content, - file_name=f"{output_base}{file_ext}", - mime="application/octet-stream", - key=f"dl_{fmt}", - ) - - try: - os.remove(output_path) - except OSError: - pass - - -def _format_segments_html(segments): - """Format transcript segments as HTML with timestamps.""" - if not segments: - return "

No segments available.

" - - lines = [] - for seg in segments: - start = seg.get("start", 0) - ts = f"{int(start // 60):02d}:{int(start % 60):02d}" - speaker = seg.get("speaker", "") - speaker_html = f"[{speaker}] " if speaker else "" - text = seg.get("text", "").strip() - lines.append( - f"

" - f"{ts}" - f"{speaker_html}{text}

" - ) - return "\n".join(lines) - - -def main(): - st.set_page_config( - page_title="Video Transcriber", - page_icon="🎬", - layout="wide", - initial_sidebar_state="expanded", - ) - - st.markdown(""" - - """, unsafe_allow_html=True) - - init_session_state() - - st.title("Video Transcriber") - st.caption("AI-powered transcription, summarization, and analysis for video and audio recordings") - - sidebar_opts = render_sidebar() - - # FFmpeg check - ffmpeg_errors = validate_environment() - if ffmpeg_errors: - for err in ffmpeg_errors: - st.warning(err) - - selected_file = render_file_input() - - if selected_file: - render_file_preview(selected_file) - - st.markdown("") - if st.button("Start Processing", type="primary", use_container_width=True): - file_path = resolve_file_path(selected_file) - - results = process_recording(file_path, sidebar_opts) - - if results: - source_type, file_ref = selected_file - results["file_name"] = file_ref.name if source_type == "upload" else file_ref.name - st.session_state.results = results - st.toast("Processing complete!", icon="✅") - - # Clean up uploaded temp files - if selected_file[0] == "upload": - try: - os.remove(file_path) - os.rmdir(file_path.parent) - except OSError: - pass - - # Show persisted results from session state - if st.session_state.results: - render_results(st.session_state.results, sidebar_opts) - - -if __name__ == "__main__": - main() diff --git a/backend/main.py b/backend/main.py new file mode 100644 index 0000000..7954733 --- /dev/null +++ b/backend/main.py @@ -0,0 +1,117 @@ +import logging +import os +import stat +from contextlib import asynccontextmanager +from pathlib import Path + +from fastapi import FastAPI, Query, Request, HTTPException +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import StreamingResponse + +from routers import transcribe, export, ai, captions, audio + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +@asynccontextmanager 
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: log around the backend's lifetime."""
    logger.info("AI Video Editor backend starting up")
    yield
    logger.info("AI Video Editor backend shutting down")


app = FastAPI(
    title="AI Video Editor Backend",
    version="0.1.0",
    lifespan=lifespan,
)

# Wide-open CORS: the backend is only reachable from the local Electron
# renderer.  Range-related headers are exposed so the <video> element can seek.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    expose_headers=["Content-Range", "Accept-Ranges", "Content-Length"],
)

app.include_router(transcribe.router)
app.include_router(export.router)
app.include_router(ai.router)
app.include_router(captions.router)
app.include_router(audio.router)


# Extension -> Content-Type for the media types the editor handles.
MIME_MAP = {
    ".mp4": "video/mp4",
    ".mkv": "video/x-matroska",
    ".mov": "video/quicktime",
    ".avi": "video/x-msvideo",
    ".webm": "video/webm",
    ".m4a": "audio/mp4",
    ".wav": "audio/wav",
    ".mp3": "audio/mpeg",
    ".flac": "audio/flac",
}


def _parse_range(range_header: str, file_size: int):
    """Parse a single-range ``bytes=start-end`` header.

    Fixes two defects in the previous inline parsing:
      * suffix ranges (``bytes=-500`` = last 500 bytes) were served as the
        FIRST 500 bytes (empty start string silently became 0);
      * multi-range (``bytes=0-1,5-9``) or garbage specs raised an uncaught
        ValueError from int()/unpacking.

    Returns an inclusive ``(start, end)`` tuple, or None when the range is
    malformed or unsatisfiable (callers should answer 416).  Multi-range
    requests are not supported and are treated as malformed.
    """
    try:
        spec = range_header.strip()
        if not spec.lower().startswith("bytes="):
            return None
        spec = spec[len("bytes="):]
        if "," in spec:  # multi-range: previously crashed the handler
            return None
        start_str, sep, end_str = spec.partition("-")
        if not sep:
            return None
        start_str = start_str.strip()
        end_str = end_str.strip()
        if not start_str and not end_str:
            return None
        if not start_str:
            # Suffix range: the last N bytes of the file (RFC 7233 §2.1).
            length = int(end_str)
            if length <= 0:
                return None
            start = max(file_size - length, 0)
            end = file_size - 1
        else:
            start = int(start_str)
            end = int(end_str) if end_str else file_size - 1
            end = min(end, file_size - 1)
        if start > end or start >= file_size:
            return None
        return start, end
    except ValueError:
        return None


@app.get("/file")
async def serve_local_file(request: Request, path: str = Query(...)):
    """Stream a local file with HTTP Range support (required for video seeking).

    Serves a 206 partial response for a valid Range header, 416 for an
    unsatisfiable one, and a full 200 stream otherwise.

    NOTE(review): this endpoint serves any readable local path by design
    (local-first desktop app); confirm the Electron bridge is the only
    network peer before exposing the port more widely.
    """
    file_path = Path(path)
    if not file_path.is_file():
        raise HTTPException(status_code=404, detail=f"File not found: {path}")

    file_size = file_path.stat().st_size
    content_type = MIME_MAP.get(file_path.suffix.lower(), "application/octet-stream")

    range_header = request.headers.get("range")
    if range_header:
        byte_range = _parse_range(range_header, file_size)
        if byte_range is None:
            # RFC 7233: unsatisfiable/malformed range -> 416 with full length.
            raise HTTPException(
                status_code=416,
                detail="Invalid Range header",
                headers={"Content-Range": f"bytes */{file_size}"},
            )
        range_start, range_end = byte_range
        content_length = range_end - range_start + 1

        def iter_range():
            # Stream only the requested window, 64 KiB at a time.
            with open(file_path, "rb") as f:
                f.seek(range_start)
                remaining = content_length
                while remaining > 0:
                    chunk = f.read(min(65536, remaining))
                    if not chunk:
                        break
                    remaining -= len(chunk)
                    yield chunk

        return StreamingResponse(
            iter_range(),
            status_code=206,
            media_type=content_type,
            headers={
                "Content-Range": f"bytes {range_start}-{range_end}/{file_size}",
                "Accept-Ranges": "bytes",
                "Content-Length": str(content_length),
            },
        )

    def iter_file():
        # No Range header: stream the whole file.
        with open(file_path, "rb") as f:
            while chunk := f.read(65536):
                yield chunk

    return StreamingResponse(
        iter_file(),
        media_type=content_type,
        headers={
            "Accept-Ranges": "bytes",
            "Content-Length": str(file_size),
        },
    )


@app.get("/health")
async def health():
    """Liveness probe used by the Electron python-bridge."""
    return {"status": "ok"}
# Payload for POST /ai/filler-removal.
class FillerRequest(BaseModel):
    transcript: str                            # full transcript text (context for the LLM)
    words: List[WordInfo]                      # indexed words the LLM refers back to
    provider: str = "ollama"                   # "ollama" | "openai" | "claude"
    model: Optional[str] = None                # provider-specific default used when None
    api_key: Optional[str] = None              # needed for openai/claude only
    base_url: Optional[str] = None             # Ollama server URL override
    custom_filler_words: Optional[str] = None  # extra user-specified fillers/phrases


# Payload for POST /ai/create-clip.
class ClipRequest(BaseModel):
    transcript: str
    words: List[WordInfo]
    provider: str = "ollama"
    model: Optional[str] = None
    api_key: Optional[str] = None
    base_url: Optional[str] = None
    target_duration: int = 60                  # desired clip length in seconds


@router.post("/ai/filler-removal")
async def filler_removal(req: FillerRequest):
    """Ask the configured LLM which word indices are filler words.

    Returns the service's dict: {"wordIndices": [...], "fillerWords": [...]}.
    Any provider/parse failure surfaces as a 500 with the error message.
    """
    try:
        # Pydantic models -> plain dicts for the provider-agnostic service.
        words_dicts = [w.model_dump() for w in req.words]
        result = detect_filler_words(
            transcript=req.transcript,
            words=words_dicts,
            provider=req.provider,
            model=req.model,
            api_key=req.api_key,
            base_url=req.base_url,
            custom_filler_words=req.custom_filler_words,
        )
        return result
    except Exception as e:
        logger.error(f"Filler detection failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/ai/create-clip")
async def create_clip(req: ClipRequest):
    """Ask the configured LLM for engaging short-clip suggestions.

    Returns the service's dict: {"clips": [{title, startWordIndex, ...}]}.
    """
    try:
        words_dicts = [w.model_dump() for w in req.words]
        result = create_clip_suggestion(
            transcript=req.transcript,
            words=words_dicts,
            target_duration=req.target_duration,
            provider=req.provider,
            model=req.model,
            api_key=req.api_key,
            base_url=req.base_url,
        )
        return result
    except Exception as e:
        logger.error(f"Clip creation failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/ai/ollama-models")
async def ollama_models(base_url: str = "http://localhost:11434"):
    """List model names on the local Ollama server (empty list on any failure)."""
    models = AIProvider.list_ollama_models(base_url)
    return {"models": models}
# Payload for POST /audio/clean.
class AudioCleanRequest(BaseModel):
    input_path: str                    # absolute path to the source media file
    output_path: Optional[str] = None  # destination; derived from input when omitted


@router.post("/audio/clean")
async def clean_audio_endpoint(req: AudioCleanRequest):
    """Run noise reduction on a local audio file.

    Delegates to clean_audio(), which prefers DeepFilterNet and falls back to
    FFmpeg's anlmdn filter when DeepFilterNet is not installed.  The empty
    string passed for a missing output_path mirrors clean_audio's convention
    of "derive the output path from the input".
    """
    try:
        output = clean_audio(req.input_path, req.output_path or "")
        return {
            "status": "ok",
            "output_path": output,
            # NOTE(review): engine is re-derived here rather than reported by
            # clean_audio -- assumes the two stay in sync; confirm.
            "engine": "deepfilternet" if is_deepfilter_available() else "ffmpeg_anlmdn",
        }
    except Exception as e:
        logger.error(f"Audio cleaning failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/audio/capabilities")
async def audio_capabilities():
    """Report optional audio features so the UI can enable/disable controls."""
    return {
        "deepfilternet_available": is_deepfilter_available(),
    }
@router.post("/captions")
async def generate_captions(req: CaptionRequest):
    """Generate captions (SRT / VTT / ASS) from word-level timestamps.

    Words whose indices appear in ``deleted_indices`` are excluded, so the
    captions match the edited transcript rather than the raw one.

    Returns the caption text as plain text, or ``{"status": "ok",
    "output_path": ...}`` when ``output_path`` is given and the file is saved
    to disk instead.

    Raises 400 for an unknown ``format``, 500 for generation failures.
    """
    try:
        words_dicts = [w.model_dump() for w in req.words]
        deleted_set = set(req.deleted_indices)

        if req.format == "srt":
            content = generate_srt(words_dicts, deleted_set, req.words_per_line)
        elif req.format == "vtt":
            content = generate_vtt(words_dicts, deleted_set, req.words_per_line)
        elif req.format == "ass":
            style_dict = req.style.model_dump() if req.style else None
            content = generate_ass(words_dicts, deleted_set, req.words_per_line, style_dict)
        else:
            raise HTTPException(status_code=400, detail=f"Unknown format: {req.format}")

        if req.output_path:
            saved = save_captions(content, req.output_path)
            return {"status": "ok", "output_path": saved}

        return PlainTextResponse(content, media_type="text/plain")

    except HTTPException:
        # BUG FIX: HTTPException is an Exception subclass, so the 400 for an
        # unknown format was previously swallowed below and returned as a 500.
        raise
    except Exception as e:
        logger.error(f"Caption generation failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
def _mux_audio(video_path: str, audio_path: str, output_path: str) -> str:
    """Replace video's audio track with cleaned audio using FFmpeg.

    The video stream is copied bit-for-bit; only the audio is (re-)encoded by
    FFmpeg's container default.  Raises RuntimeError with the tail of FFmpeg's
    stderr on failure.
    """
    import subprocess
    cmd = [
        "ffmpeg", "-y",
        "-i", video_path,
        "-i", audio_path,
        "-c:v", "copy",
        "-map", "0:v:0",   # video from the exported file
        "-map", "1:a:0",   # audio from the cleaned track
        "-shortest",
        output_path,
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError(f"Audio mux failed: {result.stderr[-300:]}")
    return output_path


@router.post("/export")
async def export_video(req: ExportRequest):
    """Cut and render the edited video.

    Strategy:
      * ``fast`` mode with a single kept segment -> FFmpeg stream copy;
      * anything else (or burn-in captions) -> re-encode.

    Optional post-steps: best-effort audio enhancement (failure leaves the
    un-enhanced export in place) and a sidecar .srt next to the output.

    Raises 400 for empty segments / ValueError, 500 for processing errors.
    """
    try:
        segments = [{"start": s.start, "end": s.end} for s in req.keep_segments]

        if not segments:
            raise HTTPException(status_code=400, detail="No segments to export")

        use_stream_copy = req.mode == "fast" and len(segments) == 1

        # Burned-in captions force a re-encode: stream copy cannot draw text.
        if req.captions == "burn-in":
            use_stream_copy = False

        words_dicts = [w.model_dump() for w in req.words] if req.words else []
        deleted_set = set(req.deleted_indices or [])

        # Write a temporary ASS file for burn-in rendering.
        ass_path = None
        if req.captions == "burn-in" and words_dicts:
            ass_content = generate_ass(words_dicts, deleted_set)
            tmp = tempfile.NamedTemporaryFile(suffix=".ass", delete=False, mode="w", encoding="utf-8")
            tmp.write(ass_content)
            tmp.close()
            ass_path = tmp.name

        try:
            if use_stream_copy:
                output = export_stream_copy(req.input_path, req.output_path, segments)
            elif ass_path:
                output = export_reencode_with_subs(
                    req.input_path,
                    req.output_path,
                    segments,
                    ass_path,
                    resolution=req.resolution,
                    format_hint=req.format,
                )
            else:
                output = export_reencode(
                    req.input_path,
                    req.output_path,
                    segments,
                    resolution=req.resolution,
                    format_hint=req.format,
                )
        finally:
            if ass_path and os.path.exists(ass_path):
                os.unlink(ass_path)

        # Audio enhancement: clean, then mux back into the exported video.
        if req.enhanceAudio:
            try:
                tmp_dir = tempfile.mkdtemp(prefix="cutscript_audio_")
                cleaned_audio = os.path.join(tmp_dir, "cleaned.wav")
                clean_audio(output, cleaned_audio)

                muxed_path = output + ".muxed.mp4"
                _mux_audio(output, cleaned_audio, muxed_path)

                os.replace(muxed_path, output)
                logger.info(f"Audio enhanced and muxed into {output}")

                # Cleanup (best effort).
                try:
                    os.remove(cleaned_audio)
                    os.rmdir(tmp_dir)
                except OSError:
                    pass
            except Exception as e:
                logger.warning(f"Audio enhancement failed (non-fatal): {e}")

        # Sidecar SRT: generate and save alongside the video.
        srt_path = None
        if req.captions == "sidecar" and words_dicts:
            srt_content = generate_srt(words_dicts, deleted_set)
            # BUG FIX: splitext only strips a real extension; the previous
            # rsplit(".", 1) truncated at a dot inside a directory name
            # (e.g. "out.v1/video" -> "out.srt").
            srt_path = os.path.splitext(req.output_path)[0] + ".srt"
            save_captions(srt_content, srt_path)
            logger.info(f"Sidecar SRT saved to {srt_path}")

        result = {"status": "ok", "output_path": output}
        if srt_path:
            result["srt_path"] = srt_path
        return result

    except HTTPException:
        # BUG FIX: without this, the 400 "No segments to export" raised above
        # was caught by the generic handler below and returned as a 500.
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except RuntimeError as e:
        logger.error(f"Export failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
    except Exception as e:
        logger.error(f"Export error: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
# Payload for POST /transcribe.
class TranscribeRequest(BaseModel):
    file_path: str                      # local media file to transcribe
    model: str = "base"                 # Whisper model size
    language: Optional[str] = None      # language code; autodetected when None
    use_gpu: bool = True
    use_cache: bool = True              # reuse a prior transcription if present
    diarize: bool = False               # speaker labeling (requires hf_token)
    hf_token: Optional[str] = None      # HuggingFace token for pyannote
    num_speakers: Optional[int] = None  # hint for diarization; None = auto


@router.post("/transcribe")
async def transcribe(req: TranscribeRequest):
    """Transcribe a media file to word-level timestamps.

    Diarization only runs when BOTH ``diarize`` is set and an ``hf_token`` is
    supplied; otherwise it is skipped silently.  Returns the transcription
    dict (words/segments/language), possibly augmented with speaker labels.

    Raises 404 when the file is missing, 500 for transcription failures.
    """
    try:
        result = transcribe_audio(
            file_path=req.file_path,
            model_name=req.model,
            use_gpu=req.use_gpu,
            use_cache=req.use_cache,
            language=req.language,
        )

        if req.diarize and req.hf_token:
            result = diarize_and_label(
                transcription_result=result,
                audio_path=req.file_path,
                hf_token=req.hf_token,
                num_speakers=req.num_speakers,
                use_gpu=req.use_gpu,
            )

        return result

    except FileNotFoundError:
        raise HTTPException(status_code=404, detail=f"File not found: {req.file_path}")
    except Exception as e:
        logger.error(f"Transcription failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
logger = logging.getLogger(__name__)


class AIProvider:
    """Routes completion requests to the configured provider."""

    @staticmethod
    def complete(
        prompt: str,
        provider: str = "ollama",
        model: Optional[str] = None,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        system_prompt: Optional[str] = None,
        temperature: float = 0.3,
    ) -> str:
        """Dispatch a completion to the chosen backend, filling in defaults."""
        if provider == "ollama":
            endpoint = base_url or "http://localhost:11434"
            return _ollama_complete(prompt, model or "llama3", endpoint, system_prompt, temperature)
        if provider == "openai":
            return _openai_complete(prompt, model or "gpt-4o", api_key or "", system_prompt, temperature)
        if provider == "claude":
            return _claude_complete(prompt, model or "claude-sonnet-4-20250514", api_key or "", system_prompt, temperature)
        raise ValueError(f"Unknown provider: {provider}")

    @staticmethod
    def list_ollama_models(base_url: str = "http://localhost:11434") -> List[str]:
        """Return the model names a local Ollama server advertises.

        Any failure (server down, bad response, parse error) yields [].
        """
        names: List[str] = []
        try:
            resp = requests.get(f"{base_url}/api/tags", timeout=3)
            if resp.status_code == 200:
                names = [entry["name"] for entry in resp.json().get("models", [])]
        except Exception:
            names = []
        return names


def _ollama_complete(prompt: str, model: str, base_url: str, system_prompt: Optional[str], temperature: float) -> str:
    """POST to Ollama's /api/generate and return the stripped response text."""
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {"temperature": temperature},
    }
    if system_prompt:
        payload["system"] = system_prompt

    try:
        reply = requests.post(f"{base_url}/api/generate", json=payload, timeout=120)
        reply.raise_for_status()
        return reply.json().get("response", "").strip()
    except Exception as e:
        logger.error(f"Ollama error: {e}")
        raise
def _claude_complete(prompt: str, model: str, api_key: str, system_prompt: Optional[str], temperature: float) -> str:
    """Call Anthropic's Messages API and return the stripped response text.

    The SDK is imported lazily so the backend runs without it when another
    provider is configured.  Errors are logged and re-raised to the caller.
    """
    try:
        import anthropic
        client = anthropic.Anthropic(api_key=api_key)
        kwargs = {
            "model": model,
            "max_tokens": 4096,
            "temperature": temperature,
            "messages": [{"role": "user", "content": prompt}],
        }
        if system_prompt:
            # Claude takes the system prompt as a top-level parameter,
            # not as a message role.
            kwargs["system"] = system_prompt

        response = client.messages.create(**kwargs)
        return response.content[0].text.strip()
    except Exception as e:
        logger.error(f"Claude error: {e}")
        raise


def detect_filler_words(
    transcript: str,
    words: List[dict],
    provider: str = "ollama",
    model: Optional[str] = None,
    api_key: Optional[str] = None,
    base_url: Optional[str] = None,
    custom_filler_words: Optional[str] = None,
) -> dict:
    """
    Use an LLM to identify filler words in the transcript.

    Each word is presented to the model as "index: word" so the model can
    answer with stable integer indices rather than fuzzy text matches.

    Returns {"wordIndices": [...], "fillerWords": [{"index": N, "word": "...", "reason": "..."}]}
    (empty lists when the model's reply cannot be parsed as JSON).
    """
    word_list = "\n".join(f"{w['index']}: {w['word']}" for w in words)

    custom_line = ""
    if custom_filler_words and custom_filler_words.strip():
        custom_line = f"\n\nAdditionally, flag these user-specified filler words/phrases: {custom_filler_words.strip()}"

    prompt = f"""Analyze this transcript for filler words and verbal hesitations.

Filler words include: um, uh, uh huh, hmm, like (when used as filler), you know, so (when starting sentences unnecessarily), basically, actually, literally, right, I mean, kind of, sort of, well (when used as filler).

Also flag repeated words that indicate stammering (e.g., "I I I" or "the the").{custom_line}

Here are the words with their indices:
{word_list}

Return ONLY a valid JSON object with this exact structure:
{{"wordIndices": [list of integer indices to remove], "fillerWords": [{{"index": integer, "word": "the word", "reason": "brief reason"}}]}}

Be conservative -- only flag clear filler words, not words that are part of meaningful sentences."""

    system = "You are a precise text analysis tool. Return only valid JSON, no explanation."

    # Low temperature: we want deterministic extraction, not creativity.
    result_text = AIProvider.complete(
        prompt=prompt,
        provider=provider,
        model=model,
        api_key=api_key,
        base_url=base_url,
        system_prompt=system,
        temperature=0.1,
    )

    # Grab the outermost {...} span so prose or code fences around the JSON
    # don't break parsing.
    try:
        start = result_text.find("{")
        end = result_text.rfind("}") + 1
        if start >= 0 and end > start:
            return json.loads(result_text[start:end])
    except json.JSONDecodeError:
        logger.error(f"Failed to parse AI response as JSON: {result_text[:200]}")

    # Fallback: no detections rather than an error, so the UI stays usable.
    return {"wordIndices": [], "fillerWords": []}
logger = logging.getLogger(__name__)

# DeepFilterNet is optional; when missing we fall back to FFmpeg below.
try:
    from df.enhance import enhance, init_df, load_audio, save_audio
    DEEPFILTER_AVAILABLE = True
except ImportError:
    DEEPFILTER_AVAILABLE = False


# Lazily-initialized DeepFilterNet model/state (one per process).
_df_model = None
_df_state = None


def _init_deepfilter():
    """Load the DeepFilterNet model once and memoize it."""
    global _df_model, _df_state
    if _df_model is None:
        logger.info("Initializing DeepFilterNet model")
        _df_model, _df_state, _ = init_df()
    return _df_model, _df_state


def clean_audio(
    input_path: str,
    output_path: str = "",
) -> str:
    """
    Apply noise reduction to an audio file.

    If DeepFilterNet is available, uses it for high-quality results.
    Otherwise falls back to FFmpeg's anlmdn filter.

    An empty ``output_path`` derives "<stem>_clean<suffix>" next to the input.

    Returns: path to the cleaned audio file.
    """
    source = Path(input_path)
    if not output_path:
        output_path = str(source.with_stem(source.stem + "_clean"))

    backend = _clean_with_deepfilter if DEEPFILTER_AVAILABLE else _clean_with_ffmpeg
    return backend(str(source), output_path)


def _clean_with_deepfilter(input_path: str, output_path: str) -> str:
    """Denoise with DeepFilterNet at the model's native sample rate."""
    model, state = _init_deepfilter()
    audio, info = load_audio(input_path, sr=state.sr())
    enhanced = enhance(model, state, audio)
    save_audio(output_path, enhanced, sr=state.sr())
    logger.info(f"DeepFilterNet cleaned audio saved to {output_path}")
    return output_path


def _clean_with_ffmpeg(input_path: str, output_path: str) -> str:
    """Fallback: basic noise reduction using FFmpeg's anlmdn filter."""
    command = [
        "ffmpeg", "-y",
        "-i", input_path,
        "-af", "anlmdn=s=7:p=0.002:r=0.002:m=15",
        output_path,
    ]
    outcome = subprocess.run(command, capture_output=True, text=True)
    if outcome.returncode != 0:
        raise RuntimeError(f"FFmpeg audio cleaning failed: {outcome.stderr[-300:]}")
    logger.info(f"FFmpeg cleaned audio saved to {output_path}")
    return output_path


def is_deepfilter_available() -> bool:
    """True when the DeepFilterNet package imported successfully."""
    return DEEPFILTER_AVAILABLE
logger = logging.getLogger(__name__)

# Backend availability flags, resolved once at import time.
# RVM (Robust Video Matting) support is planned but not wired up yet, so its
# flag stays False.  (A previous empty `try: pass` placeholder for the RVM
# import was dead code and has been removed.)
MEDIAPIPE_AVAILABLE = False
RVM_AVAILABLE = False

try:
    import mediapipe as mp  # noqa: F401 -- presence check only; Phase 5 will use it
    MEDIAPIPE_AVAILABLE = True
except ImportError:
    pass


def is_available() -> bool:
    """Return True when at least one segmentation backend is installed."""
    return MEDIAPIPE_AVAILABLE or RVM_AVAILABLE


def remove_background_on_export(
    input_path: str,
    output_path: str,
    replacement: str = "blur",
    replacement_value: str = "",
) -> str:
    """
    Process video frame-by-frame to remove/replace background.
    Only runs during export (not real-time).

    Args:
        input_path: source video
        output_path: destination
        replacement: 'blur', 'color', 'image', or 'video'
        replacement_value: hex color, image path, or video path

    Returns:
        output_path

    Raises:
        RuntimeError: when no segmentation backend is installed.
        NotImplementedError: always, until the Phase 5 implementation lands.
    """
    if not is_available():
        raise RuntimeError(
            "Background removal requires mediapipe or robust-video-matting. "
            "Install with: pip install mediapipe"
        )

    # Phase 5 implementation will go here
    raise NotImplementedError("Background removal is planned for Phase 5")
+""" + +import logging +from pathlib import Path +from typing import List, Optional + +logger = logging.getLogger(__name__) + + +def _format_srt_time(seconds: float) -> str: + h = int(seconds // 3600) + m = int((seconds % 3600) // 60) + s = int(seconds % 60) + ms = int((seconds % 1) * 1000) + return f"{h:02d}:{m:02d}:{s:02d},{ms:03d}" + + +def _format_vtt_time(seconds: float) -> str: + h = int(seconds // 3600) + m = int((seconds % 3600) // 60) + s = int(seconds % 60) + ms = int((seconds % 1) * 1000) + return f"{h:02d}:{m:02d}:{s:02d}.{ms:03d}" + + +def _format_ass_time(seconds: float) -> str: + h = int(seconds // 3600) + m = int((seconds % 3600) // 60) + s = int(seconds % 60) + cs = int((seconds % 1) * 100) + return f"{h}:{m:02d}:{s:02d}.{cs:02d}" + + +def generate_srt( + words: List[dict], + deleted_indices: Optional[set] = None, + words_per_line: int = 8, +) -> str: + """Generate SRT caption content from word-level timestamps.""" + deleted_indices = deleted_indices or set() + active_words = [(i, w) for i, w in enumerate(words) if i not in deleted_indices] + + lines = [] + counter = 1 + for chunk_start in range(0, len(active_words), words_per_line): + chunk = active_words[chunk_start:chunk_start + words_per_line] + if not chunk: + continue + + start_time = chunk[0][1]["start"] + end_time = chunk[-1][1]["end"] + text = " ".join(w["word"] for _, w in chunk) + + lines.append(str(counter)) + lines.append(f"{_format_srt_time(start_time)} --> {_format_srt_time(end_time)}") + lines.append(text) + lines.append("") + counter += 1 + + return "\n".join(lines) + + +def generate_vtt( + words: List[dict], + deleted_indices: Optional[set] = None, + words_per_line: int = 8, +) -> str: + """Generate WebVTT caption content.""" + deleted_indices = deleted_indices or set() + active_words = [(i, w) for i, w in enumerate(words) if i not in deleted_indices] + + lines = ["WEBVTT", ""] + for chunk_start in range(0, len(active_words), words_per_line): + chunk = 
def generate_ass(
    words: List[dict],
    deleted_indices: Optional[set] = None,
    words_per_line: int = 8,
    style: Optional[dict] = None,
) -> str:
    """Generate ASS subtitle content with styling.

    Only fontName, fontSize, fontColor, and bold are read from *style*;
    other keys sent by the UI (e.g. position, backgroundColor) are currently
    ignored -- the alignment is fixed to 2 (ASS bottom-center).
    """
    deleted_indices = deleted_indices or set()
    active_words = [(i, w) for i, w in enumerate(words) if i not in deleted_indices]

    s = style or {}
    font = s.get("fontName", "Arial")
    size = s.get("fontSize", 48)
    color = s.get("fontColor", "&H00FFFFFF")
    bold = "-1" if s.get("bold", True) else "0"  # "-1" is ASS's boolean true
    alignment = 2  # numpad layout: 2 = bottom-center

    header = f"""[Script Info]
Title: AI Video Editor Captions
ScriptType: v4.00+
PlayResX: 1920
PlayResY: 1080

[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
Style: Default,{font},{size},{color},&H000000FF,&H00000000,&H80000000,{bold},0,0,0,100,100,0,0,1,2,1,{alignment},20,20,40,1

[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""

    # One Dialogue event per chunk of up to words_per_line words, timed from
    # the chunk's first word start to its last word end.
    events = []
    for chunk_start in range(0, len(active_words), words_per_line):
        chunk = active_words[chunk_start:chunk_start + words_per_line]
        if not chunk:
            continue

        start_time = chunk[0][1]["start"]
        end_time = chunk[-1][1]["end"]
        text = " ".join(w["word"] for _, w in chunk)

        events.append(
            f"Dialogue: 0,{_format_ass_time(start_time)},{_format_ass_time(end_time)},Default,,0,0,0,,{text}"
        )

    return header + "\n".join(events) + "\n"
# One loaded pyannote pipeline per device string, reused across requests.
_pipeline_cache = {}


def _get_pipeline(hf_token: str, device: torch.device):
    """Load (or fetch from cache) the pyannote speaker-diarization pipeline.

    Returns None on any failure (missing dependency, bad token, no network)
    so callers can degrade gracefully instead of erroring the request.
    """
    cache_key = str(device)
    if cache_key in _pipeline_cache:
        return _pipeline_cache[cache_key]

    try:
        from pyannote.audio import Pipeline

        pipeline = Pipeline.from_pretrained(
            "pyannote/speaker-diarization-3.0",
            use_auth_token=hf_token,
        )
        if device.type == "cuda":
            # Only move explicitly for GPU; CPU is pyannote's default.
            pipeline = pipeline.to(device)

        _pipeline_cache[cache_key] = pipeline
        return pipeline
    except Exception as e:
        logger.error(f"Failed to load diarization pipeline: {e}")
        return None


def diarize_and_label(
    transcription_result: dict,
    audio_path: str,
    hf_token: Optional[str] = None,
    num_speakers: Optional[int] = None,
    use_gpu: bool = True,
) -> dict:
    """
    Apply speaker diarization to an existing transcription result.
    Adds 'speaker' field to each word and segment.

    Best-effort: when no token is available, the pipeline fails to load, or
    diarization itself errors, the input is returned unchanged (no raise).

    Returns the mutated transcription_result with speaker labels.
    """
    hf_token = hf_token or os.environ.get("HF_TOKEN")
    if not hf_token:
        logger.warning("No HuggingFace token provided; skipping diarization")
        return transcription_result

    device = get_optimal_device() if use_gpu else torch.device("cpu")
    pipeline = _get_pipeline(hf_token, device)
    if pipeline is None:
        return transcription_result

    audio_path = Path(audio_path)
    logger.info(f"Running diarization on {audio_path}")

    try:
        diarization = pipeline(str(audio_path), num_speakers=num_speakers)
    except Exception as e:
        logger.error(f"Diarization failed: {e}")
        return transcription_result

    # Flatten pyannote's annotation into (start, end, speaker) turns.
    speaker_map = []
    for turn, _, speaker in diarization.itertracks(yield_label=True):
        speaker_map.append((turn.start, turn.end, speaker))

    def _find_speaker(start: float, end: float) -> str:
        # Pick the speaker whose turn overlaps this span the most;
        # "UNKNOWN" when nothing overlaps.  Linear scan per lookup.
        best_overlap = 0
        best_speaker = "UNKNOWN"
        for s_start, s_end, speaker in speaker_map:
            overlap_start = max(start, s_start)
            overlap_end = min(end, s_end)
            overlap = max(0, overlap_end - overlap_start)
            if overlap > best_overlap:
                best_overlap = overlap
                best_speaker = speaker
        return best_speaker

    # Label top-level words, segments, and each segment's nested words.
    for word in transcription_result.get("words", []):
        word["speaker"] = _find_speaker(word["start"], word["end"])

    for segment in transcription_result.get("segments", []):
        segment["speaker"] = _find_speaker(segment["start"], segment["end"])
        for w in segment.get("words", []):
            w["speaker"] = _find_speaker(w["start"], w["end"])

    return transcription_result
+""" + +import logging +from pathlib import Path +from typing import Optional + +import torch + +from utils.gpu_utils import get_optimal_device, configure_gpu +from utils.audio_processing import extract_audio +from utils.cache import load_from_cache, save_to_cache + +logger = logging.getLogger(__name__) + +_model_cache: dict = {} + +try: + import whisperx + WHISPERX_AVAILABLE = True +except ImportError: + WHISPERX_AVAILABLE = False + import whisper + +try: + HF_TOKEN = None + import os + HF_TOKEN = os.environ.get("HF_TOKEN") +except Exception: + pass + + +def _get_device(use_gpu: bool = True) -> torch.device: + if use_gpu: + return get_optimal_device() + return torch.device("cpu") + + +def _load_model(model_name: str, device: torch.device): + cache_key = f"{model_name}_{device}" + if cache_key in _model_cache: + return _model_cache[cache_key] + + logger.info(f"Loading model: {model_name} on {device}") + if WHISPERX_AVAILABLE: + compute_type = "float16" if device.type == "cuda" else "int8" + model = whisperx.load_model( + model_name, + device=str(device), + compute_type=compute_type, + ) + else: + model = whisper.load_model(model_name, device=device) + + _model_cache[cache_key] = model + return model + + +def transcribe_audio( + file_path: str, + model_name: str = "base", + use_gpu: bool = True, + use_cache: bool = True, + language: Optional[str] = None, +) -> dict: + """ + Transcribe audio/video file and return word-level timestamps. 
+ + Returns: + dict with keys: words, segments, language + """ + file_path = Path(file_path) + + if use_cache: + cached = load_from_cache(file_path, model_name, "transcribe_wx") + if cached: + logger.info("Using cached transcription") + return cached + + video_extensions = {".mp4", ".avi", ".mov", ".mkv", ".webm"} + if file_path.suffix.lower() in video_extensions: + audio_path = extract_audio(file_path) + else: + audio_path = file_path + + device = _get_device(use_gpu) + model = _load_model(model_name, device) + + logger.info(f"Transcribing: {file_path}") + + if WHISPERX_AVAILABLE: + result = _transcribe_whisperx(model, str(audio_path), device, language) + else: + result = _transcribe_standard(model, str(audio_path), language) + + if use_cache: + save_to_cache(file_path, result, model_name, "transcribe_wx") + + return result + + +def _transcribe_whisperx(model, audio_path: str, device: torch.device, language: Optional[str]) -> dict: + audio = whisperx.load_audio(audio_path) + transcribe_opts = {} + if language: + transcribe_opts["language"] = language + + result = model.transcribe(audio, batch_size=16, **transcribe_opts) + detected_language = result.get("language", "en") + + align_model, align_metadata = whisperx.load_align_model( + language_code=detected_language, + device=str(device), + ) + aligned = whisperx.align( + result["segments"], + align_model, + align_metadata, + audio, + str(device), + return_char_alignments=False, + ) + + words = [] + for seg in aligned.get("segments", []): + for w in seg.get("words", []): + words.append({ + "word": w.get("word", ""), + "start": round(w.get("start", 0), 3), + "end": round(w.get("end", 0), 3), + "confidence": round(w.get("score", 0), 3), + }) + + segments = [] + for i, seg in enumerate(aligned.get("segments", [])): + seg_words = [] + for w in seg.get("words", []): + seg_words.append({ + "word": w.get("word", ""), + "start": round(w.get("start", 0), 3), + "end": round(w.get("end", 0), 3), + "confidence": 
round(w.get("score", 0), 3), + }) + segments.append({ + "id": i, + "start": round(seg.get("start", 0), 3), + "end": round(seg.get("end", 0), 3), + "text": seg.get("text", "").strip(), + "words": seg_words, + }) + + return { + "words": words, + "segments": segments, + "language": detected_language, + } + + +def _transcribe_standard(model, audio_path: str, language: Optional[str]) -> dict: + """Fallback: standard Whisper (segment-level only, synthesized word timestamps).""" + opts = {} + if language: + opts["language"] = language + + result = model.transcribe(audio_path, **opts) + detected_language = result.get("language", "en") + + words = [] + segments = [] + + for i, seg in enumerate(result.get("segments", [])): + text = seg.get("text", "").strip() + seg_start = seg.get("start", 0) + seg_end = seg.get("end", 0) + seg_words_text = text.split() + duration = seg_end - seg_start + + seg_words = [] + for j, w_text in enumerate(seg_words_text): + w_start = seg_start + (j / max(len(seg_words_text), 1)) * duration + w_end = seg_start + ((j + 1) / max(len(seg_words_text), 1)) * duration + word_obj = { + "word": w_text, + "start": round(w_start, 3), + "end": round(w_end, 3), + "confidence": 0.5, + } + words.append(word_obj) + seg_words.append(word_obj) + + segments.append({ + "id": i, + "start": round(seg_start, 3), + "end": round(seg_end, 3), + "text": text, + "words": seg_words, + }) + + return { + "words": words, + "segments": segments, + "language": detected_language, + } diff --git a/backend/services/video_editor.py b/backend/services/video_editor.py new file mode 100644 index 0000000..c28d04f --- /dev/null +++ b/backend/services/video_editor.py @@ -0,0 +1,271 @@ +""" +FFmpeg-based video cutting engine. +Uses stream copy for fast, lossless cuts and falls back to re-encode when needed. 
+""" + +import logging +import subprocess +import tempfile +import os +from pathlib import Path +from typing import List + +logger = logging.getLogger(__name__) + + +def _find_ffmpeg() -> str: + """Locate ffmpeg binary.""" + for cmd in ["ffmpeg", "ffmpeg.exe"]: + try: + subprocess.run([cmd, "-version"], capture_output=True, check=True) + return cmd + except (FileNotFoundError, subprocess.CalledProcessError): + continue + raise RuntimeError("FFmpeg not found. Install it or add it to PATH.") + + +def export_stream_copy( + input_path: str, + output_path: str, + keep_segments: List[dict], +) -> str: + """ + Export video using FFmpeg concat demuxer with stream copy. + ~100x faster than re-encoding. No quality loss. + + Args: + input_path: source video file + output_path: destination file + keep_segments: list of {"start": float, "end": float} to keep + + Returns: + output_path on success + """ + ffmpeg = _find_ffmpeg() + input_path = str(Path(input_path).resolve()) + output_path = str(Path(output_path).resolve()) + + if not keep_segments: + raise ValueError("No segments to export") + + temp_dir = tempfile.mkdtemp(prefix="aive_export_") + + try: + segment_files = [] + for i, seg in enumerate(keep_segments): + seg_file = os.path.join(temp_dir, f"seg_{i:04d}.ts") + cmd = [ + ffmpeg, "-y", + "-ss", str(seg["start"]), + "-to", str(seg["end"]), + "-i", input_path, + "-c", "copy", + "-avoid_negative_ts", "make_zero", + "-f", "mpegts", + seg_file, + ] + logger.info(f"Extracting segment {i}: {seg['start']:.2f}s - {seg['end']:.2f}s") + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + logger.warning(f"Stream copy segment {i} failed, will try re-encode: {result.stderr[-200:]}") + return export_reencode(input_path, output_path, keep_segments) + segment_files.append(seg_file) + + concat_str = "|".join(segment_files) + cmd = [ + ffmpeg, "-y", + "-i", f"concat:{concat_str}", + "-c", "copy", + "-movflags", "+faststart", + output_path, + ] + 
logger.info(f"Concatenating {len(segment_files)} segments -> {output_path}") + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + logger.warning(f"Concat failed, falling back to re-encode: {result.stderr[-200:]}") + return export_reencode(input_path, output_path, keep_segments) + + return output_path + + finally: + for f in os.listdir(temp_dir): + try: + os.remove(os.path.join(temp_dir, f)) + except OSError: + pass + try: + os.rmdir(temp_dir) + except OSError: + pass + + +def export_reencode( + input_path: str, + output_path: str, + keep_segments: List[dict], + resolution: str = "1080p", + format_hint: str = "mp4", +) -> str: + """ + Export video with full re-encode. Slower but supports resolution changes, + format conversion, and avoids stream-copy edge cases. + """ + ffmpeg = _find_ffmpeg() + input_path = str(Path(input_path).resolve()) + output_path = str(Path(output_path).resolve()) + + if not keep_segments: + raise ValueError("No segments to export") + + scale_map = { + "720p": "scale=-2:720", + "1080p": "scale=-2:1080", + "4k": "scale=-2:2160", + } + + filter_parts = [] + for i, seg in enumerate(keep_segments): + filter_parts.append( + f"[0:v]trim=start={seg['start']}:end={seg['end']},setpts=PTS-STARTPTS[v{i}];" + f"[0:a]atrim=start={seg['start']}:end={seg['end']},asetpts=PTS-STARTPTS[a{i}];" + ) + + n = len(keep_segments) + concat_inputs = "".join(f"[v{i}][a{i}]" for i in range(n)) + filter_parts.append(f"{concat_inputs}concat=n={n}:v=1:a=1[outv][outa]") + + filter_complex = "".join(filter_parts) + + scale = scale_map.get(resolution, "") + if scale: + filter_complex += f";[outv]{scale}[outv_scaled]" + video_map = "[outv_scaled]" + else: + video_map = "[outv]" + + codec_args = ["-c:v", "libx264", "-preset", "medium", "-crf", "18", "-c:a", "aac", "-b:a", "192k"] + if format_hint == "webm": + codec_args = ["-c:v", "libvpx-vp9", "-crf", "30", "-b:v", "0", "-c:a", "libopus"] + + cmd = [ + ffmpeg, "-y", + "-i", input_path, 
+ "-filter_complex", filter_complex, + "-map", video_map, + "-map", "[outa]", + *codec_args, + "-movflags", "+faststart", + output_path, + ] + + logger.info(f"Re-encoding {n} segments -> {output_path} ({resolution})") + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + raise RuntimeError(f"FFmpeg re-encode failed: {result.stderr[-500:]}") + + return output_path + + +def export_reencode_with_subs( + input_path: str, + output_path: str, + keep_segments: List[dict], + subtitle_path: str, + resolution: str = "1080p", + format_hint: str = "mp4", +) -> str: + """ + Export video with re-encode and burn-in subtitles (ASS format). + Applies trim+concat first, then overlays the subtitle file. + """ + ffmpeg = _find_ffmpeg() + input_path = str(Path(input_path).resolve()) + output_path = str(Path(output_path).resolve()) + subtitle_path = str(Path(subtitle_path).resolve()) + + if not keep_segments: + raise ValueError("No segments to export") + + scale_map = { + "720p": "scale=-2:720", + "1080p": "scale=-2:1080", + "4k": "scale=-2:2160", + } + + filter_parts = [] + for i, seg in enumerate(keep_segments): + filter_parts.append( + f"[0:v]trim=start={seg['start']}:end={seg['end']},setpts=PTS-STARTPTS[v{i}];" + f"[0:a]atrim=start={seg['start']}:end={seg['end']},asetpts=PTS-STARTPTS[a{i}];" + ) + + n = len(keep_segments) + concat_inputs = "".join(f"[v{i}][a{i}]" for i in range(n)) + filter_parts.append(f"{concat_inputs}concat=n={n}:v=1:a=1[outv][outa]") + + filter_complex = "".join(filter_parts) + + # Escape path for FFmpeg subtitle filter (Windows backslashes need escaping) + escaped_sub = subtitle_path.replace("\\", "/").replace(":", "\\:") + + scale = scale_map.get(resolution, "") + if scale: + filter_complex += f";[outv]{scale},ass='{escaped_sub}'[outv_final]" + else: + filter_complex += f";[outv]ass='{escaped_sub}'[outv_final]" + video_map = "[outv_final]" + + codec_args = ["-c:v", "libx264", "-preset", "medium", "-crf", "18", "-c:a", 
"aac", "-b:a", "192k"] + if format_hint == "webm": + codec_args = ["-c:v", "libvpx-vp9", "-crf", "30", "-b:v", "0", "-c:a", "libopus"] + + cmd = [ + ffmpeg, "-y", + "-i", input_path, + "-filter_complex", filter_complex, + "-map", video_map, + "-map", "[outa]", + *codec_args, + "-movflags", "+faststart", + output_path, + ] + + logger.info(f"Re-encoding {n} segments with subtitles -> {output_path} ({resolution})") + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + raise RuntimeError(f"FFmpeg re-encode with subs failed: {result.stderr[-500:]}") + + return output_path + + +def get_video_info(input_path: str) -> dict: + """Get basic video metadata using ffprobe.""" + ffmpeg = _find_ffmpeg() + ffprobe = ffmpeg.replace("ffmpeg", "ffprobe") + + cmd = [ + ffprobe, "-v", "quiet", + "-print_format", "json", + "-show_format", "-show_streams", + str(input_path), + ] + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + import json + data = json.loads(result.stdout) + fmt = data.get("format", {}) + video_stream = next((s for s in data.get("streams", []) if s.get("codec_type") == "video"), {}) + + return { + "duration": float(fmt.get("duration", 0)), + "size": int(fmt.get("size", 0)), + "format": fmt.get("format_name", ""), + "width": int(video_stream.get("width", 0)), + "height": int(video_stream.get("height", 0)), + "codec": video_stream.get("codec_name", ""), + "fps": eval(video_stream.get("r_frame_rate", "0/1")) if "/" in video_stream.get("r_frame_rate", "") else 0, + } + except Exception as e: + logger.error(f"Failed to get video info: {e}") + return {} diff --git a/backend/utils/__init__.py b/backend/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/audio_processing.py b/backend/utils/audio_processing.py similarity index 100% rename from utils/audio_processing.py rename to backend/utils/audio_processing.py diff --git a/utils/cache.py b/backend/utils/cache.py similarity 
index 100% rename from utils/cache.py rename to backend/utils/cache.py diff --git a/utils/gpu_utils.py b/backend/utils/gpu_utils.py similarity index 100% rename from utils/gpu_utils.py rename to backend/utils/gpu_utils.py diff --git a/docker-compose.prebuilt.yml b/docker-compose.prebuilt.yml deleted file mode 100644 index a549445..0000000 --- a/docker-compose.prebuilt.yml +++ /dev/null @@ -1,70 +0,0 @@ -version: '3.8' - -services: - videotranscriber: - # Use prebuilt image from GitHub Container Registry - image: ghcr.io/dataants-ai/videotranscriber:latest - container_name: videotranscriber - ports: - - "8501:8501" - volumes: - # Mount your video files directory (change the left path to your actual videos folder) - - "${VIDEO_PATH:-./videos}:/app/data/videos" - # Mount output directory for transcripts and summaries - - "${OUTPUT_PATH:-./outputs}:/app/data/outputs" - # Mount cache directory for model caching (optional, improves performance) - - "${CACHE_PATH:-./cache}:/app/data/cache" - # Mount a config directory if needed - - "${CONFIG_PATH:-./config}:/app/config" - environment: - # Ollama configuration for host access - - OLLAMA_API_URL=${OLLAMA_API_URL:-http://host.docker.internal:11434/api} - # Optional: HuggingFace token for advanced features - - HF_TOKEN=${HF_TOKEN:-} - # GPU configuration - - CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-} - # Cache settings - - TRANSFORMERS_CACHE=/app/data/cache/transformers - - WHISPER_CACHE=/app/data/cache/whisper - restart: unless-stopped - # Use bridge networking for Windows/Mac with host.docker.internal - networks: - - videotranscriber-network - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8501/_stcore/health"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 60s - - # Alternative GPU-enabled service (uncomment to use) - # videotranscriber-gpu: - # image: ghcr.io/dataants-ai/videotranscriber:latest-gpu - # container_name: videotranscriber-gpu - # ports: - # - "8501:8501" - # volumes: - # - 
"${VIDEO_PATH:-./videos}:/app/data/videos" - # - "${OUTPUT_PATH:-./outputs}:/app/data/outputs" - # - "${CACHE_PATH:-./cache}:/app/data/cache" - # - "${CONFIG_PATH:-./config}:/app/config" - # environment: - # - OLLAMA_API_URL=${OLLAMA_API_URL:-http://host.docker.internal:11434/api} - # - HF_TOKEN=${HF_TOKEN:-} - # - CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-0} - # - TRANSFORMERS_CACHE=/app/data/cache/transformers - # - WHISPER_CACHE=/app/data/cache/whisper - # deploy: - # resources: - # reservations: - # devices: - # - driver: nvidia - # count: 1 - # capabilities: [gpu] - # restart: unless-stopped - # networks: - # - videotranscriber-network - -networks: - videotranscriber-network: - driver: bridge \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index b2db01d..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,51 +0,0 @@ -version: '3.8' - -services: - videotranscriber: - build: . - container_name: videotranscriber - ports: - - "8501:8501" - volumes: - # Mount your video files directory (change the left path to your actual videos folder) - - "${VIDEO_PATH:-./videos}:/app/data/videos" - # Mount output directory for transcripts and summaries - - "${OUTPUT_PATH:-./outputs}:/app/data/outputs" - # Mount cache directory for model caching (optional, improves performance) - - "${CACHE_PATH:-./cache}:/app/data/cache" - # Mount a config directory if needed - - "${CONFIG_PATH:-./config}:/app/config" - environment: - # Ollama configuration for host access - - OLLAMA_API_URL=${OLLAMA_API_URL:-http://host.docker.internal:11434/api} - # Optional: HuggingFace token for advanced features - - HF_TOKEN=${HF_TOKEN:-} - # GPU configuration - - CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-} - # Cache settings - - TRANSFORMERS_CACHE=/app/data/cache/transformers - - WHISPER_CACHE=/app/data/cache/whisper - # For GPU access (uncomment if you have NVIDIA GPU and nvidia-docker) - # deploy: - # resources: - # reservations: - # 
devices: - # - driver: nvidia - # count: 1 - # capabilities: [gpu] - restart: unless-stopped - # For Linux hosts, you might prefer host networking for better Ollama access - # network_mode: host # Uncomment for Linux hosts - # Use bridge networking for Windows/Mac with host.docker.internal - networks: - - videotranscriber-network - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8501/_stcore/health"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 60s - -networks: - videotranscriber-network: - driver: bridge \ No newline at end of file diff --git a/docker.env.example b/docker.env.example deleted file mode 100644 index 04a7049..0000000 --- a/docker.env.example +++ /dev/null @@ -1,63 +0,0 @@ -# VideoTranscriber Docker Configuration -# Copy this file to .env and modify the values as needed - -# ============================================================================= -# DOCKER VOLUME PATHS (Host Directories) -# ============================================================================= - -# Path to your video files directory on the host -# This directory will be mounted into the container at /app/data/videos -VIDEO_PATH=./videos - -# Path where outputs (transcripts, summaries) will be saved on the host -# This directory will be mounted into the container at /app/data/outputs -OUTPUT_PATH=./outputs - -# Path for caching ML models and processed files (improves performance) -# This directory will be mounted into the container at /app/data/cache -CACHE_PATH=./cache - -# Optional: Configuration directory for custom settings -CONFIG_PATH=./config - -# ============================================================================= -# OLLAMA CONFIGURATION -# ============================================================================= - -# Ollama API URL - how the container accesses your host Ollama service -# For Windows/Mac with Docker Desktop: use host.docker.internal -# For Linux: use host networking or the actual host IP 
-OLLAMA_API_URL=http://host.docker.internal:11434/api - -# ============================================================================= -# ML MODEL CONFIGURATION -# ============================================================================= - -# HuggingFace token for advanced features (speaker diarization, etc.) -# Get your token at: https://huggingface.co/settings/tokens -# Leave empty if not using advanced features -HF_TOKEN= - -# GPU Configuration -# Specify which GPU devices to use (leave empty for all available) -# Examples: "0" for first GPU, "0,1" for first two GPUs -CUDA_VISIBLE_DEVICES= - -# ============================================================================= -# DOCKER-SPECIFIC SETTINGS -# ============================================================================= - -# Container name (change if you want to run multiple instances) -CONTAINER_NAME=videotranscriber - -# Port mapping (host:container) -HOST_PORT=8501 - -# ============================================================================= -# EXAMPLE USAGE -# ============================================================================= -# 1. Copy this file: cp docker.env.example .env -# 2. Edit the paths to match your system -# 3. Make sure Ollama is running on your host: ollama serve -# 4. Start the container: docker-compose up -d -# 5. 
Access the app at: http://localhost:8501 \ No newline at end of file diff --git a/electron/main.js b/electron/main.js new file mode 100644 index 0000000..ac11da6 --- /dev/null +++ b/electron/main.js @@ -0,0 +1,131 @@ +const { app, BrowserWindow, ipcMain, dialog, safeStorage } = require('electron'); +const path = require('path'); +const { PythonBackend } = require('./python-bridge'); + +let mainWindow = null; +let pythonBackend = null; + +const isDev = !app.isPackaged; +const BACKEND_PORT = 8642; + +function createWindow() { + mainWindow = new BrowserWindow({ + width: 1400, + height: 900, + minWidth: 1024, + minHeight: 700, + title: 'CutScript', + webPreferences: { + preload: path.join(__dirname, 'preload.js'), + contextIsolation: true, + nodeIntegration: false, + webSecurity: isDev ? false : true, + }, + show: false, + }); + + if (isDev) { + mainWindow.loadURL('http://localhost:5173'); + mainWindow.webContents.openDevTools(); + } else { + mainWindow.loadFile(path.join(__dirname, '..', 'frontend', 'dist', 'index.html')); + } + + mainWindow.once('ready-to-show', () => { + mainWindow.show(); + }); + + mainWindow.on('closed', () => { + mainWindow = null; + }); +} + +app.whenReady().then(async () => { + pythonBackend = new PythonBackend(BACKEND_PORT, isDev); + await pythonBackend.start(); + + createWindow(); + + app.on('activate', () => { + if (BrowserWindow.getAllWindows().length === 0) { + createWindow(); + } + }); +}); + +app.on('window-all-closed', () => { + if (process.platform !== 'darwin') { + app.quit(); + } +}); + +app.on('before-quit', () => { + if (pythonBackend) { + pythonBackend.stop(); + } +}); + +// IPC Handlers + +ipcMain.handle('dialog:openFile', async (_event, options) => { + const result = await dialog.showOpenDialog(mainWindow, { + properties: ['openFile'], + filters: [ + { name: 'Video Files', extensions: ['mp4', 'avi', 'mov', 'mkv', 'webm'] }, + { name: 'Audio Files', extensions: ['m4a', 'wav', 'mp3', 'flac'] }, + { name: 'All Files', extensions: 
['*'] }, + ], + ...options, + }); + return result.canceled ? null : result.filePaths[0]; +}); + +ipcMain.handle('dialog:saveFile', async (_event, options) => { + const result = await dialog.showSaveDialog(mainWindow, { + filters: [ + { name: 'Video Files', extensions: ['mp4', 'mov', 'webm'] }, + { name: 'Project Files', extensions: ['aive'] }, + ], + ...options, + }); + return result.canceled ? null : result.filePath; +}); + +ipcMain.handle('dialog:openProject', async () => { + const result = await dialog.showOpenDialog(mainWindow, { + properties: ['openFile'], + filters: [ + { name: 'AI Video Editor Project', extensions: ['aive'] }, + ], + }); + return result.canceled ? null : result.filePaths[0]; +}); + +ipcMain.handle('safe-storage:encrypt', (_event, data) => { + if (safeStorage.isEncryptionAvailable()) { + return safeStorage.encryptString(data).toString('base64'); + } + return data; +}); + +ipcMain.handle('safe-storage:decrypt', (_event, encrypted) => { + if (safeStorage.isEncryptionAvailable()) { + return safeStorage.decryptString(Buffer.from(encrypted, 'base64')); + } + return encrypted; +}); + +ipcMain.handle('get-backend-url', () => { + return `http://localhost:${BACKEND_PORT}`; +}); + +ipcMain.handle('fs:readFile', async (_event, filePath) => { + const fs = require('fs'); + return fs.readFileSync(filePath, 'utf-8'); +}); + +ipcMain.handle('fs:writeFile', async (_event, filePath, content) => { + const fs = require('fs'); + fs.writeFileSync(filePath, content, 'utf-8'); + return true; +}); diff --git a/electron/preload.js b/electron/preload.js new file mode 100644 index 0000000..32d5ac0 --- /dev/null +++ b/electron/preload.js @@ -0,0 +1,12 @@ +const { contextBridge, ipcRenderer } = require('electron'); + +contextBridge.exposeInMainWorld('electronAPI', { + openFile: (options) => ipcRenderer.invoke('dialog:openFile', options), + saveFile: (options) => ipcRenderer.invoke('dialog:saveFile', options), + openProject: () => ipcRenderer.invoke('dialog:openProject'), 
+ getBackendUrl: () => ipcRenderer.invoke('get-backend-url'), + encryptString: (data) => ipcRenderer.invoke('safe-storage:encrypt', data), + decryptString: (encrypted) => ipcRenderer.invoke('safe-storage:decrypt', encrypted), + readFile: (path) => ipcRenderer.invoke('fs:readFile', path), + writeFile: (path, content) => ipcRenderer.invoke('fs:writeFile', path, content), +}); diff --git a/electron/python-bridge.js b/electron/python-bridge.js new file mode 100644 index 0000000..4650f28 --- /dev/null +++ b/electron/python-bridge.js @@ -0,0 +1,105 @@ +const { spawn } = require('child_process'); +const path = require('path'); +const http = require('http'); + +class PythonBackend { + constructor(port, isDev) { + this.port = port; + this.isDev = isDev; + this.process = null; + } + + async start() { + // In dev mode, check if a backend is already running (e.g. from `npm run dev:backend`) + // If so, reuse it instead of spawning a duplicate. + if (this.isDev) { + const alreadyRunning = await this._isPortOpen(2000); + if (alreadyRunning) { + console.log(`[backend] Dev backend already running on port ${this.port} — reusing it.`); + return; + } + } + + const backendDir = this.isDev + ? path.join(__dirname, '..', 'backend') + : path.join(process.resourcesPath, 'backend'); + + const pythonCmd = process.platform === 'win32' ? 
'python' : 'python3'; + + this.process = spawn(pythonCmd, [ + '-m', 'uvicorn', 'main:app', + '--host', '127.0.0.1', + '--port', String(this.port), + ], { + cwd: backendDir, + stdio: ['pipe', 'pipe', 'pipe'], + env: { ...process.env, PYTHONUNBUFFERED: '1' }, + }); + + this.process.stdout.on('data', (data) => { + console.log(`[backend] ${data.toString().trim()}`); + }); + + this.process.stderr.on('data', (data) => { + console.error(`[backend] ${data.toString().trim()}`); + }); + + this.process.on('error', (err) => { + console.error('[backend] Failed to start Python backend:', err.message); + }); + + this.process.on('exit', (code) => { + console.log(`[backend] Process exited with code ${code}`); + this.process = null; + }); + + await this._waitForReady(30000); + console.log(`[backend] Ready on port ${this.port}`); + } + + _isPortOpen(timeoutMs) { + return new Promise((resolve) => { + const req = http.get(`http://127.0.0.1:${this.port}/health`, (res) => { + resolve(res.statusCode === 200); + }); + req.on('error', () => resolve(false)); + req.setTimeout(timeoutMs, () => { req.destroy(); resolve(false); }); + req.end(); + }); + } + + stop() { + if (this.process) { + if (process.platform === 'win32') { + spawn('taskkill', ['/pid', String(this.process.pid), '/f', '/t']); + } else { + this.process.kill('SIGTERM'); + } + this.process = null; + } + } + + _waitForReady(timeoutMs) { + const startTime = Date.now(); + return new Promise((resolve, reject) => { + const check = () => { + if (Date.now() - startTime > timeoutMs) { + reject(new Error('Backend startup timed out')); + return; + } + const req = http.get(`http://127.0.0.1:${this.port}/health`, (res) => { + if (res.statusCode === 200) { + resolve(); + } else { + setTimeout(check, 500); + } + }); + req.on('error', () => setTimeout(check, 500)); + req.end(); + }; + setTimeout(check, 1000); + }); + } +} + +module.exports = { PythonBackend }; diff --git a/frontend/index.html b/frontend/index.html new file mode 100644 index 
0000000..8f06c07 --- /dev/null +++ b/frontend/index.html @@ -0,0 +1,16 @@ + + + + + + + + + + CutScript + + +
+ + + diff --git a/frontend/package-lock.json b/frontend/package-lock.json new file mode 100644 index 0000000..42eb0ed --- /dev/null +++ b/frontend/package-lock.json @@ -0,0 +1,2817 @@ +{ + "name": "ai-video-editor-frontend", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "ai-video-editor-frontend", + "version": "0.1.0", + "dependencies": { + "lucide-react": "^0.468.0", + "react": "^19.0.0", + "react-dom": "^19.0.0", + "react-virtuoso": "^4.18.3", + "wavesurfer.js": "^7.8.0", + "zundo": "^2.3.0", + "zustand": "^5.0.0" + }, + "devDependencies": { + "@types/react": "^19.0.0", + "@types/react-dom": "^19.0.0", + "@vitejs/plugin-react": "^4.3.0", + "autoprefixer": "^10.4.20", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.0", + "typescript": "^5.7.0", + "vite": "^6.0.0" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": 
"MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + 
"semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz", + "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", + "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz", + "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz", + "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz", + "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==", + "cpu": [ + "x64" + ], + "dev": 
true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz", + "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz", + "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz", + "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz", + "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz", + "integrity": 
"sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz", + "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz", + "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz", + "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz", + "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.12", + 
"resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz", + "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz", + "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz", + "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz", + "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz", + "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + 
"node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz", + "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz", + "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz", + "integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz", + "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz", + "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==", + "cpu": [ + 
"x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz", + "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz", + "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz", + "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": 
"sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": 
"sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.27", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", + "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz", + "integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz", + "integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz", + "integrity": 
"sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz", + "integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz", + "integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz", + "integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz", + "integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.59.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz", + "integrity": "sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz", + "integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz", + "integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz", + "integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz", + "integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + 
"node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz", + "integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz", + "integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz", + "integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz", + "integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz", + "integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==", + "cpu": [ + "s390x" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz", + "integrity": "sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz", + "integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz", + "integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz", + "integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz", + "integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==", + 
"cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz", + "integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz", + "integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz", + "integrity": "sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": 
"sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "19.2.14", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", + "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.7.0", + 
"resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz", + "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.27", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.17.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, + "node_modules/autoprefixer": { + "version": "10.4.27", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.27.tgz", + "integrity": "sha512-NP9APE+tO+LuJGn7/9+cohklunJsXWiaWEfV3si4Gi/XHDwVNgkwr1J3RQYFIvPy76GmJ9/bW8vyoU1LcxwKHA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": 
"https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001774", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.0.tgz", + "integrity": "sha512-lIyg0szRfYbiy67j9KN8IyeD7q7hcmqnJ1ddWmNt19ItGpNN64mnllmxUNFIOdOm6by97jlL6wfpTTJrmnjWAA==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": 
"opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001776", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001776.tgz", + "integrity": "sha512-sg01JDPzZ9jGshqKSckOQthXnYwOEP50jeVFhaSFbZcOy05TiuuaffDOfcwtCisJ9kNQuLBFibYywv2Bgm9osw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + 
"engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": 
"sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.302", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.302.tgz", + "integrity": "sha512-sM6HAN2LyK82IyPBpznDRqlTQAtuSaO+ShzFiWTvoMJLHyZ+Y39r8VMfHzwbU8MVBzQ4Wdn85+wlZl2TLGIlwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/esbuild": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz", + "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.12", + "@esbuild/android-arm": "0.25.12", + "@esbuild/android-arm64": "0.25.12", + "@esbuild/android-x64": "0.25.12", + "@esbuild/darwin-arm64": "0.25.12", + "@esbuild/darwin-x64": "0.25.12", + "@esbuild/freebsd-arm64": "0.25.12", + "@esbuild/freebsd-x64": "0.25.12", + "@esbuild/linux-arm": "0.25.12", + "@esbuild/linux-arm64": "0.25.12", + "@esbuild/linux-ia32": "0.25.12", + 
"@esbuild/linux-loong64": "0.25.12", + "@esbuild/linux-mips64el": "0.25.12", + "@esbuild/linux-ppc64": "0.25.12", + "@esbuild/linux-riscv64": "0.25.12", + "@esbuild/linux-s390x": "0.25.12", + "@esbuild/linux-x64": "0.25.12", + "@esbuild/netbsd-arm64": "0.25.12", + "@esbuild/netbsd-x64": "0.25.12", + "@esbuild/openbsd-arm64": "0.25.12", + "@esbuild/openbsd-x64": "0.25.12", + "@esbuild/openharmony-arm64": "0.25.12", + "@esbuild/sunos-x64": "0.25.12", + "@esbuild/win32-arm64": "0.25.12", + "@esbuild/win32-ia32": "0.25.12", + "@esbuild/win32-x64": "0.25.12" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + 
"dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/glob-parent": { 
+ "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + 
"integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": 
"sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lucide-react": { + "version": "0.468.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.468.0.tgz", + "integrity": "sha512-6koYRhnM2N0GGZIdXzSeiNwguv1gt/FAjZOiPl76roBi3xKEXa4WmfpxgQwTTL4KipXjefrnf3oV4IsYhi4JFA==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/ms": { + "version": "2.1.3", + 
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/postcss": { + "version": "8.5.8", + "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz", + "integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", + "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": 
"sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": 
"sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", + "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", + "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.4" + } + }, + "node_modules/react-refresh": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz", + "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-virtuoso": { + "version": "4.18.3", + "resolved": "https://registry.npmjs.org/react-virtuoso/-/react-virtuoso-4.18.3.tgz", + "integrity": "sha512-fLz/peHAx4Eu0DLHurFEEI7Y6n5CqEoxBh04rgJM9yMuOJah2a9zWg/MUOmZLcp7zuWYorXq5+5bf3IRgkNvWg==", + "license": "MIT", + "peerDependencies": { + "react": ">=16 || 
>=17 || >= 18 || >= 19", + "react-dom": ">=16 || >=17 || >= 18 || >=19" + } + }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rollup": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz", + "integrity": "sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": 
{ + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.59.0", + "@rollup/rollup-android-arm64": "4.59.0", + "@rollup/rollup-darwin-arm64": "4.59.0", + "@rollup/rollup-darwin-x64": "4.59.0", + "@rollup/rollup-freebsd-arm64": "4.59.0", + "@rollup/rollup-freebsd-x64": "4.59.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.59.0", + "@rollup/rollup-linux-arm-musleabihf": "4.59.0", + "@rollup/rollup-linux-arm64-gnu": "4.59.0", + "@rollup/rollup-linux-arm64-musl": "4.59.0", + "@rollup/rollup-linux-loong64-gnu": "4.59.0", + "@rollup/rollup-linux-loong64-musl": "4.59.0", + "@rollup/rollup-linux-ppc64-gnu": "4.59.0", + "@rollup/rollup-linux-ppc64-musl": "4.59.0", + "@rollup/rollup-linux-riscv64-gnu": "4.59.0", + "@rollup/rollup-linux-riscv64-musl": "4.59.0", + "@rollup/rollup-linux-s390x-gnu": "4.59.0", + "@rollup/rollup-linux-x64-gnu": "4.59.0", + "@rollup/rollup-linux-x64-musl": "4.59.0", + "@rollup/rollup-openbsd-x64": "4.59.0", + "@rollup/rollup-openharmony-arm64": "4.59.0", + "@rollup/rollup-win32-arm64-msvc": "4.59.0", + "@rollup/rollup-win32-ia32-msvc": "4.59.0", + "@rollup/rollup-win32-x64-gnu": "4.59.0", + "@rollup/rollup-win32-x64-msvc": "4.59.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": 
"https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sucrase": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", + "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "tinyglobby": "^0.2.11", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": 
"3.4.19", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", + "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.7", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": 
"^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + 
"license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.1.tgz", + "integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.4.4", + "picomatch": "^4.0.2", + "postcss": "^8.5.3", + "rollup": "^4.34.9", + "tinyglobby": "^0.2.13" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", + "less": "*", + "lightningcss": 
"^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/wavesurfer.js": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/wavesurfer.js/-/wavesurfer.js-7.12.1.tgz", + "integrity": "sha512-NswPjVHxk0Q1F/VMRemCPUzSojjuHHisQrBqQiRXg7MVbe3f5vQ6r0rTTXA/a/neC/4hnOEC4YpXca4LpH0SUg==", + "license": "BSD-3-Clause" + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", 
+ "dev": true, + "license": "ISC" + }, + "node_modules/zundo": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/zundo/-/zundo-2.3.0.tgz", + "integrity": "sha512-4GXYxXA17SIKYhVbWHdSEU04P697IMyVGXrC2TnzoyohEAWytFNOKqOp5gTGvaW93F/PM5Y0evbGtOPF0PWQwQ==", + "license": "MIT", + "funding": { + "type": "individual", + "url": "https://github.com/sponsors/charkour" + }, + "peerDependencies": { + "zustand": "^4.3.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "zustand": { + "optional": false + } + } + }, + "node_modules/zustand": { + "version": "5.0.11", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.11.tgz", + "integrity": "sha512-fdZY+dk7zn/vbWNCYmzZULHRrss0jx5pPFiOuMZ/5HJN6Yv3u+1Wswy/4MpZEkEGhtNH+pwxZB8OKgUBPzYAGg==", + "license": "MIT", + "engines": { + "node": ">=12.20.0" + }, + "peerDependencies": { + "@types/react": ">=18.0.0", + "immer": ">=9.0.6", + "react": ">=18.0.0", + "use-sync-external-store": ">=1.2.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + }, + "use-sync-external-store": { + "optional": true + } + } + } + } +} diff --git a/frontend/package.json b/frontend/package.json new file mode 100644 index 0000000..9f57b01 --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,31 @@ +{ + "name": "cutscript-frontend", + "private": true, + "version": "0.1.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc -b && vite build", + "lint": "eslint .", + "preview": "vite preview" + }, + "dependencies": { + "lucide-react": "^0.468.0", + "react": "^19.0.0", + "react-dom": "^19.0.0", + "react-virtuoso": "^4.18.3", + "wavesurfer.js": "^7.8.0", + "zundo": "^2.3.0", + "zustand": "^5.0.0" + }, + "devDependencies": { + "@types/react": "^19.0.0", + "@types/react-dom": "^19.0.0", + "@vitejs/plugin-react": "^4.3.0", + "autoprefixer": "^10.4.20", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.0", + "typescript": "^5.7.0", + "vite": 
"^6.0.0" + } +} diff --git a/frontend/postcss.config.js b/frontend/postcss.config.js new file mode 100644 index 0000000..2aa7205 --- /dev/null +++ b/frontend/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +}; diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx new file mode 100644 index 0000000..8542392 --- /dev/null +++ b/frontend/src/App.tsx @@ -0,0 +1,310 @@ +import { useEffect, useState, useRef } from 'react'; +import { useEditorStore } from './store/editorStore'; +import VideoPlayer from './components/VideoPlayer'; +import TranscriptEditor from './components/TranscriptEditor'; +import WaveformTimeline from './components/WaveformTimeline'; +import AIPanel from './components/AIPanel'; +import ExportDialog from './components/ExportDialog'; +import SettingsPanel from './components/SettingsPanel'; +import { useKeyboardShortcuts } from './hooks/useKeyboardShortcuts'; +import { + Film, + FolderOpen, + Settings, + Sparkles, + Download, + Loader2, + FolderSearch, + FileInput, +} from 'lucide-react'; + +const IS_ELECTRON = !!window.electronAPI; + +type Panel = 'ai' | 'settings' | 'export' | null; + +export default function App() { + const { + videoPath, + words, + isTranscribing, + transcriptionProgress, + loadVideo, + setBackendUrl, + setTranscription, + setTranscribing, + backendUrl, + } = useEditorStore(); + + const [activePanel, setActivePanel] = useState(null); + const [manualPath, setManualPath] = useState(''); + const [whisperModel, setWhisperModel] = useState('base'); + const fileInputRef = useRef(null); + + useKeyboardShortcuts(); + + useEffect(() => { + if (IS_ELECTRON) { + window.electronAPI!.getBackendUrl().then(setBackendUrl); + } + }, [setBackendUrl]); + + const handleLoadProject = async () => { + if (!IS_ELECTRON) return; + try { + const projectPath = await window.electronAPI!.openProject(); + if (!projectPath) return; + const content = await window.electronAPI!.readFile(projectPath); + 
const data = JSON.parse(content); + useEditorStore.getState().loadProject(data); + } catch (err) { + console.error('Failed to load project:', err); + alert(`Failed to load project: ${err}`); + } + }; + + const handleOpenFile = async () => { + if (IS_ELECTRON) { + const path = await window.electronAPI!.openFile(); + if (path) { + loadVideo(path); + await transcribeVideo(path); + } + } else { + // Browser: use the manual path input + const path = manualPath.trim(); + if (path) { + loadVideo(path); + await transcribeVideo(path); + } + } + }; + + const handleManualSubmit = async (e: React.FormEvent) => { + e.preventDefault(); + const path = manualPath.trim(); + if (!path) return; + loadVideo(path); + await transcribeVideo(path); + }; + + const transcribeVideo = async (path: string) => { + setTranscribing(true, 0); + try { + const res = await fetch(`${backendUrl}/transcribe`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ file_path: path, model: whisperModel }), + }); + if (!res.ok) throw new Error(`Transcription failed: ${res.statusText}`); + const data = await res.json(); + setTranscription(data); + } catch (err) { + console.error('Transcription error:', err); + alert(`Transcription failed. Check the console for details.\n\n${err}`); + } finally { + setTranscribing(false); + } + }; + + const togglePanel = (panel: Panel) => + setActivePanel((prev) => (prev === panel ? null : panel)); + + if (!videoPath) { + return ( +
+
+ +

CutScript

+

+ Open-source text-based video editing powered by AI. +

+
+ + {/* Whisper model selector */} +
+ + +
+ + {IS_ELECTRON ? ( +
+ + +
+ ) : ( + /* Browser: manual path input */ +
+
+ + Running in browser — paste the full path to your video file below. + +
+
+
+ + setManualPath(e.target.value)} + placeholder="C:\Videos\my-video.mp4" + className="w-full pl-9 pr-3 py-2.5 bg-editor-surface border border-editor-border rounded-lg text-sm text-editor-text placeholder:text-editor-text-muted/40 focus:outline-none focus:border-editor-accent" + autoFocus + /> +
+ +
+

+ Supported: MP4, AVI, MOV, MKV, WebM, M4A +

+
+ )} +
+ ); + } + + return ( +
+ {/* Top bar */} +
+
+ + + {videoPath.split(/[\\/]/).pop()} + +
+
+ } + label="Open" + onClick={IS_ELECTRON ? handleOpenFile : () => useEditorStore.getState().reset()} + /> + } + label="AI" + active={activePanel === 'ai'} + onClick={() => togglePanel('ai')} + disabled={words.length === 0} + /> + } + label="Export" + active={activePanel === 'export'} + onClick={() => togglePanel('export')} + disabled={words.length === 0} + /> + } + label="Settings" + active={activePanel === 'settings'} + onClick={() => togglePanel('settings')} + /> +
+
+ + {/* Main content */} +
+ {/* Left: video + transcript */} +
+
+ {/* Video player */} +
+ +
+ + {/* Transcript */} +
+ {isTranscribing ? ( +
+ +

+ Transcribing... {Math.round(transcriptionProgress)}% +

+
+ ) : words.length > 0 ? ( + + ) : ( +
+ No transcript yet +
+ )} +
+
+ + {/* Waveform timeline */} +
+ +
+
+ + {/* Right panel (AI / Export / Settings) */} + {activePanel && ( +
+ {activePanel === 'ai' && } + {activePanel === 'export' && } + {activePanel === 'settings' && } +
+ )} +
+
+ ); +} + +function ToolbarButton({ + icon, + label, + active, + onClick, + disabled, +}: { + icon: React.ReactNode; + label: string; + active?: boolean; + onClick: () => void; + disabled?: boolean; +}) { + return ( + + ); +} diff --git a/frontend/src/components/AIPanel.tsx b/frontend/src/components/AIPanel.tsx new file mode 100644 index 0000000..25f6ff6 --- /dev/null +++ b/frontend/src/components/AIPanel.tsx @@ -0,0 +1,332 @@ +import { useCallback, useState } from 'react'; +import { useEditorStore } from '../store/editorStore'; +import { useAIStore } from '../store/aiStore'; +import { Sparkles, Scissors, Film, Loader2, Check, X, Play, Download } from 'lucide-react'; +import type { ClipSuggestion } from '../types/project'; + +export default function AIPanel() { + const { words, videoPath, backendUrl, deleteWordRange, setCurrentTime } = useEditorStore(); + const { + defaultProvider, + providers, + customFillerWords, + fillerResult, + clipSuggestions, + isProcessing, + processingMessage, + setCustomFillerWords, + setFillerResult, + setClipSuggestions, + setProcessing, + } = useAIStore(); + + const [activeTab, setActiveTab] = useState<'filler' | 'clips'>('filler'); + + const detectFillers = useCallback(async () => { + if (words.length === 0) return; + setProcessing(true, 'Detecting filler words...'); + try { + const config = providers[defaultProvider]; + const transcript = words.map((w) => w.word).join(' '); + const res = await fetch(`${backendUrl}/ai/filler-removal`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + transcript, + words: words.map((w, i) => ({ index: i, word: w.word })), + provider: defaultProvider, + model: config.model, + api_key: config.apiKey || undefined, + base_url: config.baseUrl || undefined, + custom_filler_words: customFillerWords || undefined, + }), + }); + if (!res.ok) throw new Error('Filler detection failed'); + const data = await res.json(); + setFillerResult(data); + } catch (err) { + 
console.error(err); + } finally { + setProcessing(false); + } + }, [words, backendUrl, defaultProvider, providers, customFillerWords, setProcessing, setFillerResult]); + + const createClips = useCallback(async () => { + if (words.length === 0) return; + setProcessing(true, 'Finding best clip segments...'); + try { + const config = providers[defaultProvider]; + const transcript = words.map((w) => w.word).join(' '); + const res = await fetch(`${backendUrl}/ai/create-clip`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + transcript, + words: words.map((w, i) => ({ + index: i, + word: w.word, + start: w.start, + end: w.end, + })), + provider: defaultProvider, + model: config.model, + api_key: config.apiKey || undefined, + base_url: config.baseUrl || undefined, + target_duration: 60, + }), + }); + if (!res.ok) throw new Error('Clip creation failed'); + const data = await res.json(); + setClipSuggestions(data.clips || []); + } catch (err) { + console.error(err); + } finally { + setProcessing(false); + } + }, [words, backendUrl, defaultProvider, providers, setProcessing, setClipSuggestions]); + + const applyFillerDeletions = useCallback(() => { + if (!fillerResult) return; + const sorted = [...fillerResult.fillerWords].sort((a, b) => b.index - a.index); + for (const fw of sorted) { + deleteWordRange(fw.index, fw.index); + } + setFillerResult(null); + }, [fillerResult, deleteWordRange, setFillerResult]); + + const handlePreviewClip = useCallback( + (clip: ClipSuggestion) => { + setCurrentTime(clip.startTime); + const video = document.querySelector('video'); + if (video) { + video.currentTime = clip.startTime; + video.play(); + } + }, + [setCurrentTime], + ); + + const [exportingClipIndex, setExportingClipIndex] = useState(null); + + const handleExportClip = useCallback( + async (clip: ClipSuggestion, index: number) => { + if (!videoPath) return; + setExportingClipIndex(index); + try { + const safeName = 
clip.title.replace(/[^a-zA-Z0-9_-]/g, '_').substring(0, 40); + const dirSep = videoPath.lastIndexOf('\\') >= 0 ? '\\' : '/'; + const dir = videoPath.substring(0, videoPath.lastIndexOf(dirSep)); + const outputPath = `${dir}${dirSep}${safeName}_clip.mp4`; + + const res = await fetch(`${backendUrl}/export`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + input_path: videoPath, + output_path: outputPath, + keep_segments: [{ start: clip.startTime, end: clip.endTime }], + mode: 'fast', + format: 'mp4', + }), + }); + if (!res.ok) throw new Error('Export failed'); + const data = await res.json(); + alert(`Clip exported to: ${data.output_path}`); + } catch (err) { + console.error(err); + alert('Failed to export clip. Check console for details.'); + } finally { + setExportingClipIndex(null); + } + }, + [videoPath, backendUrl], + ); + + return ( +
+
+ setActiveTab('filler')} + icon={} + label="Filler Words" + /> + setActiveTab('clips')} + icon={} + label="Create Clips" + /> +
+ +
+ {activeTab === 'filler' && ( +
+

+ Use AI to detect and remove filler words like "um", "uh", "like", "you know" from + your transcript. +

+
+ + setCustomFillerWords(e.target.value)} + placeholder="e.g. okay, alright, anyway" + className="w-full px-2.5 py-1.5 text-xs bg-editor-surface border border-editor-border rounded focus:border-editor-accent focus:outline-none" + /> +
+ + + {fillerResult && fillerResult.fillerWords.length > 0 && ( +
+
+ + Found {fillerResult.fillerWords.length} filler words + +
+ + +
+
+
+ {fillerResult.fillerWords.map((fw) => ( +
+ + "{fw.word}" + — {fw.reason} + +
+ ))} +
+
+ )} + + {fillerResult && fillerResult.fillerWords.length === 0 && ( +

No filler words detected.

+ )} +
+ )} + + {activeTab === 'clips' && ( +
+

+ AI analyzes your transcript and suggests the most engaging segments for a + YouTube Short or social media clip. +

+ + + {clipSuggestions.length > 0 && ( +
+ {clipSuggestions.map((clip, i) => ( +
+
+ {clip.title} + + {Math.round(clip.endTime - clip.startTime)}s + +
+

{clip.reason}

+
+ + +
+
+ ))} +
+ )} +
+ )} +
+
+ ); +} + +function TabButton({ + active, + onClick, + icon, + label, +}: { + active: boolean; + onClick: () => void; + icon: React.ReactNode; + label: string; +}) { + return ( + + ); +} diff --git a/frontend/src/components/ExportDialog.tsx b/frontend/src/components/ExportDialog.tsx new file mode 100644 index 0000000..5b682da --- /dev/null +++ b/frontend/src/components/ExportDialog.tsx @@ -0,0 +1,229 @@ +import { useState, useCallback, useMemo } from 'react'; +import { useEditorStore } from '../store/editorStore'; +import { Download, Loader2, Zap, Cog, Info } from 'lucide-react'; +import type { ExportOptions } from '../types/project'; + +export default function ExportDialog() { + const { videoPath, words, deletedRanges, isExporting, exportProgress, backendUrl, setExporting, getKeepSegments } = + useEditorStore(); + + const hasCuts = deletedRanges.length > 0; + + const [options, setOptions] = useState>({ + mode: 'fast', + resolution: '1080p', + format: 'mp4', + enhanceAudio: false, + captions: 'none', + }); + + const handleExport = useCallback(async () => { + if (!videoPath) return; + + const outputPath = await window.electronAPI?.saveFile({ + defaultPath: videoPath.replace(/\.[^.]+$/, '_edited.mp4'), + filters: [ + { name: 'MP4', extensions: ['mp4'] }, + { name: 'MOV', extensions: ['mov'] }, + { name: 'WebM', extensions: ['webm'] }, + ], + }); + if (!outputPath) return; + + setExporting(true, 0); + try { + const keepSegments = getKeepSegments(); + + const deletedSet = new Set(); + for (const range of deletedRanges) { + for (const idx of range.wordIndices) deletedSet.add(idx); + } + + const res = await fetch(`${backendUrl}/export`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + input_path: videoPath, + output_path: outputPath, + keep_segments: keepSegments, + words: options.captions !== 'none' ? words : undefined, + deleted_indices: options.captions !== 'none' ? 
[...deletedSet] : undefined, + ...options, + }), + }); + if (!res.ok) throw new Error(`Export failed: ${res.statusText}`); + setExporting(false, 100); + } catch (err) { + console.error('Export error:', err); + setExporting(false); + } + }, [videoPath, options, backendUrl, setExporting, getKeepSegments]); + + return ( +
+

Export Video

+ + {/* Mode */} +
+ Export Mode +
+ setOptions((o) => ({ ...o, mode: 'fast' }))} + icon={} + title="Fast" + desc="Stream copy, seconds" + /> + setOptions((o) => ({ ...o, mode: 'reencode' }))} + icon={} + title="Re-encode" + desc="Custom quality, slower" + /> +
+
+ + {/* Resolution (only for re-encode) */} + {options.mode === 'reencode' && ( + setOptions((o) => ({ ...o, resolution: v as ExportOptions['resolution'] }))} + options={[ + { value: '720p', label: '720p (HD)' }, + { value: '1080p', label: '1080p (Full HD)' }, + { value: '4k', label: '4K (Ultra HD)' }, + ]} + /> + )} + + {/* Format */} + setOptions((o) => ({ ...o, format: v as ExportOptions['format'] }))} + options={[ + { value: 'mp4', label: 'MP4 (H.264)' }, + { value: 'mov', label: 'MOV (QuickTime)' }, + { value: 'webm', label: 'WebM (VP9)' }, + ]} + /> + + {/* Audio enhancement */} + + + {/* Captions */} + setOptions((o) => ({ ...o, captions: v as ExportOptions['captions'] }))} + options={[ + { value: 'none', label: 'No captions' }, + { value: 'burn-in', label: 'Burn-in (permanent)' }, + { value: 'sidecar', label: 'Sidecar SRT file' }, + ]} + /> + + {/* Export button */} + + + {options.mode === 'fast' && !hasCuts && ( +

+ Fast mode uses stream copy — no quality loss, exports in seconds. +

+ )} + {options.mode === 'fast' && hasCuts && ( +
+ + + Word-level cuts require re-encoding for frame-accurate output. Export will + automatically use re-encode mode. This takes longer but ensures your cuts are precise. + +
+ )} +
+ ); +} + +function ModeCard({ + active, + onClick, + icon, + title, + desc, +}: { + active: boolean; + onClick: () => void; + icon: React.ReactNode; + title: string; + desc: string; +}) { + return ( + + ); +} + +function SelectField({ + label, + value, + onChange, + options, +}: { + label: string; + value: string; + onChange: (value: string) => void; + options: Array<{ value: string; label: string }>; +}) { + return ( +
+ + +
+ ); +} diff --git a/frontend/src/components/SettingsPanel.tsx b/frontend/src/components/SettingsPanel.tsx new file mode 100644 index 0000000..7919d1b --- /dev/null +++ b/frontend/src/components/SettingsPanel.tsx @@ -0,0 +1,192 @@ +import { useAIStore } from '../store/aiStore'; +import { useState, useEffect } from 'react'; +import type { AIProvider } from '../types/project'; +import { useEditorStore } from '../store/editorStore'; +import { Bot, Cloud, Brain, RefreshCw } from 'lucide-react'; + +export default function SettingsPanel() { + const { providers, defaultProvider, setProviderConfig, setDefaultProvider } = useAIStore(); + const { backendUrl } = useEditorStore(); + const [ollamaModels, setOllamaModels] = useState([]); + const [loadingModels, setLoadingModels] = useState(false); + + const fetchOllamaModels = async () => { + setLoadingModels(true); + try { + const res = await fetch(`${backendUrl}/ai/ollama-models`); + if (res.ok) { + const data = await res.json(); + setOllamaModels(data.models || []); + } + } catch { + setOllamaModels([]); + } finally { + setLoadingModels(false); + } + }; + + useEffect(() => { + fetchOllamaModels(); + }, [backendUrl]); + + const providerIcons: Record = { + ollama: , + openai: , + claude: , + }; + + const providerLabels: Record = { + ollama: 'Ollama (Local)', + openai: 'OpenAI', + claude: 'Claude (Anthropic)', + }; + + return ( +
+

AI Settings

+ + {/* Default provider selector */} +
+ +
+ {(['ollama', 'openai', 'claude'] as AIProvider[]).map((p) => ( + + ))} +
+
+ + {/* Ollama settings */} + + setProviderConfig('ollama', { baseUrl: v })} + placeholder="http://localhost:11434" + /> +
+
+ + +
+ {ollamaModels.length > 0 ? ( + + ) : ( + setProviderConfig('ollama', { model: v })} + placeholder="llama3" + /> + )} +
+
+ + {/* OpenAI settings */} + + setProviderConfig('openai', { apiKey: v })} + placeholder="sk-..." + type="password" + /> + setProviderConfig('openai', { model: v })} + placeholder="gpt-4o" + /> + + + {/* Claude settings */} + + setProviderConfig('claude', { apiKey: v })} + placeholder="sk-ant-..." + type="password" + /> + setProviderConfig('claude', { model: v })} + placeholder="claude-sonnet-4-20250514" + /> + +
+ ); +} + +function ProviderSection({ + title, + icon, + children, +}: { + title: string; + icon: React.ReactNode; + children: React.ReactNode; +}) { + return ( +
+
+ {icon} + {title} +
+
{children}
+
+ ); +} + +function InputField({ + label, + value, + onChange, + placeholder, + type = 'text', +}: { + label: string; + value: string; + onChange: (value: string) => void; + placeholder: string; + type?: string; +}) { + return ( +
+ {label && } + onChange(e.target.value)} + placeholder={placeholder} + className="w-full px-3 py-2 bg-editor-bg border border-editor-border rounded-lg text-xs text-editor-text placeholder:text-editor-text-muted/50 focus:outline-none focus:border-editor-accent" + /> +
+ ); +} diff --git a/frontend/src/components/TranscriptEditor.tsx b/frontend/src/components/TranscriptEditor.tsx new file mode 100644 index 0000000..48fa313 --- /dev/null +++ b/frontend/src/components/TranscriptEditor.tsx @@ -0,0 +1,204 @@ +import { useCallback, useRef, useEffect, useMemo, useState } from 'react'; +import { useEditorStore } from '../store/editorStore'; +import { Virtuoso } from 'react-virtuoso'; +import { Trash2, RotateCcw } from 'lucide-react'; + +export default function TranscriptEditor() { + const words = useEditorStore((s) => s.words); + const segments = useEditorStore((s) => s.segments); + const deletedRanges = useEditorStore((s) => s.deletedRanges); + const selectedWordIndices = useEditorStore((s) => s.selectedWordIndices); + const hoveredWordIndex = useEditorStore((s) => s.hoveredWordIndex); + const setSelectedWordIndices = useEditorStore((s) => s.setSelectedWordIndices); + const setHoveredWordIndex = useEditorStore((s) => s.setHoveredWordIndex); + const deleteSelectedWords = useEditorStore((s) => s.deleteSelectedWords); + const restoreRange = useEditorStore((s) => s.restoreRange); + const getWordAtTime = useEditorStore((s) => s.getWordAtTime); + + const selectionStart = useRef(null); + const wasDragging = useRef(false); + const virtuosoRef = useRef(null); + + const deletedSet = useMemo(() => { + const s = new Set(); + for (const range of deletedRanges) { + for (const idx of range.wordIndices) s.add(idx); + } + return s; + }, [deletedRanges]); + + const selectedSet = useMemo(() => new Set(selectedWordIndices), [selectedWordIndices]); + + const [activeWordIndex, setActiveWordIndex] = useState(-1); + + useEffect(() => { + if (words.length === 0) return; + const interval = setInterval(() => { + const video = document.querySelector('video') as HTMLVideoElement | null; + if (!video) return; + const idx = getWordAtTime(video.currentTime); + setActiveWordIndex((prev) => (prev === idx ? 
prev : idx)); + }, 250); + return () => clearInterval(interval); + }, [words, getWordAtTime]); + + // Auto-scroll to active segment via Virtuoso + useEffect(() => { + if (activeWordIndex < 0 || segments.length === 0) return; + const segIdx = segments.findIndex((seg) => { + const start = seg.globalStartIndex ?? 0; + return activeWordIndex >= start && activeWordIndex < start + seg.words.length; + }); + if (segIdx >= 0 && virtuosoRef.current) { + virtuosoRef.current.scrollIntoView({ index: segIdx, behavior: 'smooth', align: 'center' }); + } + }, [activeWordIndex, segments]); + + const handleWordMouseDown = useCallback( + (index: number, e: React.MouseEvent) => { + e.preventDefault(); + wasDragging.current = false; + if (e.shiftKey && selectedWordIndices.length > 0) { + const first = selectedWordIndices[0]; + const start = Math.min(first, index); + const end = Math.max(first, index); + const indices = []; + for (let i = start; i <= end; i++) indices.push(i); + setSelectedWordIndices(indices); + } else { + selectionStart.current = index; + setSelectedWordIndices([index]); + } + }, + [selectedWordIndices, setSelectedWordIndices], + ); + + const handleWordMouseEnter = useCallback( + (index: number) => { + setHoveredWordIndex(index); + if (selectionStart.current !== null) { + wasDragging.current = true; + const start = Math.min(selectionStart.current, index); + const end = Math.max(selectionStart.current, index); + const indices = []; + for (let i = start; i <= end; i++) indices.push(i); + setSelectedWordIndices(indices); + } + }, + [setHoveredWordIndex, setSelectedWordIndices], + ); + + const handleMouseUp = useCallback(() => { + selectionStart.current = null; + }, []); + + const handleClickOutside = useCallback( + (e: React.MouseEvent) => { + if (wasDragging.current) { + wasDragging.current = false; + return; + } + if ((e.target as HTMLElement).dataset.wordIndex === undefined) { + setSelectedWordIndices([]); + } + }, + [setSelectedWordIndices], + ); + + const 
getRangeForWord = useCallback( + (wordIndex: number) => deletedRanges.find((r) => r.wordIndices.includes(wordIndex)), + [deletedRanges], + ); + + const renderSegment = useCallback( + (index: number) => { + const segment = segments[index]; + if (!segment) return null; + return ( +
+ {segment.speaker && ( +
+ {segment.speaker} +
+ )} +

+ {segment.words.map((word, localIndex) => { + const globalIndex = (segment.globalStartIndex ?? 0) + localIndex; + const isDeleted = deletedSet.has(globalIndex); + const isSelected = selectedSet.has(globalIndex); + const isActive = globalIndex === activeWordIndex; + const isHovered = globalIndex === hoveredWordIndex; + const deletedRange = isDeleted ? getRangeForWord(globalIndex) : null; + + return ( + handleWordMouseDown(globalIndex, e)} + onMouseEnter={() => handleWordMouseEnter(globalIndex)} + onMouseLeave={() => setHoveredWordIndex(null)} + className={` + relative px-[2px] py-[1px] rounded cursor-pointer transition-colors + ${isDeleted ? 'line-through text-editor-text-muted/40 bg-editor-word-deleted' : ''} + ${isSelected && !isDeleted ? 'bg-editor-word-selected text-white' : ''} + ${isActive && !isDeleted && !isSelected ? 'bg-editor-accent/20 text-editor-accent' : ''} + ${isHovered && !isDeleted && !isSelected && !isActive ? 'bg-editor-word-hover' : ''} + `} + > + {word.word}{' '} + {isDeleted && isHovered && deletedRange && ( + + )} + + ); + })} +

+
+ ); + }, + [segments, deletedSet, selectedSet, activeWordIndex, hoveredWordIndex, handleWordMouseDown, handleWordMouseEnter, setHoveredWordIndex, getRangeForWord, restoreRange], + ); + + return ( +
+
+ + {words.length} words · {deletedRanges.length} cuts + + {selectedWordIndices.length > 0 && ( + + )} +
+ +
+ +
+
+ ); +} diff --git a/frontend/src/components/VideoPlayer.tsx b/frontend/src/components/VideoPlayer.tsx new file mode 100644 index 0000000..f1de762 --- /dev/null +++ b/frontend/src/components/VideoPlayer.tsx @@ -0,0 +1,133 @@ +import { useRef, useCallback, useState, useEffect } from 'react'; +import { useEditorStore } from '../store/editorStore'; +import { useVideoSync } from '../hooks/useVideoSync'; +import { Play, Pause, SkipBack, SkipForward, Volume2 } from 'lucide-react'; + +export default function VideoPlayer() { + const videoRef = useRef(null); + const videoUrl = useEditorStore((s) => s.videoUrl); + const isPlaying = useEditorStore((s) => s.isPlaying); + const duration = useEditorStore((s) => s.duration); + const { seekTo, togglePlay } = useVideoSync(videoRef); + + const [displayTime, setDisplayTime] = useState(0); + + useEffect(() => { + const video = videoRef.current; + if (!video) return; + let raf = 0; + const tick = () => { + setDisplayTime(video.currentTime); + raf = requestAnimationFrame(tick); + }; + raf = requestAnimationFrame(tick); + return () => cancelAnimationFrame(raf); + }, [videoUrl]); + + const formatTime = (seconds: number) => { + const m = Math.floor(seconds / 60); + const s = Math.floor(seconds % 60); + return `${m}:${s.toString().padStart(2, '0')}`; + }; + + const handleProgressClick = useCallback( + (e: React.MouseEvent) => { + const rect = e.currentTarget.getBoundingClientRect(); + const ratio = (e.clientX - rect.left) / rect.width; + seekTo(ratio * duration); + }, + [seekTo, duration], + ); + + const skip = useCallback( + (delta: number) => { + const video = videoRef.current; + if (!video) return; + seekTo(Math.max(0, Math.min(duration, video.currentTime + delta))); + }, + [seekTo, duration], + ); + + if (!videoUrl) { + return ( +
+ No video loaded +
+ ); + } + + return ( +
+
+
+ +
+
+
0 ? `${(displayTime / duration) * 100}%` : '0%' }} + > +
+
+
+ +
+
+ skip(-5)} title="Back 5s"> + + + + {isPlaying ? : } + + skip(5)} title="Forward 5s"> + + +
+ +
+ + + {formatTime(displayTime)} / {formatTime(duration)} + +
+
+
+
+ ); +} + +function ControlButton({ + children, + onClick, + title, + primary, +}: { + children: React.ReactNode; + onClick: () => void; + title: string; + primary?: boolean; +}) { + return ( + + ); +} diff --git a/frontend/src/components/WaveformTimeline.tsx b/frontend/src/components/WaveformTimeline.tsx new file mode 100644 index 0000000..080a481 --- /dev/null +++ b/frontend/src/components/WaveformTimeline.tsx @@ -0,0 +1,220 @@ +import { useRef, useEffect, useCallback, useState } from 'react'; +import { useEditorStore } from '../store/editorStore'; +import { ZoomIn, ZoomOut, AlertTriangle } from 'lucide-react'; + +export default function WaveformTimeline() { + const waveCanvasRef = useRef(null); + const headCanvasRef = useRef(null); + const containerRef = useRef(null); + const [audioError, setAudioError] = useState(null); + + const videoUrl = useEditorStore((s) => s.videoUrl); + const videoPath = useEditorStore((s) => s.videoPath); + const duration = useEditorStore((s) => s.duration); + const deletedRanges = useEditorStore((s) => s.deletedRanges); + const setCurrentTime = useEditorStore((s) => s.setCurrentTime); + + const audioContextRef = useRef(null); + const audioBufferRef = useRef(null); + const zoomRef = useRef(1); + const rafRef = useRef(0); + + useEffect(() => { + if (!videoUrl || !videoPath) return; + setAudioError(null); + + const loadAudio = async () => { + try { + const ctx = new AudioContext(); + audioContextRef.current = ctx; + + const response = await fetch(videoUrl); + if (!response.ok) throw new Error(`HTTP ${response.status}`); + const arrayBuffer = await response.arrayBuffer(); + const audioBuffer = await ctx.decodeAudioData(arrayBuffer); + audioBufferRef.current = audioBuffer; + drawStaticWaveform(); + } catch (err) { + console.warn('Could not decode audio for waveform:', err); + setAudioError('Waveform unavailable — audio could not be decoded'); + } + }; + + loadAudio(); + + return () => { + audioContextRef.current?.close(); + }; + }, 
[videoUrl, videoPath]); + + const drawStaticWaveform = useCallback(() => { + const canvas = waveCanvasRef.current; + const buffer = audioBufferRef.current; + if (!canvas || !buffer) return; + + const ctx = canvas.getContext('2d'); + if (!ctx) return; + + const dpr = window.devicePixelRatio || 1; + const rect = canvas.getBoundingClientRect(); + canvas.width = rect.width * dpr; + canvas.height = rect.height * dpr; + ctx.scale(dpr, dpr); + + const width = rect.width; + const height = rect.height; + const channelData = buffer.getChannelData(0); + const samplesPerPixel = Math.floor(channelData.length / width); + + ctx.clearRect(0, 0, width, height); + + for (const range of deletedRanges) { + const x1 = (range.start / buffer.duration) * width; + const x2 = (range.end / buffer.duration) * width; + ctx.fillStyle = 'rgba(239, 68, 68, 0.15)'; + ctx.fillRect(x1, 0, x2 - x1, height); + } + + const mid = height / 2; + ctx.beginPath(); + ctx.strokeStyle = '#4a4d5e'; + ctx.lineWidth = 1; + + for (let x = 0; x < width; x++) { + const start = x * samplesPerPixel; + const end = Math.min(start + samplesPerPixel, channelData.length); + + let min = 0; + let max = 0; + for (let i = start; i < end; i++) { + if (channelData[i] < min) min = channelData[i]; + if (channelData[i] > max) max = channelData[i]; + } + + const yMin = mid + min * mid * 0.9; + const yMax = mid + max * mid * 0.9; + ctx.moveTo(x, yMin); + ctx.lineTo(x, yMax); + } + ctx.stroke(); + }, [deletedRanges]); + + // Redraw static layer when deletedRanges change + useEffect(() => { + drawStaticWaveform(); + }, [drawStaticWaveform]); + + // Lightweight RAF loop for playhead only -- reads video.currentTime directly, + // never triggers React re-renders + useEffect(() => { + const headCanvas = headCanvasRef.current; + const waveCanvas = waveCanvasRef.current; + if (!headCanvas || !waveCanvas) return; + + const tick = () => { + const ctx = headCanvas.getContext('2d'); + if (!ctx) { rafRef.current = requestAnimationFrame(tick); 
return; } + + const buffer = audioBufferRef.current; + const video = document.querySelector('video') as HTMLVideoElement | null; + const dur = buffer?.duration ?? 0; + + const dpr = window.devicePixelRatio || 1; + const rect = headCanvas.getBoundingClientRect(); + if (headCanvas.width !== waveCanvas.width || headCanvas.height !== waveCanvas.height) { + headCanvas.width = rect.width * dpr; + headCanvas.height = rect.height * dpr; + } + ctx.setTransform(dpr, 0, 0, dpr, 0, 0); + + const width = rect.width; + const height = rect.height; + ctx.clearRect(0, 0, width, height); + + if (dur > 0 && video) { + const px = (video.currentTime / dur) * width; + ctx.beginPath(); + ctx.strokeStyle = '#6366f1'; + ctx.lineWidth = 2; + ctx.moveTo(px, 0); + ctx.lineTo(px, height); + ctx.stroke(); + } + + rafRef.current = requestAnimationFrame(tick); + }; + + rafRef.current = requestAnimationFrame(tick); + return () => cancelAnimationFrame(rafRef.current); + }, [videoUrl]); + + useEffect(() => { + const observer = new ResizeObserver(() => { + drawStaticWaveform(); + }); + if (containerRef.current) observer.observe(containerRef.current); + return () => observer.disconnect(); + }, [drawStaticWaveform]); + + const handleClick = useCallback( + (e: React.MouseEvent) => { + if (!headCanvasRef.current || duration === 0) return; + const rect = headCanvasRef.current.getBoundingClientRect(); + const ratio = (e.clientX - rect.left) / rect.width; + const newTime = ratio * duration; + setCurrentTime(newTime); + const video = document.querySelector('video'); + if (video) video.currentTime = newTime; + }, + [duration, setCurrentTime], + ); + + if (!videoUrl) { + return ( +
+ Load a video to see the waveform +
+ ); + } + + return ( +
+
+ + Timeline + +
+ + +
+
+ {audioError ? ( +
+ + {audioError} +
+ ) : ( +
+ + +
+ )} +
+ ); +} diff --git a/frontend/src/hooks/useKeyboardShortcuts.ts b/frontend/src/hooks/useKeyboardShortcuts.ts new file mode 100644 index 0000000..91c9ce4 --- /dev/null +++ b/frontend/src/hooks/useKeyboardShortcuts.ts @@ -0,0 +1,236 @@ +import { useEffect, useRef } from 'react'; +import { useEditorStore } from '../store/editorStore'; + +export function useKeyboardShortcuts() { + const deleteSelectedWords = useEditorStore((s) => s.deleteSelectedWords); + const selectedWordIndices = useEditorStore((s) => s.selectedWordIndices); + + const playbackRateRef = useRef(1); + + useEffect(() => { + const getVideo = (): HTMLVideoElement | null => document.querySelector('video'); + + const handler = (e: KeyboardEvent) => { + const target = e.target as HTMLElement; + if (target.tagName === 'INPUT' || target.tagName === 'TEXTAREA' || target.tagName === 'SELECT') return; + + const video = getVideo(); + + switch (true) { + // --- Undo / Redo --- + case e.key === 'z' && (e.ctrlKey || e.metaKey) && e.shiftKey: { + e.preventDefault(); + useEditorStore.temporal.getState().redo(); + return; + } + case e.key === 'z' && (e.ctrlKey || e.metaKey): { + e.preventDefault(); + useEditorStore.temporal.getState().undo(); + return; + } + + // --- Delete / Backspace: delete selected words --- + case e.key === 'Delete' || e.key === 'Backspace': { + if (selectedWordIndices.length > 0) { + e.preventDefault(); + deleteSelectedWords(); + } + return; + } + + // --- Space: play / pause --- + case e.key === ' ' && !e.ctrlKey: { + e.preventDefault(); + if (video) { + if (video.paused) video.play(); + else video.pause(); + } + return; + } + + // --- J: reverse / slow down --- + case e.key === 'j' || e.key === 'J': { + e.preventDefault(); + if (video) { + playbackRateRef.current = Math.max(-2, playbackRateRef.current - 0.5); + if (playbackRateRef.current < 0) { + // HTML5 video doesn't support negative rates natively; step back + video.currentTime = Math.max(0, video.currentTime - 2); + } else { + 
video.playbackRate = playbackRateRef.current; + if (video.paused) video.play(); + } + } + return; + } + + // --- K: pause --- + case e.key === 'k' || e.key === 'K': { + e.preventDefault(); + if (video) { + video.pause(); + playbackRateRef.current = 1; + } + return; + } + + // --- L: forward / speed up --- + case e.key === 'l' || e.key === 'L': { + e.preventDefault(); + if (video) { + playbackRateRef.current = Math.min(4, playbackRateRef.current + 0.5); + video.playbackRate = Math.max(0.25, playbackRateRef.current); + if (video.paused) video.play(); + } + return; + } + + // --- Arrow Left: seek back 5s --- + case e.key === 'ArrowLeft' && !e.ctrlKey: { + e.preventDefault(); + if (video) video.currentTime = Math.max(0, video.currentTime - 5); + return; + } + + // --- Arrow Right: seek forward 5s --- + case e.key === 'ArrowRight' && !e.ctrlKey: { + e.preventDefault(); + if (video) video.currentTime = Math.min(video.duration, video.currentTime + 5); + return; + } + + // --- [ mark in-point (home) --- + case e.key === '[': { + e.preventDefault(); + if (video) video.currentTime = 0; + return; + } + + // --- ] mark out-point (end) --- + case e.key === ']': { + e.preventDefault(); + if (video) video.currentTime = video.duration; + return; + } + + // --- Ctrl+S: save project --- + case e.key === 's' && (e.ctrlKey || e.metaKey): { + e.preventDefault(); + saveProject(); + return; + } + + // --- Ctrl+E: export --- + case e.key === 'e' && (e.ctrlKey || e.metaKey): { + e.preventDefault(); + // Trigger export panel via DOM click + const exportBtn = document.querySelector('[title="Export"]') as HTMLButtonElement; + if (exportBtn) exportBtn.click(); + return; + } + + // --- ?: show shortcut cheatsheet --- + case e.key === '?' 
|| (e.key === '/' && e.shiftKey): { + e.preventDefault(); + toggleCheatsheet(); + return; + } + + default: + break; + } + }; + + window.addEventListener('keydown', handler); + return () => window.removeEventListener('keydown', handler); + }, [deleteSelectedWords, selectedWordIndices]); +} + +async function saveProject() { + const state = useEditorStore.getState(); + if (!state.videoPath || state.words.length === 0) return; + + try { + const projectData = { + version: 1, + videoPath: state.videoPath, + words: state.words, + segments: state.segments, + deletedRanges: state.deletedRanges, + language: state.language, + createdAt: new Date().toISOString(), + modifiedAt: new Date().toISOString(), + }; + + const outputPath = await window.electronAPI?.saveFile({ + defaultPath: state.videoPath.replace(/\.[^.]+$/, '.aive'), + filters: [{ name: 'CutScript Project', extensions: ['aive'] }], + }); + + if (outputPath) { + if (window.electronAPI?.writeFile) { + await window.electronAPI.writeFile(outputPath, JSON.stringify(projectData, null, 2)); + } else { + const blob = new Blob([JSON.stringify(projectData, null, 2)], { type: 'application/json' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = outputPath.split(/[\\/]/).pop() || 'project.aive'; + a.click(); + URL.revokeObjectURL(url); + } + } + } catch (err) { + console.error('Failed to save project:', err); + } +} + +let cheatsheetVisible = false; + +function toggleCheatsheet() { + const existing = document.getElementById('keyboard-cheatsheet'); + if (existing) { + existing.remove(); + cheatsheetVisible = false; + return; + } + + cheatsheetVisible = true; + const overlay = document.createElement('div'); + overlay.id = 'keyboard-cheatsheet'; + overlay.style.cssText = + 'position:fixed;inset:0;z-index:9999;display:flex;align-items:center;justify-content:center;background:rgba(0,0,0,0.7);'; + overlay.onclick = () => { + overlay.remove(); + cheatsheetVisible = false; 
+ }; + + const shortcuts = [ + ['Space', 'Play / Pause'], + ['J', 'Reverse / Slow down'], + ['K', 'Pause'], + ['L', 'Forward / Speed up'], + ['\u2190 / \u2192', 'Seek \u00b15 seconds'], + ['Delete', 'Delete selected words'], + ['Ctrl+Z', 'Undo'], + ['Ctrl+Shift+Z', 'Redo'], + ['Ctrl+S', 'Save project'], + ['Ctrl+E', 'Export'], + ['?', 'This cheatsheet'], + ]; + + const rows = shortcuts + .map( + ([key, desc]) => + `${key}${desc}`, + ) + .join(''); + + overlay.innerHTML = `
+

Keyboard Shortcuts

+ ${rows}
+

Press ? or click outside to close

+
`; + + document.body.appendChild(overlay); +} diff --git a/frontend/src/hooks/useVideoSync.ts b/frontend/src/hooks/useVideoSync.ts new file mode 100644 index 0000000..74aae9c --- /dev/null +++ b/frontend/src/hooks/useVideoSync.ts @@ -0,0 +1,69 @@ +import { useCallback, useRef, useEffect } from 'react'; +import { useEditorStore } from '../store/editorStore'; + +export function useVideoSync(videoRef: React.RefObject) { + const rafRef = useRef(0); + const { + setCurrentTime, + setDuration, + setIsPlaying, + deletedRanges, + } = useEditorStore(); + + const seekTo = useCallback( + (time: number) => { + if (videoRef.current) { + videoRef.current.currentTime = time; + setCurrentTime(time); + } + }, + [videoRef, setCurrentTime], + ); + + const togglePlay = useCallback(() => { + if (!videoRef.current) return; + if (videoRef.current.paused) { + videoRef.current.play(); + } else { + videoRef.current.pause(); + } + }, [videoRef]); + + useEffect(() => { + const video = videoRef.current; + if (!video) return; + + const onTimeUpdate = () => { + cancelAnimationFrame(rafRef.current); + rafRef.current = requestAnimationFrame(() => { + const t = video.currentTime; + for (const range of deletedRanges) { + if (t >= range.start && t < range.end) { + video.currentTime = range.end; + return; + } + } + setCurrentTime(t); + }); + }; + + const onPlay = () => setIsPlaying(true); + const onPause = () => setIsPlaying(false); + const onLoadedMetadata = () => setDuration(video.duration); + + video.addEventListener('timeupdate', onTimeUpdate); + video.addEventListener('play', onPlay); + video.addEventListener('pause', onPause); + video.addEventListener('loadedmetadata', onLoadedMetadata); + + return () => { + video.removeEventListener('timeupdate', onTimeUpdate); + video.removeEventListener('play', onPlay); + video.removeEventListener('pause', onPause); + video.removeEventListener('loadedmetadata', onLoadedMetadata); + cancelAnimationFrame(rafRef.current); + }; + }, [videoRef, deletedRanges, 
setCurrentTime, setIsPlaying, setDuration]); + + return { seekTo, togglePlay }; +} diff --git a/frontend/src/index.css b/frontend/src/index.css new file mode 100644 index 0000000..80a0377 --- /dev/null +++ b/frontend/src/index.css @@ -0,0 +1,37 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: 'Inter', system-ui, -apple-system, sans-serif; + overflow: hidden; + user-select: none; +} + +::-webkit-scrollbar { + width: 6px; + height: 6px; +} + +::-webkit-scrollbar-track { + background: transparent; +} + +::-webkit-scrollbar-thumb { + background: #2a2d3a; + border-radius: 3px; +} + +::-webkit-scrollbar-thumb:hover { + background: #3a3d4a; +} + +video::-webkit-media-controls { + display: none !important; +} diff --git a/frontend/src/main.tsx b/frontend/src/main.tsx new file mode 100644 index 0000000..9aa52ff --- /dev/null +++ b/frontend/src/main.tsx @@ -0,0 +1,10 @@ +import React from 'react'; +import ReactDOM from 'react-dom/client'; +import App from './App'; +import './index.css'; + +ReactDOM.createRoot(document.getElementById('root')!).render( + + + , +); diff --git a/frontend/src/store/aiStore.ts b/frontend/src/store/aiStore.ts new file mode 100644 index 0000000..78bc277 --- /dev/null +++ b/frontend/src/store/aiStore.ts @@ -0,0 +1,129 @@ +import { create } from 'zustand'; +import { persist } from 'zustand/middleware'; +import type { AIProvider, AIProviderConfig, FillerWordResult, ClipSuggestion } from '../types/project'; + +const ENCRYPTED_KEY_PREFIX = 'aive_enc_'; + +interface AIState { + providers: Record; + defaultProvider: AIProvider; + customFillerWords: string; + fillerResult: FillerWordResult | null; + clipSuggestions: ClipSuggestion[]; + isProcessing: boolean; + processingMessage: string; + _keysHydrated: boolean; +} + +interface AIActions { + setProviderConfig: (provider: AIProvider, config: Partial) => void; + setDefaultProvider: (provider: AIProvider) => 
void; + setCustomFillerWords: (words: string) => void; + setFillerResult: (result: FillerWordResult | null) => void; + setClipSuggestions: (suggestions: ClipSuggestion[]) => void; + setProcessing: (active: boolean, message?: string) => void; + hydrateKeys: () => Promise; +} + +async function encryptAndStore(key: string, value: string): Promise { + if (!value) { + localStorage.removeItem(ENCRYPTED_KEY_PREFIX + key); + return; + } + if (window.electronAPI) { + const encrypted = await window.electronAPI.encryptString(value); + localStorage.setItem(ENCRYPTED_KEY_PREFIX + key, encrypted); + } else { + localStorage.setItem(ENCRYPTED_KEY_PREFIX + key, btoa(value)); + } +} + +async function loadAndDecrypt(key: string): Promise { + const stored = localStorage.getItem(ENCRYPTED_KEY_PREFIX + key); + if (!stored) return ''; + if (window.electronAPI) { + try { + return await window.electronAPI.decryptString(stored); + } catch { + return ''; + } + } + try { + return atob(stored); + } catch { + return ''; + } +} + +export const useAIStore = create()( + persist( + (set, get) => ({ + providers: { + ollama: { provider: 'ollama', baseUrl: 'http://localhost:11434', model: 'llama3' }, + openai: { provider: 'openai', apiKey: '', model: 'gpt-4o' }, + claude: { provider: 'claude', apiKey: '', model: 'claude-sonnet-4-20250514' }, + }, + defaultProvider: 'ollama', + customFillerWords: '', + fillerResult: null, + clipSuggestions: [], + isProcessing: false, + processingMessage: '', + _keysHydrated: false, + + setProviderConfig: (provider, config) => { + set((state) => ({ + providers: { + ...state.providers, + [provider]: { ...state.providers[provider], ...config }, + }, + })); + + if (config.apiKey !== undefined) { + encryptAndStore(`${provider}_apiKey`, config.apiKey); + } + }, + + setDefaultProvider: (provider) => set({ defaultProvider: provider }), + + setCustomFillerWords: (words) => set({ customFillerWords: words }), + + setFillerResult: (result) => set({ fillerResult: result }), + + 
setClipSuggestions: (suggestions) => set({ clipSuggestions: suggestions }), + + setProcessing: (active, message) => + set({ isProcessing: active, processingMessage: message ?? '' }), + + hydrateKeys: async () => { + const [openaiKey, claudeKey] = await Promise.all([ + loadAndDecrypt('openai_apiKey'), + loadAndDecrypt('claude_apiKey'), + ]); + const state = get(); + set({ + providers: { + ...state.providers, + openai: { ...state.providers.openai, apiKey: openaiKey }, + claude: { ...state.providers.claude, apiKey: claudeKey }, + }, + _keysHydrated: true, + }); + }, + }), + { + name: 'aive-ai-settings', + partialize: (state) => ({ + providers: { + ollama: { ...state.providers.ollama, apiKey: undefined }, + openai: { ...state.providers.openai, apiKey: '' }, + claude: { ...state.providers.claude, apiKey: '' }, + }, + defaultProvider: state.defaultProvider, + customFillerWords: state.customFillerWords, + }), + }, + ), +); + +useAIStore.getState().hydrateKeys(); diff --git a/frontend/src/store/editorStore.ts b/frontend/src/store/editorStore.ts new file mode 100644 index 0000000..ec9448d --- /dev/null +++ b/frontend/src/store/editorStore.ts @@ -0,0 +1,232 @@ +import { create } from 'zustand'; +import { temporal } from 'zundo'; +import type { Word, Segment, DeletedRange, TranscriptionResult } from '../types/project'; + +interface EditorState { + videoPath: string | null; + videoUrl: string | null; + words: Word[]; + segments: Segment[]; + deletedRanges: DeletedRange[]; + language: string; + + currentTime: number; + duration: number; + isPlaying: boolean; + + selectedWordIndices: number[]; + hoveredWordIndex: number | null; + + isTranscribing: boolean; + transcriptionProgress: number; + isExporting: boolean; + exportProgress: number; + + backendUrl: string; +} + +interface EditorActions { + setBackendUrl: (url: string) => void; + loadVideo: (path: string) => void; + setTranscription: (result: TranscriptionResult) => void; + setCurrentTime: (time: number) => void; + 
setDuration: (duration: number) => void; + setIsPlaying: (playing: boolean) => void; + setSelectedWordIndices: (indices: number[]) => void; + setHoveredWordIndex: (index: number | null) => void; + deleteSelectedWords: () => void; + deleteWordRange: (startIndex: number, endIndex: number) => void; + restoreRange: (rangeId: string) => void; + setTranscribing: (active: boolean, progress?: number) => void; + setExporting: (active: boolean, progress?: number) => void; + getKeepSegments: () => Array<{ start: number; end: number }>; + getWordAtTime: (time: number) => number; + loadProject: (projectData: any) => void; + reset: () => void; +} + +const initialState: EditorState = { + videoPath: null, + videoUrl: null, + words: [], + segments: [], + deletedRanges: [], + language: '', + currentTime: 0, + duration: 0, + isPlaying: false, + selectedWordIndices: [], + hoveredWordIndex: null, + isTranscribing: false, + transcriptionProgress: 0, + isExporting: false, + exportProgress: 0, + backendUrl: 'http://localhost:8642', +}; + +let nextRangeId = 1; + +export const useEditorStore = create()( + temporal( + (set, get) => ({ + ...initialState, + + setBackendUrl: (url) => set({ backendUrl: url }), + + loadVideo: (path) => { + const backend = get().backendUrl; + const url = `${backend}/file?path=${encodeURIComponent(path)}`; + set({ + ...initialState, + backendUrl: backend, + videoPath: path, + videoUrl: url, + }); + }, + + setTranscription: (result) => { + let globalIdx = 0; + const annotatedSegments = result.segments.map((seg) => { + const annotated = { ...seg, globalStartIndex: globalIdx }; + globalIdx += seg.words.length; + return annotated; + }); + set({ + words: result.words, + segments: annotatedSegments, + language: result.language, + deletedRanges: [], + selectedWordIndices: [], + }); + }, + + setCurrentTime: (time) => set({ currentTime: time }), + setDuration: (duration) => set({ duration }), + setIsPlaying: (playing) => set({ isPlaying: playing }), + 
setSelectedWordIndices: (indices) => set({ selectedWordIndices: indices }), + setHoveredWordIndex: (index) => set({ hoveredWordIndex: index }), + + deleteSelectedWords: () => { + const { selectedWordIndices, words, deletedRanges } = get(); + if (selectedWordIndices.length === 0) return; + + const sorted = [...selectedWordIndices].sort((a, b) => a - b); + const startWord = words[sorted[0]]; + const endWord = words[sorted[sorted.length - 1]]; + + const newRange: DeletedRange = { + id: `dr_${nextRangeId++}`, + start: startWord.start, + end: endWord.end, + wordIndices: sorted, + }; + + set({ + deletedRanges: [...deletedRanges, newRange], + selectedWordIndices: [], + }); + }, + + deleteWordRange: (startIndex, endIndex) => { + const { words, deletedRanges } = get(); + const indices = []; + for (let i = startIndex; i <= endIndex; i++) indices.push(i); + + const newRange: DeletedRange = { + id: `dr_${nextRangeId++}`, + start: words[startIndex].start, + end: words[endIndex].end, + wordIndices: indices, + }; + + set({ deletedRanges: [...deletedRanges, newRange] }); + }, + + restoreRange: (rangeId) => { + const { deletedRanges } = get(); + set({ deletedRanges: deletedRanges.filter((r) => r.id !== rangeId) }); + }, + + setTranscribing: (active, progress) => + set({ + isTranscribing: active, + transcriptionProgress: progress ?? (active ? 0 : 100), + }), + + setExporting: (active, progress) => + set({ + isExporting: active, + exportProgress: progress ?? (active ? 
0 : 100), + }), + + getKeepSegments: () => { + const { words, deletedRanges, duration } = get(); + if (words.length === 0) return [{ start: 0, end: duration }]; + + const deletedSet = new Set(); + for (const range of deletedRanges) { + for (const idx of range.wordIndices) deletedSet.add(idx); + } + + const segments: Array<{ start: number; end: number }> = []; + let segStart: number | null = null; + + for (let i = 0; i < words.length; i++) { + if (!deletedSet.has(i)) { + if (segStart === null) segStart = words[i].start; + } else { + if (segStart !== null) { + segments.push({ start: segStart, end: words[i - 1].end }); + segStart = null; + } + } + } + + if (segStart !== null) { + segments.push({ start: segStart, end: words[words.length - 1].end }); + } + + return segments; + }, + + getWordAtTime: (time) => { + const { words } = get(); + let lo = 0; + let hi = words.length - 1; + while (lo <= hi) { + const mid = (lo + hi) >>> 1; + if (words[mid].end < time) lo = mid + 1; + else if (words[mid].start > time) hi = mid - 1; + else return mid; + } + return lo < words.length ? 
lo : words.length - 1; + }, + + loadProject: (data) => { + const backend = get().backendUrl; + const url = `${backend}/file?path=${encodeURIComponent(data.videoPath)}`; + + let globalIdx = 0; + const annotatedSegments = (data.segments || []).map((seg: Segment) => { + const annotated = { ...seg, globalStartIndex: globalIdx }; + globalIdx += seg.words.length; + return annotated; + }); + + set({ + ...initialState, + backendUrl: backend, + videoPath: data.videoPath, + videoUrl: url, + words: data.words || [], + segments: annotatedSegments, + deletedRanges: data.deletedRanges || [], + language: data.language || '', + }); + }, + + reset: () => set(initialState), + }), + { limit: 100 }, + ), +); diff --git a/frontend/src/types/project.ts b/frontend/src/types/project.ts new file mode 100644 index 0000000..7e4892d --- /dev/null +++ b/frontend/src/types/project.ts @@ -0,0 +1,86 @@ +export interface Word { + word: string; + start: number; + end: number; + confidence: number; + speaker?: string; +} + +export interface Segment { + id: number; + start: number; + end: number; + text: string; + words: Word[]; + speaker?: string; + globalStartIndex: number; +} + +export interface TimeRange { + start: number; + end: number; +} + +export interface DeletedRange extends TimeRange { + id: string; + wordIndices: number[]; +} + +export interface ProjectFile { + version: 1; + videoPath: string; + words: Word[]; + segments: Segment[]; + deletedRanges: DeletedRange[]; + language: string; + createdAt: string; + modifiedAt: string; +} + +export interface TranscriptionResult { + words: Word[]; + segments: Segment[]; + language: string; +} + +export interface ExportOptions { + outputPath: string; + mode: 'fast' | 'reencode'; + resolution: '720p' | '1080p' | '4k'; + format: 'mp4' | 'mov' | 'webm'; + enhanceAudio: boolean; + captions: 'none' | 'burn-in' | 'sidecar'; + captionStyle?: CaptionStyle; +} + +export interface CaptionStyle { + fontName: string; + fontSize: number; + fontColor: string; + 
backgroundColor: string; + position: 'bottom' | 'top' | 'center'; + bold: boolean; +} + +export type AIProvider = 'ollama' | 'openai' | 'claude'; + +export interface AIProviderConfig { + provider: AIProvider; + apiKey?: string; + baseUrl?: string; + model: string; +} + +export interface FillerWordResult { + wordIndices: number[]; + fillerWords: Array<{ index: number; word: string; reason: string }>; +} + +export interface ClipSuggestion { + title: string; + startWordIndex: number; + endWordIndex: number; + startTime: number; + endTime: number; + reason: string; +} diff --git a/frontend/src/vite-env.d.ts b/frontend/src/vite-env.d.ts new file mode 100644 index 0000000..089ea01 --- /dev/null +++ b/frontend/src/vite-env.d.ts @@ -0,0 +1,16 @@ +/// + +interface ElectronAPI { + openFile: (options?: Record) => Promise; + saveFile: (options?: Record) => Promise; + openProject: () => Promise; + getBackendUrl: () => Promise; + encryptString: (data: string) => Promise; + decryptString: (encrypted: string) => Promise; + readFile: (path: string) => Promise; + writeFile: (path: string, content: string) => Promise; +} + +interface Window { + electronAPI?: ElectronAPI; +} diff --git a/frontend/tailwind.config.js b/frontend/tailwind.config.js new file mode 100644 index 0000000..39a4cbf --- /dev/null +++ b/frontend/tailwind.config.js @@ -0,0 +1,30 @@ +/** @type {import('tailwindcss').Config} */ +export default { + content: ['./index.html', './src/**/*.{js,ts,jsx,tsx}'], + theme: { + extend: { + colors: { + editor: { + bg: '#0f1117', + surface: '#1a1d27', + border: '#2a2d3a', + accent: '#6366f1', + 'accent-hover': '#818cf8', + text: '#e2e8f0', + 'text-muted': '#94a3b8', + danger: '#ef4444', + success: '#22c55e', + warning: '#f59e0b', + 'word-hover': 'rgba(99, 102, 241, 0.15)', + 'word-selected': 'rgba(99, 102, 241, 0.3)', + 'word-deleted': 'rgba(239, 68, 68, 0.2)', + 'word-filler': 'rgba(245, 158, 11, 0.25)', + }, + }, + fontFamily: { + mono: ['JetBrains Mono', 'Fira Code', 
'monospace'], + }, + }, + }, + plugins: [], +}; diff --git a/frontend/tsconfig.json b/frontend/tsconfig.json new file mode 100644 index 0000000..ca4bbc0 --- /dev/null +++ b/frontend/tsconfig.json @@ -0,0 +1,23 @@ +{ + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "module": "ESNext", + "skipLibCheck": true, + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "isolatedModules": true, + "moduleDetection": "force", + "noEmit": true, + "jsx": "react-jsx", + "strict": true, + "noUnusedLocals": false, + "noUnusedParameters": false, + "noFallthroughCasesInSwitch": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "esModuleInterop": true + }, + "include": ["src", "src/vite-env.d.ts"] +} diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts new file mode 100644 index 0000000..bc167f2 --- /dev/null +++ b/frontend/vite.config.ts @@ -0,0 +1,15 @@ +import { defineConfig } from 'vite'; +import react from '@vitejs/plugin-react'; + +export default defineConfig({ + plugins: [react()], + base: './', + server: { + port: 5173, + strictPort: true, + }, + build: { + outDir: 'dist', + emptyOutDir: true, + }, +}); diff --git a/install.bat b/install.bat deleted file mode 100644 index 6917ecb..0000000 --- a/install.bat +++ /dev/null @@ -1,25 +0,0 @@ -@echo off -echo =================================================== -echo OBS Recording Transcriber - Windows Installation -echo =================================================== -echo. - -:: Check for Python -python --version > nul 2>&1 -if %errorlevel% neq 0 ( - echo Python not found! Please install Python 3.8 or higher. - echo Download from: https://www.python.org/downloads/ - echo Make sure to check "Add Python to PATH" during installation. - pause - exit /b 1 -) - -:: Run the installation script -echo Running installation script... -python install.py - -echo. 
-echo If the installation was successful, you can run the application with: -echo streamlit run app.py -echo. -pause \ No newline at end of file diff --git a/install.py b/install.py deleted file mode 100644 index dc5f9b4..0000000 --- a/install.py +++ /dev/null @@ -1,307 +0,0 @@ -#!/usr/bin/env python -""" -Installation script for OBS Recording Transcriber. -This script helps install all required dependencies and checks for common issues. -""" - -import os -import sys -import platform -import subprocess -import shutil -from pathlib import Path - -def print_header(text): - """Print a formatted header.""" - print("\n" + "=" * 80) - print(f" {text}") - print("=" * 80) - -def print_step(text): - """Print a step in the installation process.""" - print(f"\n>> {text}") - -def run_command(command, check=True): - """Run a shell command and return the result.""" - try: - result = subprocess.run( - command, - shell=True, - check=check, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) - return result - except subprocess.CalledProcessError as e: - print(f"Error executing command: {command}") - print(f"Error message: {e.stderr}") - return None - -def check_python_version(): - """Check if Python version is 3.8 or higher.""" - print_step("Checking Python version") - version = sys.version_info - if version.major < 3 or (version.major == 3 and version.minor < 8): - print(f"Python 3.8 or higher is required. 
You have {sys.version}") - print("Please upgrade your Python installation.") - return False - print(f"Python version: {sys.version}") - return True - -def check_ffmpeg(): - """Check if FFmpeg is installed.""" - print_step("Checking FFmpeg installation") - result = shutil.which("ffmpeg") - if result is None: - print("FFmpeg not found in PATH.") - print("Please install FFmpeg:") - if platform.system() == "Windows": - print(" - Download from: https://www.gyan.dev/ffmpeg/builds/") - print(" - Extract and add the bin folder to your PATH") - elif platform.system() == "Darwin": # macOS - print(" - Install with Homebrew: brew install ffmpeg") - else: # Linux - print(" - Install with apt: sudo apt update && sudo apt install ffmpeg") - return False - - # Check FFmpeg version - version_result = run_command("ffmpeg -version") - if version_result: - print(f"FFmpeg is installed: {version_result.stdout.splitlines()[0]}") - return True - return False - -def check_gpu(): - """Check for GPU availability.""" - print_step("Checking GPU availability") - - # Check for NVIDIA GPU - if platform.system() == "Windows": - nvidia_smi = shutil.which("nvidia-smi") - if nvidia_smi: - result = run_command("nvidia-smi", check=False) - if result and result.returncode == 0: - print("NVIDIA GPU detected:") - for line in result.stdout.splitlines()[:10]: - print(f" {line}") - return "nvidia" - - # Check for Apple Silicon - if platform.system() == "Darwin" and platform.machine() == "arm64": - print("Apple Silicon (M1/M2) detected") - return "apple" - - print("No GPU detected or GPU drivers not installed. CPU will be used for processing.") - return "cpu" - -def setup_virtual_env(): - """Set up a virtual environment.""" - print_step("Setting up virtual environment") - - # Check if venv module is available - try: - import venv - print("Python venv module is available") - except ImportError: - print("Python venv module is not available. 
Please install it.") - return False - - # Create virtual environment if it doesn't exist - venv_path = Path("venv") - if venv_path.exists(): - print(f"Virtual environment already exists at {venv_path}") - activate_venv() - return True - - print(f"Creating virtual environment at {venv_path}") - try: - subprocess.run([sys.executable, "-m", "venv", "venv"], check=True) - print("Virtual environment created successfully") - activate_venv() - return True - except subprocess.CalledProcessError as e: - print(f"Error creating virtual environment: {e}") - return False - -def activate_venv(): - """Activate the virtual environment.""" - print_step("Activating virtual environment") - - venv_path = Path("venv") - if not venv_path.exists(): - print("Virtual environment not found") - return False - - # Get the path to the activate script - if platform.system() == "Windows": - activate_script = venv_path / "Scripts" / "activate.bat" - activate_cmd = f"call {activate_script}" - else: - activate_script = venv_path / "bin" / "activate" - activate_cmd = f"source {activate_script}" - - print(f"To activate the virtual environment, run:") - print(f" {activate_cmd}") - - # We can't actually activate the venv in this script because it would only - # affect the subprocess, not the parent process. We just provide instructions. 
- return True - -def install_pytorch(gpu_type): - """Install PyTorch with appropriate GPU support.""" - print_step("Installing PyTorch") - - if gpu_type == "nvidia": - print("Installing PyTorch with CUDA support") - cmd = "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118" - elif gpu_type == "apple": - print("Installing PyTorch with MPS support") - cmd = "pip install torch torchvision torchaudio" - else: - print("Installing PyTorch (CPU version)") - cmd = "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu" - - result = run_command(cmd) - if result and result.returncode == 0: - print("PyTorch installed successfully") - return True - else: - print("Failed to install PyTorch") - return False - -def install_dependencies(): - """Install dependencies from requirements.txt.""" - print_step("Installing dependencies from requirements.txt") - - requirements_path = Path("requirements.txt") - if not requirements_path.exists(): - print("requirements.txt not found") - return False - - result = run_command("pip install -r requirements.txt") - if result and result.returncode == 0: - print("Dependencies installed successfully") - return True - else: - print("Some dependencies failed to install. 
See error messages above.") - return False - -def install_tokenizers(): - """Install tokenizers package separately.""" - print_step("Installing tokenizers package") - - # First try the normal installation - result = run_command("pip install tokenizers", check=False) - if result and result.returncode == 0: - print("Tokenizers installed successfully") - return True - - # If that fails, try the no-binary option - print("Standard installation failed, trying alternative method...") - result = run_command("pip install tokenizers --no-binary tokenizers", check=False) - if result and result.returncode == 0: - print("Tokenizers installed successfully with alternative method") - return True - - print("Failed to install tokenizers. You may need to install Rust or Visual C++ Build Tools.") - if platform.system() == "Windows": - print("Download Visual C++ Build Tools: https://visualstudio.microsoft.com/visual-cpp-build-tools/") - print("Install Rust: https://rustup.rs/") - return False - -def check_installation(): - """Verify the installation by importing key packages.""" - print_step("Verifying installation") - - packages_to_check = [ - "streamlit", - "torch", - "transformers", - "whisper", - "numpy", - "sklearn" - ] - - all_successful = True - for package in packages_to_check: - try: - __import__(package) - print(f"✓ {package} imported successfully") - except ImportError: - print(f"✗ Failed to import {package}") - all_successful = False - - # Check optional packages - optional_packages = [ - "pyannote.audio", - "iso639" - ] - - print("\nChecking optional packages:") - for package in optional_packages: - try: - if package == "pyannote.audio": - # Just try to import pyannote - __import__("pyannote") - else: - __import__(package) - print(f"✓ {package} imported successfully") - except ImportError: - print(f"⚠ {package} not available (required for some advanced features)") - - return all_successful - -def main(): - """Main installation function.""" - print_header("OBS Recording 
Transcriber - Installation Script") - - # Check prerequisites - if not check_python_version(): - return - - ffmpeg_available = check_ffmpeg() - gpu_type = check_gpu() - - # Setup environment - if not setup_virtual_env(): - print("Failed to set up virtual environment. Continuing with system Python...") - - # Install packages - print("\nReady to install packages. Make sure your virtual environment is activated.") - input("Press Enter to continue...") - - install_pytorch(gpu_type) - install_dependencies() - install_tokenizers() - - # Verify installation - success = check_installation() - - print_header("Installation Summary") - print(f"Python: {'✓ OK' if check_python_version() else '✗ Needs upgrade'}") - print(f"FFmpeg: {'✓ Installed' if ffmpeg_available else '✗ Not found'}") - print(f"GPU Support: {gpu_type.upper()}") - print(f"Dependencies: {'✓ Installed' if success else '⚠ Some issues'}") - - print("\nNext steps:") - if not ffmpeg_available: - print("1. Install FFmpeg (required for audio processing)") - - print("1. Activate your virtual environment:") - if platform.system() == "Windows": - print(" venv\\Scripts\\activate") - else: - print(" source venv/bin/activate") - - print("2. Run the application:") - print(" streamlit run app.py") - - print("\nFor advanced features like speaker diarization:") - print("1. Get a HuggingFace token: https://huggingface.co/settings/tokens") - print("2. Request access to pyannote models: https://huggingface.co/pyannote/speaker-diarization-3.0") - - print("\nSee INSTALLATION.md for more details and troubleshooting.") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/install.sh b/install.sh deleted file mode 100644 index 7298af9..0000000 --- a/install.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -echo "===================================================" -echo " OBS Recording Transcriber - Unix Installation" -echo "===================================================" -echo - -# Check for Python -if ! 
command -v python3 &> /dev/null; then - echo "Python 3 not found! Please install Python 3.8 or higher." - echo "For Ubuntu/Debian: sudo apt update && sudo apt install python3 python3-pip python3-venv" - echo "For macOS: brew install python3" - exit 1 -fi - -# Make the script executable -chmod +x install.py - -# Run the installation script -echo "Running installation script..." -python3 ./install.py - -echo -echo "If the installation was successful, you can run the application with:" -echo "streamlit run app.py" -echo \ No newline at end of file diff --git a/package.json b/package.json new file mode 100644 index 0000000..8311061 --- /dev/null +++ b/package.json @@ -0,0 +1,49 @@ +{ + "name": "cutscript", + "version": "0.1.0", + "private": true, + "description": "CutScript — Open-source AI-powered text-based video editor", + "main": "electron/main.js", + "scripts": { + "dev": "concurrently \"npm run dev:backend\" \"npm run dev:frontend\" \"wait-on http://localhost:5173 && npm run dev:electron\"", + "dev:frontend": "cd frontend && npm run dev", + "dev:electron": "electron .", + "dev:backend": "cd backend && python -m uvicorn main:app --reload --port 8642", + "build": "cd frontend && npm run build && electron-builder", + "lint": "cd frontend && npm run lint" + }, + "devDependencies": { + "concurrently": "^9.1.0", + "electron": "^33.2.0", + "electron-builder": "^25.1.0", + "wait-on": "^8.0.0" + }, + "dependencies": { + "python-shell": "^5.0.0" + }, + "build": { + "appId": "com.dataants.cutscript", + "productName": "CutScript", + "files": [ + "electron/**/*", + "frontend/dist/**/*", + "backend/**/*", + "shared/**/*" + ], + "extraResources": [ + { + "from": "backend", + "to": "backend" + } + ], + "win": { + "target": "nsis" + }, + "mac": { + "target": "dmg" + }, + "linux": { + "target": "AppImage" + } + } +} diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 11c57dd..0000000 --- a/requirements.txt +++ /dev/null @@ -1,54 +0,0 @@ -# OBS Recording 
Transcriber Dependencies -# Core dependencies -# streamlit>=1.30.0 required for protobuf>=5.0 compatibility -streamlit>=1.30.0 -moviepy>=1.0.3 -openai-whisper>=20231117 -requests>=2.28.0 -humanize>=4.6.0 - -# PyTorch ecosystem - DO NOT include here for Docker builds -# These are installed separately with CUDA support in Dockerfile.gpu -# For local installs: pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 -# torchaudio >= 2.1.0 is REQUIRED for diarization to work properly - -# Transformers ecosystem -transformers>=4.35.0,<5.0.0 -tokenizers>=0.14.0 - -# ML dependencies - use flexible versions for compatibility -numpy>=1.24.0 -scipy>=1.10.0 -scikit-learn>=1.3.0 - -# Audio processing and ML models -# speechbrain 1.0+ required for pyannote compatibility -speechbrain>=1.0.0 -pyannote.audio>=3.1.1 -pytorch-lightning>=2.0.0 - -# Other dependencies -iso639>=0.1.4 -# protobuf>=5.0 required by opentelemetry (pyannote.audio dependency) -protobuf>=5.0.0 -matplotlib>=3.5.0 -soundfile>=0.10.3 -ffmpeg-python>=0.2.0 - -# Optional: Ollama Python client (uncomment to install) -# ollama - -# Installation notes: -# 1. For Windows users, you may need to install PyTorch separately: -# pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 -# -# 2. For tokenizers issues, try installing Visual C++ Build Tools: -# https://visualstudio.microsoft.com/visual-cpp-build-tools/ -# -# 3. For pyannote.audio, you'll need a HuggingFace token with access to: -# https://huggingface.co/pyannote/speaker-diarization-3.0 -# -# 4. 
FFmpeg is required for audio processing: -# Windows: https://www.gyan.dev/ffmpeg/builds/ -# Mac: brew install ffmpeg -# Linux: apt-get install ffmpeg diff --git a/shared/project-schema.json b/shared/project-schema.json new file mode 100644 index 0000000..53c361a --- /dev/null +++ b/shared/project-schema.json @@ -0,0 +1,55 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "AI Video Editor Project", + "type": "object", + "required": ["version", "videoPath", "words", "segments", "deletedRanges", "language", "createdAt", "modifiedAt"], + "properties": { + "version": { "type": "integer", "const": 1 }, + "videoPath": { "type": "string" }, + "words": { + "type": "array", + "items": { + "type": "object", + "required": ["word", "start", "end", "confidence"], + "properties": { + "word": { "type": "string" }, + "start": { "type": "number" }, + "end": { "type": "number" }, + "confidence": { "type": "number" }, + "speaker": { "type": "string" } + } + } + }, + "segments": { + "type": "array", + "items": { + "type": "object", + "required": ["id", "start", "end", "text", "words"], + "properties": { + "id": { "type": "integer" }, + "start": { "type": "number" }, + "end": { "type": "number" }, + "text": { "type": "string" }, + "words": { "$ref": "#/properties/words" }, + "speaker": { "type": "string" } + } + } + }, + "deletedRanges": { + "type": "array", + "items": { + "type": "object", + "required": ["id", "start", "end", "wordIndices"], + "properties": { + "id": { "type": "string" }, + "start": { "type": "number" }, + "end": { "type": "number" }, + "wordIndices": { "type": "array", "items": { "type": "integer" } } + } + } + }, + "language": { "type": "string" }, + "createdAt": { "type": "string", "format": "date-time" }, + "modifiedAt": { "type": "string", "format": "date-time" } + } +} diff --git a/utils/diarization.py b/utils/diarization.py deleted file mode 100644 index 2326dc0..0000000 --- a/utils/diarization.py +++ /dev/null @@ -1,226 +0,0 @@ -""" 
-Speaker diarization utilities for the Video Transcriber. -Provides functions to identify different speakers in audio recordings. -""" - -import logging -import os -import numpy as np -from pathlib import Path -import torch -from pyannote.audio import Pipeline -from pyannote.core import Segment -import whisper -import streamlit as st - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -try: - from utils.gpu_utils import get_optimal_device - GPU_UTILS_AVAILABLE = True -except ImportError: - GPU_UTILS_AVAILABLE = False - -HF_TOKEN_ENV = "HF_TOKEN" - - -@st.cache_resource -def _load_diarization_pipeline(hf_token, device_str): - """Load and cache the speaker diarization pipeline.""" - logger.info(f"Loading diarization pipeline on {device_str}") - pipe = Pipeline.from_pretrained( - "pyannote/speaker-diarization-3.0", - use_auth_token=hf_token - ) - device = torch.device(device_str) - if device.type == "cuda": - pipe = pipe.to(device) - return pipe - - -def get_diarization_pipeline(use_gpu=True, hf_token=None): - """ - Initialize the speaker diarization pipeline. - - Args: - use_gpu (bool): Whether to use GPU acceleration if available - hf_token (str, optional): HuggingFace API token for accessing the model - - Returns: - Pipeline or None: Diarization pipeline if successful, None otherwise - """ - if hf_token is None: - hf_token = os.environ.get(HF_TOKEN_ENV) - if hf_token is None: - logger.error(f"HuggingFace token not provided. 
Set {HF_TOKEN_ENV} environment variable or pass token directly.") - return None - - try: - device = torch.device("cpu") - if use_gpu and GPU_UTILS_AVAILABLE: - device = get_optimal_device() - logger.info(f"Using device: {device} for diarization") - - return _load_diarization_pipeline(hf_token, str(device)) - except Exception as e: - logger.error(f"Error initializing diarization pipeline: {e}") - return None - - -def diarize_audio(audio_path, pipeline=None, num_speakers=None, use_gpu=True, hf_token=None): - """ - Perform speaker diarization on an audio file. - - Args: - audio_path (Path): Path to the audio file - pipeline (Pipeline, optional): Pre-initialized diarization pipeline - num_speakers (int, optional): Number of speakers (if known) - use_gpu (bool): Whether to use GPU acceleration if available - hf_token (str, optional): HuggingFace API token - - Returns: - dict: Dictionary mapping time segments to speaker IDs - """ - audio_path = Path(audio_path) - - # Initialize pipeline if not provided - if pipeline is None: - pipeline = get_diarization_pipeline(use_gpu, hf_token) - if pipeline is None: - return None - - try: - # Run diarization - logger.info(f"Running speaker diarization on {audio_path}") - diarization = pipeline(audio_path, num_speakers=num_speakers) - - # Extract speaker segments - speaker_segments = {} - for turn, _, speaker in diarization.itertracks(yield_label=True): - segment = (turn.start, turn.end) - speaker_segments[segment] = speaker - - return speaker_segments - except Exception as e: - logger.error(f"Error during diarization: {e}") - return None - - -def apply_diarization_to_transcript(transcript_segments, speaker_segments): - """ - Apply speaker diarization results to transcript segments. 
- - Args: - transcript_segments (list): List of transcript segments with timing info - speaker_segments (dict): Dictionary mapping time segments to speaker IDs - - Returns: - list: Updated transcript segments with speaker information - """ - if not speaker_segments: - return transcript_segments - - # Convert speaker segments to a more usable format - speaker_ranges = [(Segment(start, end), speaker) - for (start, end), speaker in speaker_segments.items()] - - # Update transcript segments with speaker information - for segment in transcript_segments: - segment_start = segment['start'] - segment_end = segment['end'] - segment_range = Segment(segment_start, segment_end) - - # Find overlapping speaker segments - overlaps = [] - for (spk_range, speaker) in speaker_ranges: - overlap = segment_range.intersect(spk_range) - if overlap: - overlaps.append((overlap.duration, speaker)) - - # Assign the speaker with the most overlap - if overlaps: - overlaps.sort(reverse=True) # Sort by duration (descending) - segment['speaker'] = overlaps[0][1] - else: - segment['speaker'] = "UNKNOWN" - - return transcript_segments - - -def format_transcript_with_speakers(transcript_segments): - """ - Format transcript with speaker labels. - - Args: - transcript_segments (list): List of transcript segments with speaker info - - Returns: - str: Formatted transcript with speaker labels - """ - formatted_lines = [] - current_speaker = None - - for segment in transcript_segments: - speaker = segment.get('speaker', 'UNKNOWN') - text = segment['text'].strip() - - # Add speaker label when speaker changes - if speaker != current_speaker: - formatted_lines.append(f"\n[{speaker}]") - current_speaker = speaker - - formatted_lines.append(text) - - return " ".join(formatted_lines) - - -def transcribe_with_diarization(audio_path, whisper_model="base", num_speakers=None, - use_gpu=True, hf_token=None): - """ - Transcribe audio with speaker diarization. 
- - Args: - audio_path (Path): Path to the audio file - whisper_model (str): Whisper model size to use - num_speakers (int, optional): Number of speakers (if known) - use_gpu (bool): Whether to use GPU acceleration if available - hf_token (str, optional): HuggingFace API token - - Returns: - tuple: (diarized_segments, formatted_transcript) - """ - audio_path = Path(audio_path) - - # Configure device - device = torch.device("cpu") - if use_gpu and GPU_UTILS_AVAILABLE: - device = get_optimal_device() - - try: - from utils.transcription import _load_whisper_model - logger.info(f"Transcribing audio with Whisper model: {whisper_model}") - model = _load_whisper_model(whisper_model, str(device)) - result = model.transcribe(str(audio_path)) - transcript_segments = result["segments"] - - # Step 2: Perform speaker diarization - logger.info("Performing speaker diarization") - pipeline = get_diarization_pipeline(use_gpu, hf_token) - if pipeline is None: - logger.warning("Diarization pipeline not available, returning transcript without speakers") - return transcript_segments, result["text"] - - speaker_segments = diarize_audio(audio_path, pipeline, num_speakers, use_gpu) - - # Step 3: Apply diarization to transcript - if speaker_segments: - diarized_segments = apply_diarization_to_transcript(transcript_segments, speaker_segments) - formatted_transcript = format_transcript_with_speakers(diarized_segments) - return diarized_segments, formatted_transcript - else: - return transcript_segments, result["text"] - - except Exception as e: - logger.error(f"Error in transcribe_with_diarization: {e}") - return None, None \ No newline at end of file diff --git a/utils/export.py b/utils/export.py deleted file mode 100644 index e540c76..0000000 --- a/utils/export.py +++ /dev/null @@ -1,284 +0,0 @@ -""" -Subtitle export utilities for the OBS Recording Transcriber. -Supports exporting transcripts to SRT, ASS, and WebVTT subtitle formats. 
-""" - -from pathlib import Path -import re -from datetime import timedelta -import gzip -import zipfile -import logging - -# Configure logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -def format_timestamp_srt(timestamp_ms): - """ - Format a timestamp in milliseconds to SRT format (HH:MM:SS,mmm). - - Args: - timestamp_ms (int): Timestamp in milliseconds - - Returns: - str: Formatted timestamp string - """ - hours, remainder = divmod(timestamp_ms, 3600000) - minutes, remainder = divmod(remainder, 60000) - seconds, milliseconds = divmod(remainder, 1000) - return f"{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d},{int(milliseconds):03d}" - - -def format_timestamp_ass(timestamp_ms): - """ - Format a timestamp in milliseconds to ASS format (H:MM:SS.cc). - - Args: - timestamp_ms (int): Timestamp in milliseconds - - Returns: - str: Formatted timestamp string - """ - hours, remainder = divmod(timestamp_ms, 3600000) - minutes, remainder = divmod(remainder, 60000) - seconds, remainder = divmod(remainder, 1000) - centiseconds = remainder // 10 - return f"{int(hours)}:{int(minutes):02d}:{int(seconds):02d}.{int(centiseconds):02d}" - - -def format_timestamp_vtt(timestamp_ms): - """ - Format a timestamp in milliseconds to WebVTT format (HH:MM:SS.mmm). - - Args: - timestamp_ms (int): Timestamp in milliseconds - - Returns: - str: Formatted timestamp string - """ - hours, remainder = divmod(timestamp_ms, 3600000) - minutes, remainder = divmod(remainder, 60000) - seconds, milliseconds = divmod(remainder, 1000) - return f"{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d}.{int(milliseconds):03d}" - - -def export_to_srt(segments, output_path): - """ - Export transcript segments to SRT format. 
- - Args: - segments (list): List of transcript segments with start, end, and text - output_path (Path): Path to save the SRT file - - Returns: - Path: Path to the saved SRT file - """ - with open(output_path, 'w', encoding='utf-8') as f: - for i, segment in enumerate(segments, 1): - start_time = format_timestamp_srt(int(segment['start'] * 1000)) - end_time = format_timestamp_srt(int(segment['end'] * 1000)) - - f.write(f"{i}\n") - f.write(f"{start_time} --> {end_time}\n") - f.write(f"{segment['text'].strip()}\n\n") - - return output_path - - -def export_to_ass(segments, output_path, video_width=1920, video_height=1080, style=None): - """ - Export transcript segments to ASS format with styling. - - Args: - segments (list): List of transcript segments with start, end, and text - output_path (Path): Path to save the ASS file - video_width (int): Width of the video in pixels - video_height (int): Height of the video in pixels - style (dict, optional): Custom style parameters - - Returns: - Path: Path to the saved ASS file - """ - # Default style - default_style = { - "fontname": "Arial", - "fontsize": "48", - "primary_color": "&H00FFFFFF", # White - "secondary_color": "&H000000FF", # Blue - "outline_color": "&H00000000", # Black - "back_color": "&H80000000", # Semi-transparent black - "bold": "-1", # True - "italic": "0", # False - "alignment": "2", # Bottom center - } - - # Apply custom style if provided - if style: - default_style.update(style) - - # ASS header template - ass_header = f"""[Script Info] -Title: Transcription -ScriptType: v4.00+ -WrapStyle: 0 -PlayResX: {video_width} -PlayResY: {video_height} -ScaledBorderAndShadow: yes - -[V4+ Styles] -Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding -Style: 
Default,{default_style['fontname']},{default_style['fontsize']},{default_style['primary_color']},{default_style['secondary_color']},{default_style['outline_color']},{default_style['back_color']},{default_style['bold']},{default_style['italic']},0,0,100,100,0,0,1,2,2,{default_style['alignment']},10,10,10,1 - -[Events] -Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text -""" - - with open(output_path, 'w', encoding='utf-8') as f: - f.write(ass_header) - - for segment in segments: - start_time = format_timestamp_ass(int(segment['start'] * 1000)) - end_time = format_timestamp_ass(int(segment['end'] * 1000)) - text = segment['text'].strip().replace('\n', '\\N') - - f.write(f"Dialogue: 0,{start_time},{end_time},Default,,0,0,0,,{text}\n") - - return output_path - - -def export_to_vtt(segments, output_path): - """ - Export transcript segments to WebVTT format. - - Args: - segments (list): List of transcript segments with start, end, and text - output_path (Path): Path to save the WebVTT file - - Returns: - Path: Path to the saved WebVTT file - """ - with open(output_path, 'w', encoding='utf-8') as f: - # WebVTT header - f.write("WEBVTT\n\n") - - for i, segment in enumerate(segments, 1): - start_time = format_timestamp_vtt(int(segment['start'] * 1000)) - end_time = format_timestamp_vtt(int(segment['end'] * 1000)) - - # Optional cue identifier - f.write(f"{i}\n") - f.write(f"{start_time} --> {end_time}\n") - f.write(f"{segment['text'].strip()}\n\n") - - return output_path - - -def transcript_to_segments(transcript, segment_duration=5.0): - """ - Convert a plain transcript to timed segments for subtitle export. - Used when the original segments are not available. 
- - Args: - transcript (str): Full transcript text - segment_duration (float): Duration of each segment in seconds - - Returns: - list: List of segments with start, end, and text - """ - # Split transcript into sentences - sentences = re.split(r'(?<=[.!?])\s+', transcript) - segments = [] - - current_time = 0.0 - for sentence in sentences: - if not sentence.strip(): - continue - - # Estimate duration based on word count (approx. 2.5 words per second) - word_count = len(sentence.split()) - duration = max(2.0, word_count / 2.5) - - segments.append({ - 'start': current_time, - 'end': current_time + duration, - 'text': sentence - }) - - current_time += duration - - return segments - - -def compress_file(input_path, compression_type='gzip'): - """ - Compress a file using the specified compression method. - - Args: - input_path (Path): Path to the file to compress - compression_type (str): Type of compression ('gzip' or 'zip') - - Returns: - Path: Path to the compressed file - """ - input_path = Path(input_path) - - if compression_type == 'gzip': - output_path = input_path.with_suffix(input_path.suffix + '.gz') - with open(input_path, 'rb') as f_in: - with gzip.open(output_path, 'wb') as f_out: - f_out.write(f_in.read()) - return output_path - - elif compression_type == 'zip': - output_path = input_path.with_suffix('.zip') - with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zipf: - zipf.write(input_path, arcname=input_path.name) - return output_path - - else: - logger.warning(f"Unsupported compression type: {compression_type}") - return input_path - - -def export_transcript(transcript, output_path, format_type='srt', segments=None, - compress=False, compression_type='gzip', style=None): - """ - Export transcript to the specified subtitle format. 
- - Args: - transcript (str): Full transcript text - output_path (Path): Base path for the output file (without extension) - format_type (str): 'srt', 'ass', or 'vtt' - segments (list, optional): List of transcript segments with timing information - compress (bool): Whether to compress the output file - compression_type (str): Type of compression ('gzip' or 'zip') - style (dict, optional): Custom style parameters for ASS format - - Returns: - Path: Path to the saved subtitle file - """ - output_path = Path(output_path) - - # If segments are not provided, create them from the transcript - if segments is None: - segments = transcript_to_segments(transcript) - - if format_type.lower() == 'srt': - output_file = output_path.with_suffix('.srt') - result_path = export_to_srt(segments, output_file) - elif format_type.lower() == 'ass': - output_file = output_path.with_suffix('.ass') - result_path = export_to_ass(segments, output_file, style=style) - elif format_type.lower() == 'vtt': - output_file = output_path.with_suffix('.vtt') - result_path = export_to_vtt(segments, output_file) - else: - raise ValueError(f"Unsupported format type: {format_type}. Use 'srt', 'ass', or 'vtt'.") - - # Compress the file if requested - if compress: - result_path = compress_file(result_path, compression_type) - - return result_path \ No newline at end of file diff --git a/utils/keyword_extraction.py b/utils/keyword_extraction.py deleted file mode 100644 index 733ec4e..0000000 --- a/utils/keyword_extraction.py +++ /dev/null @@ -1,334 +0,0 @@ -""" -Keyword extraction utilities for the Video Transcriber. -Provides functions to extract keywords and link them to timestamps. 
-""" - -import logging -import re -import torch -import numpy as np -from pathlib import Path -from transformers import pipeline -from sklearn.feature_extraction.text import TfidfVectorizer -from collections import Counter -import streamlit as st - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -try: - from utils.gpu_utils import get_optimal_device - GPU_UTILS_AVAILABLE = True -except ImportError: - GPU_UTILS_AVAILABLE = False - -NER_MODEL = "dslim/bert-base-NER" - - -@st.cache_resource -def _load_ner_pipeline(model_name, device_int): - """Load and cache the NER pipeline.""" - logger.info(f"Loading NER model: {model_name}") - return pipeline("ner", model=model_name, device=device_int, aggregation_strategy="simple") - - -def extract_keywords_tfidf(text, max_keywords=10, ngram_range=(1, 2)): - """ - Extract keywords using TF-IDF. - - Args: - text (str): Text to extract keywords from - max_keywords (int): Maximum number of keywords to extract - ngram_range (tuple): Range of n-grams to consider - - Returns: - list: List of (keyword, score) tuples - """ - try: - # Preprocess text - text = text.lower() - - # Remove common stopwords - convert to list for scikit-learn compatibility - stopwords = ['a', 'an', 'the', 'and', 'or', 'but', 'if', 'because', 'as', 'what', - 'when', 'where', 'how', 'who', 'which', 'this', 'that', 'these', 'those', - 'then', 'just', 'so', 'than', 'such', 'both', 'through', 'about', 'for', - 'is', 'of', 'while', 'during', 'to', 'from', 'in', 'out', 'on', 'off', 'by'] - - # Create sentences for better TF-IDF analysis - sentences = re.split(r'[.!?]', text) - sentences = [s.strip() for s in sentences if s.strip()] - - if not sentences: - return [] - - # Apply TF-IDF - vectorizer = TfidfVectorizer( - max_features=100, - stop_words=stopwords, - ngram_range=ngram_range - ) - - try: - tfidf_matrix = vectorizer.fit_transform(sentences) - feature_names = vectorizer.get_feature_names_out() - - # Calculate average TF-IDF score 
across all sentences - avg_tfidf = np.mean(tfidf_matrix.toarray(), axis=0) - - # Get top keywords - keywords = [(feature_names[i], avg_tfidf[i]) for i in avg_tfidf.argsort()[::-1]] - - # Filter out single-character keywords and limit to max_keywords - keywords = [(k, s) for k, s in keywords if len(k) > 1][:max_keywords] - - return keywords - except ValueError as e: - logger.warning(f"TF-IDF extraction failed: {e}") - return [] - - except Exception as e: - logger.error(f"Error extracting keywords with TF-IDF: {e}") - return [] - - -def extract_named_entities(text, model=NER_MODEL, use_gpu=True): - """ - Extract named entities from text. - - Args: - text (str): Text to extract entities from - model (str): Model to use for NER - use_gpu (bool): Whether to use GPU acceleration if available - - Returns: - list: List of (entity, type) tuples - """ - # Configure device - device = torch.device("cpu") - if use_gpu and GPU_UTILS_AVAILABLE: - device = get_optimal_device() - device_arg = 0 if device.type == "cuda" else -1 - else: - device_arg = -1 - - try: - ner_pipeline = _load_ner_pipeline(model, device_arg) - - # Split text into manageable chunks if too long - max_length = 512 - if len(text) > max_length: - chunks = [text[i:i+max_length] for i in range(0, len(text), max_length)] - else: - chunks = [text] - - # Process each chunk - all_entities = [] - for chunk in chunks: - entities = ner_pipeline(chunk) - all_entities.extend(entities) - - # Extract entity text and type - entity_info = [(entity["word"], entity["entity_group"]) for entity in all_entities] - - return entity_info - except Exception as e: - logger.error(f"Error extracting named entities: {e}") - return [] - - -def find_keyword_timestamps(segments, keywords): - """ - Find timestamps for keywords in transcript segments. 
- - Args: - segments (list): List of transcript segments with timing info - keywords (list): List of keywords to find - - Returns: - dict: Dictionary mapping keywords to lists of timestamps - """ - keyword_timestamps = {} - - # Convert keywords to lowercase for case-insensitive matching - # Check if keywords list is not empty before accessing keywords[0] - if not keywords: - return keyword_timestamps - - if isinstance(keywords[0], tuple): - # If keywords is a list of (keyword, score) tuples - keywords_lower = [k.lower() for k, _ in keywords] - else: - # If keywords is just a list of keywords - keywords_lower = [k.lower() for k in keywords] - - # Process each segment - for segment in segments: - segment_text = segment["text"].lower() - start_time = segment["start"] - end_time = segment["end"] - - # Check each keyword - for i, keyword in enumerate(keywords_lower): - if keyword in segment_text: - # Get the original case of the keyword - # Safe access to keywords[0] since we already checked keywords is not empty - original_keyword = keywords[i][0] if isinstance(keywords[0], tuple) else keywords[i] - - # Initialize the list if this is the first occurrence - if original_keyword not in keyword_timestamps: - keyword_timestamps[original_keyword] = [] - - # Add the timestamp - keyword_timestamps[original_keyword].append({ - "start": start_time, - "end": end_time, - "context": segment["text"] - }) - - return keyword_timestamps - - -def extract_keywords_from_transcript(transcript, segments, max_keywords=15, use_gpu=True): - """ - Extract keywords from transcript and link them to timestamps. 
- - Args: - transcript (str): Full transcript text - segments (list): List of transcript segments with timing info - max_keywords (int): Maximum number of keywords to extract - use_gpu (bool): Whether to use GPU acceleration if available - - Returns: - tuple: (keyword_timestamps, entities_with_timestamps) - """ - try: - # Extract keywords using TF-IDF - tfidf_keywords = extract_keywords_tfidf(transcript, max_keywords=max_keywords) - - # Extract named entities - entities = extract_named_entities(transcript, use_gpu=use_gpu) - - # Count entity occurrences and get the most frequent ones - entity_counter = Counter([entity for entity, _ in entities]) - top_entities = [(entity, count) for entity, count in entity_counter.most_common(max_keywords)] - - # Find timestamps for keywords and entities - keyword_timestamps = find_keyword_timestamps(segments, tfidf_keywords) - entity_timestamps = find_keyword_timestamps(segments, top_entities) - - return keyword_timestamps, entity_timestamps - - except Exception as e: - logger.error(f"Error extracting keywords from transcript: {e}") - return {}, {} - - -def generate_keyword_index(keyword_timestamps, entity_timestamps=None): - """ - Generate a keyword index with timestamps. 
- - Args: - keyword_timestamps (dict): Dictionary mapping keywords to timestamp lists - entity_timestamps (dict, optional): Dictionary mapping entities to timestamp lists - - Returns: - str: Formatted keyword index - """ - lines = ["# Keyword Index\n"] - - # Add keywords section - if keyword_timestamps: - lines.append("## Keywords\n") - for keyword, timestamps in sorted(keyword_timestamps.items()): - if timestamps: - times = [f"{int(ts['start'] // 60):02d}:{int(ts['start'] % 60):02d}" for ts in timestamps] - lines.append(f"- **{keyword}**: {', '.join(times)}\n") - - # Add entities section - if entity_timestamps: - lines.append("\n## Named Entities\n") - for entity, timestamps in sorted(entity_timestamps.items()): - if timestamps: - times = [f"{int(ts['start'] // 60):02d}:{int(ts['start'] % 60):02d}" for ts in timestamps] - lines.append(f"- **{entity}**: {', '.join(times)}\n") - - return "".join(lines) - - -def generate_interactive_transcript(segments, keyword_timestamps=None, entity_timestamps=None): - """ - Generate an interactive transcript with keyword highlighting. - - Args: - segments (list): List of transcript segments with timing info - keyword_timestamps (dict, optional): Dictionary mapping keywords to timestamp lists - entity_timestamps (dict, optional): Dictionary mapping entities to timestamp lists - - Returns: - str: HTML formatted interactive transcript - """ - # Combine keywords and entities - all_keywords = {} - if keyword_timestamps: - all_keywords.update(keyword_timestamps) - if entity_timestamps: - all_keywords.update(entity_timestamps) - - # Generate HTML - html = ["
"] - - for segment in segments: - start_time = segment["start"] - end_time = segment["end"] - text = segment["text"] - - # Format timestamp - timestamp = f"{int(start_time // 60):02d}:{int(start_time % 60):02d}" - - # Add speaker if available - speaker = segment.get("speaker", "") - speaker_html = f"[{speaker}] " if speaker else "" - - # Highlight keywords in text - highlighted_text = text - for keyword in all_keywords: - # Use regex to match whole words only - pattern = r'\b' + re.escape(keyword) + r'\b' - replacement = f"{keyword}" - highlighted_text = re.sub(pattern, replacement, highlighted_text, flags=re.IGNORECASE) - - # Add segment to HTML - html.append(f"

") - html.append(f"{timestamp} {speaker_html}{highlighted_text}") - html.append("

") - - html.append("
") - - return "\n".join(html) - - -def create_keyword_cloud_data(keyword_timestamps, entity_timestamps=None): - """ - Create data for a keyword cloud visualization. - - Args: - keyword_timestamps (dict): Dictionary mapping keywords to timestamp lists - entity_timestamps (dict, optional): Dictionary mapping entities to timestamp lists - - Returns: - list: List of (keyword, weight) tuples for visualization - """ - cloud_data = [] - - # Process keywords - for keyword, timestamps in keyword_timestamps.items(): - weight = len(timestamps) # Weight by occurrence count - cloud_data.append((keyword, weight)) - - # Process entities if provided - if entity_timestamps: - for entity, timestamps in entity_timestamps.items(): - weight = len(timestamps) * 1.5 # Give entities slightly higher weight - cloud_data.append((entity, weight)) - - return cloud_data \ No newline at end of file diff --git a/utils/ollama_integration.py b/utils/ollama_integration.py deleted file mode 100644 index d71caa4..0000000 --- a/utils/ollama_integration.py +++ /dev/null @@ -1,201 +0,0 @@ -""" -Ollama integration for local AI model inference. -Provides functions to use Ollama's API for text summarization with streaming support. 
-""" - -import requests -import json -import logging -from pathlib import Path -import os - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -OLLAMA_API_URL = os.environ.get("OLLAMA_API_URL", "http://localhost:11434/api") - - -def check_ollama_available(): - """Check if Ollama service is available.""" - try: - response = requests.get(f"{OLLAMA_API_URL}/tags", timeout=2) - return response.status_code == 200 - except requests.exceptions.RequestException: - return False - - -def list_available_models(): - """List available models in Ollama.""" - try: - response = requests.get(f"{OLLAMA_API_URL}/tags") - if response.status_code == 200: - models = response.json().get('models', []) - return [model['name'] for model in models] - return [] - except requests.exceptions.RequestException as e: - logger.error(f"Error listing Ollama models: {e}") - return [] - - -def summarize_with_ollama(text, model="llama3", max_length=150): - """Summarize text using Ollama's local API (non-streaming).""" - if not check_ollama_available(): - logger.warning("Ollama service is not available") - return None - - prompt = f"Summarize the following text in about {max_length} words:\n\n{text}" - - try: - response = requests.post( - f"{OLLAMA_API_URL}/generate", - json={ - "model": model, - "prompt": prompt, - "stream": False, - "options": { - "temperature": 0.3, - "top_p": 0.9, - "max_tokens": max_length * 2 - } - } - ) - - if response.status_code == 200: - result = response.json() - return result.get('response', '').strip() - else: - logger.error(f"Ollama API error: {response.status_code} - {response.text}") - return None - except requests.exceptions.RequestException as e: - logger.error(f"Error communicating with Ollama: {e}") - return None - - -def stream_summarize_with_ollama(text, model="llama3", max_length=150): - """ - Summarize text using Ollama with streaming. Yields tokens as they arrive. 
- - Yields: - str: Individual response tokens - """ - if not check_ollama_available(): - logger.warning("Ollama service is not available") - return - - prompt = f"Summarize the following text in about {max_length} words:\n\n{text}" - - try: - response = requests.post( - f"{OLLAMA_API_URL}/generate", - json={ - "model": model, - "prompt": prompt, - "stream": True, - "options": { - "temperature": 0.3, - "top_p": 0.9, - "max_tokens": max_length * 2 - } - }, - stream=True - ) - - if response.status_code == 200: - for line in response.iter_lines(): - if line: - data = json.loads(line) - token = data.get('response', '') - if token: - yield token - if data.get('done', False): - break - else: - logger.error(f"Ollama API error: {response.status_code}") - except requests.exceptions.RequestException as e: - logger.error(f"Error communicating with Ollama: {e}") - - -def chunk_and_summarize(text, model="llama3", chunk_size=4000, max_length=150): - """Chunk long text and summarize each chunk, then combine.""" - if len(text) <= chunk_size: - return summarize_with_ollama(text, model, max_length) - - words = text.split() - chunks = [] - current_chunk = [] - current_length = 0 - - for word in words: - if current_length + len(word) + 1 <= chunk_size: - current_chunk.append(word) - current_length += len(word) + 1 - else: - chunks.append(' '.join(current_chunk)) - current_chunk = [word] - current_length = len(word) + 1 - - if current_chunk: - chunks.append(' '.join(current_chunk)) - - chunk_summaries = [] - for i, chunk in enumerate(chunks): - logger.info(f"Summarizing chunk {i+1}/{len(chunks)}") - summary = summarize_with_ollama(chunk, model, max_length // len(chunks)) - if summary: - chunk_summaries.append(summary) - - if not chunk_summaries: - return None - - if len(chunk_summaries) == 1: - return chunk_summaries[0] - - combined_summary = " ".join(chunk_summaries) - return summarize_with_ollama(combined_summary, model, max_length) - - -def stream_chunk_and_summarize(text, 
model="llama3", chunk_size=4000, max_length=150): - """ - Chunk and summarize with streaming on the final summary. - Returns non-streaming chunk summaries, then streams the final combination. - - Yields: - str: Tokens from the final summary - """ - if len(text) <= chunk_size: - yield from stream_summarize_with_ollama(text, model, max_length) - return - - words = text.split() - chunks = [] - current_chunk = [] - current_length = 0 - - for word in words: - if current_length + len(word) + 1 <= chunk_size: - current_chunk.append(word) - current_length += len(word) + 1 - else: - chunks.append(' '.join(current_chunk)) - current_chunk = [word] - current_length = len(word) + 1 - - if current_chunk: - chunks.append(' '.join(current_chunk)) - - chunk_summaries = [] - for i, chunk in enumerate(chunks): - logger.info(f"Summarizing chunk {i+1}/{len(chunks)}") - summary = summarize_with_ollama(chunk, model, max_length // len(chunks)) - if summary: - chunk_summaries.append(summary) - - if not chunk_summaries: - return - - if len(chunk_summaries) == 1: - yield chunk_summaries[0] - return - - combined_summary = " ".join(chunk_summaries) - yield from stream_summarize_with_ollama(combined_summary, model, max_length) \ No newline at end of file diff --git a/utils/summarization.py b/utils/summarization.py deleted file mode 100644 index 28012c4..0000000 --- a/utils/summarization.py +++ /dev/null @@ -1,111 +0,0 @@ -from transformers import pipeline, AutoTokenizer -import torch -import logging -import streamlit as st - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -SUMMARY_MODEL = "Falconsai/text_summarization" - - -@st.cache_resource -def _load_summarizer(device_int): - """Load and cache the summarization pipeline.""" - logger.info(f"Loading summarization model on device {device_int}") - return pipeline("summarization", model=SUMMARY_MODEL, device=device_int) - - -@st.cache_resource -def _load_summary_tokenizer(): - """Load and cache the summarization 
tokenizer.""" - return AutoTokenizer.from_pretrained(SUMMARY_MODEL) - - -def chunk_text(text, max_tokens, tokenizer): - """ - Splits text into chunks by tokenizing once, then splitting by token windows. - Much faster than the per-word tokenization approach. - """ - all_ids = tokenizer(text, return_tensors='pt', truncation=False)['input_ids'][0] - content_ids = all_ids[1:-1] # strip BOS/EOS - usable_max = max_tokens - 2 # leave room for special tokens - - chunks = [] - for i in range(0, len(content_ids), usable_max): - chunk_ids = content_ids[i : i + usable_max] - decoded = tokenizer.decode(chunk_ids, skip_special_tokens=True).strip() - if decoded: - chunks.append(decoded) - - if not chunks: - chunks.append(text) - - return chunks - - -def summarize_text(text, use_gpu=True, memory_fraction=0.8): - """ - Summarize text using a Hugging Face pipeline with chunking support. - - Args: - text (str): Text to summarize - use_gpu (bool): Whether to use GPU if available - memory_fraction (float): Fraction of GPU memory to use - - Returns: - str: Summarized text - """ - device = -1 - if use_gpu and torch.cuda.is_available(): - device = 0 - torch.cuda.set_per_process_memory_fraction(memory_fraction) - - logger.info(f"Using device {device} for summarization") - - try: - summarizer = _load_summarizer(device) - tokenizer = _load_summary_tokenizer() - - max_tokens = 512 - tokens = tokenizer(text, return_tensors='pt') - num_tokens = len(tokens['input_ids'][0]) - - if num_tokens > max_tokens: - chunks = chunk_text(text, max_tokens, tokenizer) - summaries = [] - - for i, chunk in enumerate(chunks): - logger.info(f"Summarizing chunk {i+1}/{len(chunks)}") - summary_output = summarizer( - "summarize: " + chunk, - max_length=150, - min_length=30, - do_sample=False - ) - summaries.append(summary_output[0]['summary_text']) - - if len(summaries) > 1: - logger.info("Generating final summary from chunk summaries") - combined_text = " ".join(summaries) - return summarizer( - "summarize: " + 
combined_text, - max_length=150, - min_length=30, - do_sample=False - )[0]['summary_text'] - return summaries[0] - else: - return summarizer( - "summarize: " + text, - max_length=150, - min_length=30, - do_sample=False - )[0]['summary_text'] - - except Exception as e: - logger.error(f"Error during summarization: {e}") - if device != -1: - logger.info("Falling back to CPU") - return summarize_text(text, use_gpu=False, memory_fraction=memory_fraction) - raise diff --git a/utils/transcription.py b/utils/transcription.py deleted file mode 100644 index 82d9054..0000000 --- a/utils/transcription.py +++ /dev/null @@ -1,103 +0,0 @@ -import whisper -from pathlib import Path -from utils.audio_processing import extract_audio -import logging -import torch -import streamlit as st - -try: - from utils.gpu_utils import configure_gpu, get_optimal_device - GPU_UTILS_AVAILABLE = True -except ImportError: - GPU_UTILS_AVAILABLE = False - -try: - from utils.cache import load_from_cache, save_to_cache - CACHE_AVAILABLE = True -except ImportError: - CACHE_AVAILABLE = False - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -WHISPER_MODEL = "base" - -WHISPER_MODEL_SIZES = { - "tiny": 75, - "base": 140, - "small": 460, - "medium": 1500, - "large": 2900, - "large-v2": 2900, - "large-v3": 2900, -} - - -@st.cache_resource -def _load_whisper_model(model_name, device_str): - """Load and cache a Whisper model. Cached across reruns.""" - logger.info(f"Loading Whisper model: {model_name} on {device_str}") - device = torch.device(device_str) - try: - return whisper.load_model(model_name, device=device if device.type != "mps" else "cpu") - except (MemoryError, RuntimeError) as e: - err_str = str(e).lower() - if "out of memory" in err_str or "cannot allocate" in err_str or isinstance(e, MemoryError): - size_mb = WHISPER_MODEL_SIZES.get(model_name, "unknown") - raise MemoryError( - f"Not enough memory to load Whisper '{model_name}' model (~{size_mb}MB). 
" - f"Try a smaller model (tiny/base/small) or enable GPU acceleration." - ) from e - raise - - -def transcribe_audio(audio_path: Path, model=WHISPER_MODEL, use_cache=True, cache_max_age=None, - use_gpu=True, memory_fraction=0.8): - """ - Transcribe audio using Whisper and return both segments and full transcript. - - Args: - audio_path (Path): Path to the audio or video file - model (str): Whisper model size to use (tiny, base, small, medium, large) - use_cache (bool): Whether to use caching - cache_max_age (float, optional): Maximum age of cache in seconds - use_gpu (bool): Whether to use GPU acceleration if available - memory_fraction (float): Fraction of GPU memory to use (0.0 to 1.0) - - Returns: - tuple: (segments, transcript) where segments is a list of dicts with timing info - """ - audio_path = Path(audio_path) - - if use_cache and CACHE_AVAILABLE: - cached_data = load_from_cache(audio_path, model, "transcribe", cache_max_age) - if cached_data: - logger.info(f"Using cached transcription for {audio_path}") - return cached_data.get("segments", []), cached_data.get("transcript", "") - - video_extensions = ['.mp4', '.avi', '.mov', '.mkv'] - if audio_path.suffix.lower() in video_extensions: - audio_path = extract_audio(audio_path) - - device = torch.device("cpu") - if use_gpu and GPU_UTILS_AVAILABLE: - gpu_config = configure_gpu(model, memory_fraction) - device = gpu_config["device"] - logger.info(f"Using device: {device} for transcription") - - whisper_model = _load_whisper_model(model, str(device)) - - logger.info(f"Transcribing audio: {audio_path}") - result = whisper_model.transcribe(str(audio_path)) - - transcript = result["text"] - segments = result["segments"] - - if use_cache and CACHE_AVAILABLE: - cache_data = { - "transcript": transcript, - "segments": segments - } - save_to_cache(audio_path, cache_data, model, "transcribe") - - return segments, transcript \ No newline at end of file diff --git a/utils/translation.py b/utils/translation.py deleted 
file mode 100644 index 5d8094b..0000000 --- a/utils/translation.py +++ /dev/null @@ -1,262 +0,0 @@ -""" -Translation utilities for the Video Transcriber. -Provides functions for language detection and translation. -""" - -import logging -import torch -from pathlib import Path -from transformers import pipeline, AutoTokenizer, M2M100ForConditionalGeneration -import whisper -import iso639 -import streamlit as st - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -try: - from utils.gpu_utils import get_optimal_device - GPU_UTILS_AVAILABLE = True -except ImportError: - GPU_UTILS_AVAILABLE = False - -TRANSLATION_MODEL = "facebook/m2m100_418M" -LANGUAGE_DETECTION_MODEL = "papluca/xlm-roberta-base-language-detection" - - -@st.cache_resource -def _load_language_detector(model_name, device_int): - """Load and cache the language detection pipeline.""" - logger.info(f"Loading language detection model: {model_name}") - return pipeline("text-classification", model=model_name, device=device_int) - - -@st.cache_resource -def _load_translation_model(model_name, device_str): - """Load and cache the M2M100 translation model and tokenizer.""" - logger.info(f"Loading translation model: {model_name} on {device_str}") - tokenizer = AutoTokenizer.from_pretrained(model_name) - model = M2M100ForConditionalGeneration.from_pretrained(model_name) - device = torch.device(device_str) - model = model.to(device) - return model, tokenizer - - -def get_language_name(code): - """Get the language name from ISO code.""" - try: - return iso639.languages.get(part1=code).name - except (KeyError, AttributeError): - try: - return iso639.languages.get(part2b=code).name - except (KeyError, AttributeError): - return code - - -def detect_language(text, model=LANGUAGE_DETECTION_MODEL, use_gpu=True): - """ - Detect the language of a text. 
- - Args: - text (str): Text to detect language for - model (str): Model to use for language detection - use_gpu (bool): Whether to use GPU acceleration if available - - Returns: - tuple: (language_code, confidence) - """ - device = torch.device("cpu") - if use_gpu and GPU_UTILS_AVAILABLE: - device = get_optimal_device() - device_arg = 0 if device.type == "cuda" else -1 - else: - device_arg = -1 - - try: - classifier = _load_language_detector(model, device_arg) - - max_length = 512 - if len(text) > max_length: - text = text[:max_length] - - result = classifier(text)[0] - return result["label"], result["score"] - except Exception as e: - logger.error(f"Error detecting language: {e}") - return None, 0.0 - - -def _translate_text_with_model(text, source_lang, target_lang, trans_model, tokenizer, device): - """Translate text using a pre-loaded model and tokenizer.""" - tokenizer.src_lang = source_lang - - max_length = 512 - if len(text) > max_length: - chunks = [text[i:i+max_length] for i in range(0, len(text), max_length)] - else: - chunks = [text] - - translated_chunks = [] - for chunk in chunks: - encoded = tokenizer(chunk, return_tensors="pt").to(device) - generated_tokens = trans_model.generate( - **encoded, - forced_bos_token_id=tokenizer.get_lang_id(target_lang), - max_length=max_length - ) - translated_chunk = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0] - translated_chunks.append(translated_chunk) - - return " ".join(translated_chunks) - - -def translate_text(text, source_lang=None, target_lang="en", model=TRANSLATION_MODEL, use_gpu=True): - """ - Translate text from source language to target language. 
- - Args: - text (str): Text to translate - source_lang (str, optional): Source language code (auto-detect if None) - target_lang (str): Target language code - model (str): Model to use for translation - use_gpu (bool): Whether to use GPU acceleration if available - - Returns: - str: Translated text - """ - if source_lang is None: - detected_lang, confidence = detect_language(text, use_gpu=use_gpu) - if detected_lang and confidence > 0.5: - source_lang = detected_lang - logger.info(f"Detected language: {get_language_name(source_lang)} ({source_lang}) with confidence {confidence:.2f}") - else: - logger.warning("Could not reliably detect language, defaulting to English") - source_lang = "en" - - if source_lang == target_lang: - logger.info(f"Source and target languages are the same ({source_lang}), skipping translation") - return text - - device = torch.device("cpu") - if use_gpu and GPU_UTILS_AVAILABLE: - device = get_optimal_device() - - try: - trans_model, tokenizer = _load_translation_model(model, str(device)) - return _translate_text_with_model(text, source_lang, target_lang, trans_model, tokenizer, device) - except Exception as e: - logger.error(f"Error translating text: {e}") - return text - - -def translate_segments(segments, source_lang=None, target_lang="en", use_gpu=True): - """ - Translate transcript segments. Loads the model once and reuses for all segments. 
- - Args: - segments (list): List of transcript segments - source_lang (str, optional): Source language code (auto-detect if None) - target_lang (str): Target language code - use_gpu (bool): Whether to use GPU acceleration if available - - Returns: - list: Translated segments - """ - if not segments: - return [] - - if source_lang is None: - combined_text = " ".join([segment["text"] for segment in segments]) - detected_lang, _ = detect_language(combined_text, use_gpu=use_gpu) - source_lang = detected_lang if detected_lang else "en" - - if source_lang == target_lang: - return segments - - device = torch.device("cpu") - if use_gpu and GPU_UTILS_AVAILABLE: - device = get_optimal_device() - - try: - trans_model, tokenizer = _load_translation_model(TRANSLATION_MODEL, str(device)) - - translated_segments = [] - for segment in segments: - translated_text = _translate_text_with_model( - segment["text"], source_lang, target_lang, trans_model, tokenizer, device - ) - - translated_segment = segment.copy() - translated_segment["text"] = translated_text - translated_segment["original_text"] = segment["text"] - translated_segment["source_lang"] = source_lang - translated_segment["target_lang"] = target_lang - translated_segments.append(translated_segment) - - return translated_segments - except Exception as e: - logger.error(f"Error translating segments: {e}") - return segments - - -def transcribe_and_translate(audio_path, whisper_model="base", target_lang="en", - use_gpu=True, detect_source=True): - """ - Transcribe audio and translate to target language. 
- - Args: - audio_path (Path): Path to the audio file - whisper_model (str): Whisper model size to use - target_lang (str): Target language code - use_gpu (bool): Whether to use GPU acceleration if available - detect_source (bool): Whether to auto-detect source language - - Returns: - tuple: (original_segments, translated_segments, original_transcript, translated_transcript) - """ - from utils.transcription import _load_whisper_model - - audio_path = Path(audio_path) - - device = torch.device("cpu") - if use_gpu and GPU_UTILS_AVAILABLE: - device = get_optimal_device() - - try: - logger.info(f"Transcribing audio with Whisper model: {whisper_model}") - model = _load_whisper_model(whisper_model, str(device)) - - if detect_source: - audio = whisper.load_audio(str(audio_path)) - audio = whisper.pad_or_trim(audio) - mel = whisper.log_mel_spectrogram(audio).to(device if device.type != "mps" else "cpu") - _, probs = model.detect_language(mel) - source_lang = max(probs, key=probs.get) - logger.info(f"Whisper detected language: {get_language_name(source_lang)} ({source_lang})") - result = model.transcribe(str(audio_path), language=source_lang) - else: - result = model.transcribe(str(audio_path)) - source_lang = result.get("language", "en") - - original_segments = result["segments"] - original_transcript = result["text"] - - if source_lang != target_lang: - logger.info(f"Translating from {source_lang} to {target_lang}") - translated_segments = translate_segments( - original_segments, - source_lang=source_lang, - target_lang=target_lang, - use_gpu=use_gpu - ) - translated_transcript = " ".join([segment["text"] for segment in translated_segments]) - else: - logger.info(f"Source and target languages are the same ({source_lang}), skipping translation") - translated_segments = original_segments - translated_transcript = original_transcript - - return original_segments, translated_segments, original_transcript, translated_transcript - - except Exception as e: - logger.error(f"Error 
in transcribe_and_translate: {e}") - return None, None, None, None \ No newline at end of file diff --git a/utils/validation.py b/utils/validation.py deleted file mode 100644 index 8b579af..0000000 --- a/utils/validation.py +++ /dev/null @@ -1,38 +0,0 @@ -from pathlib import Path -import shutil -import logging - -logger = logging.getLogger(__name__) - - -def validate_environment(obs_path: Path = None): - """Validate environment and prerequisites.""" - errors = [] - - if obs_path and not obs_path.exists(): - errors.append(f"Directory not found: {obs_path}") - - if not shutil.which("ffmpeg"): - errors.append("FFmpeg is not installed or not in PATH. Install it from https://ffmpeg.org/download.html") - - return errors - - -def get_system_capabilities(): - """Return a dict of detected system capabilities for display.""" - import torch - - caps = { - "ffmpeg": shutil.which("ffmpeg") is not None, - "cuda": torch.cuda.is_available(), - "mps": hasattr(torch.backends, "mps") and torch.backends.mps.is_available(), - "gpu_name": None, - "gpu_memory": None, - } - - if caps["cuda"] and torch.cuda.device_count() > 0: - props = torch.cuda.get_device_properties(0) - caps["gpu_name"] = props.name - caps["gpu_memory"] = props.total_memory - - return caps