Compare commits

4 Commits

a64ae78833  Update app icons to custom waveform SVG  (2026-05-07 02:58:45 -06:00)
Some checks failed:
  CI / rust (push): Failing after 2m46s
  CI / frontend (push): Successful in 36s
  CI / python (push): Failing after 8s
  Validate All / validate-all (push): Failing after 4m53s

b558ef8a7f  Simplify release workflow: deb, rpm, msi  (2026-05-07 02:15:22 -06:00)
Some checks failed:
  Release / windows (push): Waiting to run
  CI / rust (push): Failing after 1m38s
  CI / frontend (push): Successful in 29s
  CI / python (push): Failing after 11s
  Validate All / validate-all (push): Failing after 4m52s
  Release / linux (push): Failing after 5m57s

f1e6c010eb  Add AppImage to release bundles  (2026-05-07 01:35:42 -06:00)
Some checks failed:
  CI / python (push): Failing after 1m44s
  Validate All / validate-all (push): Has been cancelled
  Release / build (appimage, ubuntu-24.04, x86_64-unknown-linux-gnu) (push): Has been cancelled
  Release / build (archlinux, ubuntu-24.04, x86_64-unknown-linux-gnu) (push): Has been cancelled
  Release / build (deb, ubuntu-24.04, x86_64-unknown-linux-gnu) (push): Has been cancelled
  Release / build (msi, windows-latest, x86_64-pc-windows-msvc) (push): Has been cancelled
  Release / build (rpm, ubuntu-24.04, x86_64-unknown-linux-gnu) (push): Has been cancelled
  CI / frontend (push): Failing after 14m32s
  CI / rust (push): Failing after 14m49s

124f215a0a  Add local LLM router and service  (2026-05-07 01:32:19 -06:00)
Some checks failed:
  CI / rust (push): Has been cancelled
  CI / frontend (push): Has been cancelled
  CI / python (push): Has been cancelled
  Validate All / validate-all (push): Has been cancelled
  Release / build (archlinux, ubuntu-24.04, x86_64-unknown-linux-gnu) (push): Has been cancelled
  Release / build (deb, ubuntu-24.04, x86_64-unknown-linux-gnu) (push): Has been cancelled
  Release / build (msi, windows-latest, x86_64-pc-windows-msvc) (push): Has been cancelled
  Release / build (rpm, ubuntu-24.04, x86_64-unknown-linux-gnu) (push): Has been cancelled
19 changed files with 236 additions and 49 deletions


@@ -6,68 +6,29 @@ on:
       - 'v*'
 jobs:
-  build:
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - platform: ubuntu-24.04
-            target: x86_64-unknown-linux-gnu
-            bundles: deb
-          - platform: ubuntu-24.04
-            target: x86_64-unknown-linux-gnu
-            bundles: rpm
-          - platform: ubuntu-24.04
-            target: x86_64-unknown-linux-gnu
-            bundles: archlinux
-          - platform: windows-latest
-            target: x86_64-pc-windows-msvc
-            bundles: msi
-    runs-on: ${{ matrix.platform }}
+  linux:
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - name: Setup Node
-        uses: actions/setup-node@v4
+      - uses: actions/setup-node@v4
         with:
           node-version: 20
           cache: npm
           cache-dependency-path: frontend/package-lock.json
-      - name: Install frontend dependencies
-        run: npm ci
+      - run: npm ci
         working-directory: frontend
-      - name: Install Rust
-        uses: dtolnay/rust-toolchain@stable
-        with:
-          targets: ${{ matrix.target }}
-      - name: Install system dependencies (Linux)
-        if: runner.os == 'Linux'
-        run: |
+      - uses: dtolnay/rust-toolchain@stable
+      - run: |
           sudo apt-get update
           sudo apt-get install -y \
             libwebkit2gtk-4.1-dev \
             libappindicator3-dev \
             librsvg2-dev \
             patchelf \
             libssl-dev \
             libgtk-3-dev \
-            libayatana-appindicator3-dev
-      - name: Install RPM build tools
-        if: matrix.bundles == 'rpm'
-        run: sudo apt-get install -y rpm
-      - name: Install ArchLinux build tools
-        if: matrix.bundles == 'archlinux'
-        run: sudo apt-get install -y pacman-pkg-strap
-      - name: Build Tauri app
-        uses: tauri-apps/tauri-action@v0
+            libayatana-appindicator3-dev \
+            rpm
+      - uses: tauri-apps/tauri-action@v0
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
@@ -76,4 +37,50 @@ jobs:
           releaseBody: 'See the assets to download and install this version.'
           releaseDraft: false
           includeUpdaterJson: true
-          args: --bundles ${{ matrix.bundles }} --target ${{ matrix.target }}
+          args: --bundles deb,rpm
+  windows:
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 20
+          cache: npm
+          cache-dependency-path: frontend/package-lock.json
+      - run: npm ci
+        working-directory: frontend
+      - uses: dtolnay/rust-toolchain@stable
+      - uses: tauri-apps/tauri-action@v0
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tagName: ${{ github.ref_name }}
+          releaseName: 'TalkEdit ${{ github.ref_name }}'
+          releaseBody: 'See the assets to download and install this version.'
+          releaseDraft: false
+          includeUpdaterJson: true
+          args: --bundles msi
+  # macos:
+  #   runs-on: macos-latest
+  #   steps:
+  #     - uses: actions/checkout@v4
+  #     - uses: actions/setup-node@v4
+  #       with:
+  #         node-version: 20
+  #         cache: npm
+  #         cache-dependency-path: frontend/package-lock.json
+  #     - run: npm ci
+  #       working-directory: frontend
+  #     - uses: dtolnay/rust-toolchain@stable
+  #     - uses: tauri-apps/tauri-action@v0
+  #       env:
+  #         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  #       with:
+  #         tagName: ${{ github.ref_name }}
+  #         releaseName: 'TalkEdit ${{ github.ref_name }}'
+  #         releaseBody: 'See the assets to download and install this version.'
+  #         releaseDraft: false
+  #         includeUpdaterJson: true
+  #         args: --bundles dmg


@@ -0,0 +1,54 @@
"""Local LLM endpoints for bundled Qwen3 inference."""

import logging
from typing import Optional

from fastapi import APIRouter, HTTPException
from pydantic import BaseModel

from services.local_llm import get_status, download_model, complete

logger = logging.getLogger(__name__)

router = APIRouter()


class CompleteRequest(BaseModel):
    prompt: str
    model_id: str = "qwen3-1.7b"
    system_prompt: Optional[str] = None
    temperature: float = 0.3
    max_tokens: int = 2048


@router.get("/local-llm/status")
async def llm_status():
    try:
        return get_status()
    except Exception as e:
        logger.error(f"Local LLM status failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/local-llm/download")
async def llm_download(model_id: str = "qwen3-1.7b"):
    try:
        return download_model(model_id)
    except Exception as e:
        logger.error(f"Local LLM download failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/local-llm/complete")
async def llm_complete(req: CompleteRequest):
    try:
        result = complete(
            prompt=req.prompt,
            model_id=req.model_id,
            system_prompt=req.system_prompt,
            temperature=req.temperature,
            max_tokens=req.max_tokens,
        )
        return {"response": result}
    except Exception as e:
        logger.error(f"Local LLM completion failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
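
For reference, a minimal client-side sketch of exercising these endpoints. It assumes the router is mounted without a prefix and that the backend listens on http://localhost:8000; both the base URL and the httpx dependency are assumptions, not something this diff establishes.

import httpx  # assumed HTTP client; requests would work the same way

BASE = "http://localhost:8000"  # hypothetical; depends on how the app is served

# Check whether llama-cpp-python is importable and which models are on disk.
status = httpx.get(f"{BASE}/local-llm/status").json()

# model_id is a scalar endpoint parameter, so FastAPI reads it from the
# query string. The download blocks until the file is fetched.
if not status["models"]["qwen3-1.7b"]["downloaded"]:
    httpx.post(f"{BASE}/local-llm/download",
               params={"model_id": "qwen3-1.7b"}, timeout=None)

# Run a completion using the CompleteRequest schema defined above.
resp = httpx.post(f"{BASE}/local-llm/complete", json={
    "prompt": "Summarize this transcript in one sentence.",
    "system_prompt": "You are a concise assistant.",
}, timeout=None)
print(resp.json()["response"])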


@@ -0,0 +1,125 @@
"""
Local LLM inference using llama.cpp via llama-cpp-python.
Handles model download from HuggingFace and text completion.
"""

import json
import logging
import os
import subprocess
import sys
from pathlib import Path
from typing import Optional

logger = logging.getLogger(__name__)

LOCAL_MODELS_DIR = Path.home() / ".cache" / "talkedit" / "models"

QWEN_MODELS = {
    "qwen3-1.7b": {
        "repo": "Qwen/Qwen3-1.7B-Instruct-GGUF",
        "file": "qwen3-1.7b-instruct-q4_k_m.gguf",
        "size_gb": 1.0,
    },
    "qwen3-4b": {
        "repo": "Qwen/Qwen3-4B-Instruct-GGUF",
        "file": "qwen3-4b-instruct-q4_k_m.gguf",
        "size_gb": 2.5,
    },
}


def _ensure_llama_cpp() -> bool:
    try:
        from llama_cpp import Llama
        return True
    except ImportError:
        return False


def _model_path(model_id: str) -> Path:
    info = QWEN_MODELS.get(model_id)
    if not info:
        raise ValueError(f"Unknown model: {model_id}")
    return LOCAL_MODELS_DIR / model_id / info["file"]


def get_status() -> dict:
    """Check status of local LLM setup."""
    llama_available = _ensure_llama_cpp()
    models = {}
    for model_id in QWEN_MODELS:
        path = _model_path(model_id)
        models[model_id] = {
            "downloaded": path.exists(),
            "size_bytes": path.stat().st_size if path.exists() else 0,
            "total_gb": QWEN_MODELS[model_id]["size_gb"],
        }
    return {
        "llama_cpp_available": llama_available,
        "models": models,
        "models_dir": str(LOCAL_MODELS_DIR),
    }


def download_model(model_id: str) -> dict:
    """Download a Qwen3 GGUF model from HuggingFace."""
    info = QWEN_MODELS.get(model_id)
    if not info:
        raise ValueError(f"Unknown model: {model_id}")
    model_dir = LOCAL_MODELS_DIR / model_id
    model_dir.mkdir(parents=True, exist_ok=True)
    output_path = model_dir / info["file"]
    if output_path.exists():
        return {"status": "already_downloaded", "path": str(output_path)}
    logger.info(f"Downloading {info['repo']}/{info['file']} ({info['size_gb']} GB)...")
    subprocess.run([
        sys.executable, "-m", "huggingface_hub", "download",
        info["repo"], info["file"],
        "--local-dir", str(model_dir),
        "--local-dir-use-symlinks", "False",
    ], check=True)
    if not output_path.exists():
        raise RuntimeError(f"Download failed: {output_path} not found")
    return {"status": "downloaded", "path": str(output_path)}


def complete(
    prompt: str,
    model_id: str = "qwen3-1.7b",
    system_prompt: Optional[str] = None,
    temperature: float = 0.3,
    max_tokens: int = 2048,
) -> str:
    """Run inference using a local Qwen3 model."""
    model_path = _model_path(model_id)
    if not model_path.exists():
        raise RuntimeError(f"Model not downloaded: {model_id}")
    from llama_cpp import Llama

    llm = Llama(
        model_path=str(model_path),
        n_ctx=4096,
        n_threads=4,
        n_gpu_layers=-1,
        verbose=False,
    )
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": prompt})
    response = llm.create_chat_completion(
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return response["choices"][0]["message"]["content"].strip()

15 binary icon files changed (contents not shown); all are smaller after the update. Sizes before and after: 11 KiB → 2.3 KiB, 23 KiB → 4.7 KiB, 2.2 KiB → 719 B, 9.0 KiB → 2.0 KiB, 12 KiB → 2.6 KiB, 13 KiB → 2.6 KiB, 25 KiB → 5.3 KiB, 2.0 KiB → 629 B, 28 KiB → 5.8 KiB, 3.3 KiB → 922 B, 5.9 KiB → 1.4 KiB, 7.4 KiB → 1.7 KiB, 3.9 KiB → 968 B, 37 KiB → 4.2 KiB, 49 KiB → 10 KiB.
src-tauri/icons/icon.svg (new file)

@@ -0,0 +1 @@
<svg width="48" height="48" viewBox="0 0 36 36" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M6 10h12a6 6 0 0 1 0 12H8l-2 4V10Z" stroke="#818cf8" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" opacity="0.7"></path><path d="M6 10h12a6 6 0 0 1 0 12H8l-2 4V10Z" stroke="#6366f1" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"></path><path d="M10 14v4M13 13v6M16 14v4" stroke="#6366f1" stroke-width="1.5" stroke-linecap="round"></path><path d="M22 16h6M22 19h4M22 22h5" stroke="#818cf8" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round" opacity="0.6"></path></svg>
