Got the CPU-based backend working; now trying a Python/GPU solution, since it will probably be faster.

This commit is contained in:
2026-03-26 00:58:57 -06:00
parent 00ee076baa
commit 164b2f87d4
11 changed files with 688 additions and 23 deletions

View File

@ -1,5 +1,7 @@
// --- Commands ---
mod transcription;
/// Returns the backend URL. Stubbed for now; will be replaced once the
/// Python/Rust backend is fully wired up.
#[tauri::command]
@ -34,6 +36,26 @@ fn decrypt_string(encrypted: String) -> Result<String, String> {
.and_then(|b| String::from_utf8(b).map_err(|e| format!("utf8 error: {e}")))
}
/// Ensure a Whisper model is downloaded, downloading it if not present.
///
/// Runs the (potentially slow) download on a blocking thread so the async
/// runtime is not stalled. Returns the inner result from the transcription
/// module, or a formatted error if the blocking task itself failed to join.
#[tauri::command]
async fn ensure_model(model_name: String) -> Result<String, String> {
    let task = tauri::async_runtime::spawn_blocking(move || {
        transcription::ensure_model_downloaded(&model_name)
    });
    // Flatten Result<Result<_, _>, JoinError> into a single Result.
    match task.await {
        Ok(inner) => inner,
        Err(e) => Err(format!("Task error: {:?}", e)),
    }
}
/// Transcribe audio file using Whisper.cpp (runs on a background thread)
///
/// Moves the CPU-heavy transcription onto a blocking worker via
/// `spawn_blocking`; the optional language hint is passed through as `&str`.
/// A join failure on the blocking task is surfaced as a formatted error string.
#[tauri::command]
async fn transcribe_audio(
    file_path: String,
    model_name: String,
    language: Option<String>,
) -> Result<transcription::TranscriptionResult, String> {
    let task = tauri::async_runtime::spawn_blocking(move || {
        transcription::transcribe_audio(&file_path, &model_name, language.as_deref())
    });
    // Flatten Result<Result<_, _>, JoinError> into a single Result.
    match task.await {
        Ok(inner) => inner,
        Err(e) => Err(format!("Task error: {:?}", e)),
    }
}
// --- App entry point ---
#[cfg_attr(mobile, tauri::mobile_entry_point)]
@ -55,6 +77,8 @@ pub fn run() {
get_backend_url,
encrypt_string,
decrypt_string,
ensure_model,
transcribe_audio,
])
.run(tauri::generate_context!())
.expect("error while running tauri application");