From d480c635ff01d8e00c148ff57af473a3b21defb2 Mon Sep 17 00:00:00 2001
From: Jeff Emmett
Date: Mon, 9 Feb 2026 21:27:36 +0000
Subject: [PATCH] Increase Ollama timeout to 1800s for long video transcripts

47-minute videos produce ~48K chars of transcript, which takes >10 minutes
for llama3.1:8b on CPU to process.

Co-Authored-By: Claude Opus 4.6

---
 backend/app/services/ai_analysis.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backend/app/services/ai_analysis.py b/backend/app/services/ai_analysis.py
index e17d4eb..c6e16b5 100644
--- a/backend/app/services/ai_analysis.py
+++ b/backend/app/services/ai_analysis.py
@@ -87,7 +87,7 @@ Identify the {settings.target_clips} best viral clips from this transcript."""
 
     logger.info(f"Sending transcript to Ollama ({settings.ollama_model})...")
 
-    async with httpx.AsyncClient(timeout=600.0) as client:
+    async with httpx.AsyncClient(timeout=1800.0) as client:
         response = await client.post(
             f"{settings.ollama_url}/api/chat",
             json={