From 1784bb35ffc5083104d67bee5d82754e8bb53226 Mon Sep 17 00:00:00 2001
From: Jeff Emmett
Date: Sun, 8 Feb 2026 13:05:29 +0000
Subject: [PATCH] fix: increase Ollama timeout to 600s for CPU inference

Co-Authored-By: Claude Opus 4.6
---
 backend/app/services/ai_analysis.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backend/app/services/ai_analysis.py b/backend/app/services/ai_analysis.py
index 5e55dce..e17d4eb 100644
--- a/backend/app/services/ai_analysis.py
+++ b/backend/app/services/ai_analysis.py
@@ -87,7 +87,7 @@ Identify the {settings.target_clips} best viral clips from this transcript."""
 
     logger.info(f"Sending transcript to Ollama ({settings.ollama_model})...")
 
-    async with httpx.AsyncClient(timeout=300.0) as client:
+    async with httpx.AsyncClient(timeout=600.0) as client:
         response = await client.post(
             f"{settings.ollama_url}/api/chat",
             json={