fix: increase Ollama timeout to 600s for CPU inference

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Jeff Emmett 2026-02-08 13:05:29 +00:00
parent 4619b53b5e
commit 1784bb35ff
1 changed files with 1 additions and 1 deletions

View File

@@ -87,7 +87,7 @@ Identify the {settings.target_clips} best viral clips from this transcript."""
     logger.info(f"Sending transcript to Ollama ({settings.ollama_model})...")
-    async with httpx.AsyncClient(timeout=300.0) as client:
+    async with httpx.AsyncClient(timeout=600.0) as client:
         response = await client.post(
             f"{settings.ollama_url}/api/chat",
             json={