fix: strip markdown code fences from LLM JSON response
Gemini wraps JSON output in ```json fences, which broke the parser.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
parent
362fe1e860
commit
6c40f713a4
|
|
@ -259,6 +259,9 @@ async def _call_openai(system: str, user_prompt: str) -> str:
|
|||
|
||||
def _parse_clips(content: str, video_duration: float) -> list[dict]:
|
||||
"""Parse LLM response into clip list, handling imperfect JSON."""
|
||||
# Strip markdown code fences (e.g. ```json ... ```)
|
||||
content = re.sub(r"```(?:json)?\s*", "", content).strip()
|
||||
|
||||
# Try to extract JSON from response
|
||||
json_match = re.search(r"\{[\s\S]*\}", content)
|
||||
if not json_match:
|
||||
|
|
|
|||
Loading…
Reference in New Issue