fix(blender): use correct Ollama model (qwen2.5-coder:7b)
qwen2.5:14b doesn't exist on the server, causing a silent 404 from Ollama and a 502 to the client. Also added error logging for non-ok Ollama responses. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
parent
1a422f06ac
commit
55b973ebc2
|
|
@ -1594,7 +1594,7 @@ app.post("/api/blender-gen", async (c) => {
|
|||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
model: process.env.OLLAMA_MODEL || "qwen2.5:14b",
|
||||
model: process.env.OLLAMA_MODEL || "qwen2.5-coder:7b",
|
||||
prompt: `Generate a Blender Python script that creates: ${prompt}\n\nThe script should:\n- Import bpy\n- Clear the default scene\n- Create the described objects with materials\n- Set up basic lighting and camera\n- Render to /tmp/render.png at 1024x1024\n\nOnly output the Python code, no explanations.`,
|
||||
stream: false,
|
||||
}),
|
||||
|
|
@ -1606,6 +1606,9 @@ app.post("/api/blender-gen", async (c) => {
|
|||
// Extract code block if wrapped in markdown
|
||||
const codeMatch = script.match(/```(?:python)?\n([\s\S]*?)```/);
|
||||
if (codeMatch) script = codeMatch[1].trim();
|
||||
} else {
|
||||
const errText = await llmRes.text().catch(() => "");
|
||||
console.error(`[blender-gen] Ollama ${llmRes.status}: ${errText}`);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error("[blender-gen] LLM error:", e);
|
||||
|
|
|
|||
Loading…
Reference in New Issue