diff --git a/server/index.ts b/server/index.ts
index 93b02f5..e453264 100644
--- a/server/index.ts
+++ b/server/index.ts
@@ -1556,52 +1556,44 @@ const RUNPOD_API_KEY = process.env.RUNPOD_API_KEY || "";
 app.get("/api/blender-gen/health", async (c) => {
   const issues: string[] = [];
   const warnings: string[] = [];
 
-  const OLLAMA_URL = process.env.OLLAMA_URL || "http://localhost:11434";
-  try {
-    const res = await fetch(`${OLLAMA_URL}/api/tags`, { signal: AbortSignal.timeout(3000) });
-    if (!res.ok) issues.push("Ollama not responding");
-  } catch {
-    issues.push("Ollama unreachable");
-  }
+  if (!GEMINI_API_KEY) issues.push("GEMINI_API_KEY not configured");
   if (!RUNPOD_API_KEY) warnings.push("RunPod not configured — script-only mode");
   return c.json({ available: issues.length === 0, issues, warnings });
 });
 
 app.post("/api/blender-gen", async (c) => {
+  if (!GEMINI_API_KEY) return c.json({ error: "GEMINI_API_KEY not configured" }, 503);
+
   const { prompt } = await c.req.json();
   if (!prompt) return c.json({ error: "prompt required" }, 400);
 
-  // Step 1: Generate Blender Python script via LLM
-  const OLLAMA_URL = process.env.OLLAMA_URL || "http://localhost:11434";
+  // Step 1: Generate Blender Python script via Gemini Flash
   let script = "";
   try {
-    const llmRes = await fetch(`${OLLAMA_URL}/api/generate`, {
-      method: "POST",
-      headers: { "Content-Type": "application/json" },
-      body: JSON.stringify({
-        model: process.env.OLLAMA_MODEL || "qwen2.5-coder:7b",
-        prompt: `Generate a Blender Python script that creates: ${prompt}\n\nThe script should:\n- Import bpy\n- Clear the default scene\n- Create the described objects with materials\n- Set up basic lighting and camera\n- Render to /tmp/render.png at 1024x1024\n\nOnly output the Python code, no explanations.`,
-        stream: false,
-      }),
-    });
+    const { GoogleGenerativeAI } = await import("@google/generative-ai");
+    const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
+    const model = genAI.getGenerativeModel({ model: "gemini-2.0-flash" });
+    const result = await model.generateContent(`Generate a Blender Python script that creates: ${prompt}
 
-    if (llmRes.ok) {
-      const llmData = await llmRes.json();
-      script = llmData.response || "";
-      // Extract code block if wrapped in markdown
-      const codeMatch = script.match(/```(?:python)?\n([\s\S]*?)```/);
-      if (codeMatch) script = codeMatch[1].trim();
-    } else {
-      const errText = await llmRes.text().catch(() => "");
-      console.error(`[blender-gen] Ollama ${llmRes.status}: ${errText}`);
-    }
+The script should:
+- Import bpy
+- Clear the default scene (delete all default objects)
+- Create the described objects with materials and colors
+- Set up basic lighting (sun + area light) and camera positioned to frame the scene
+- Render to /tmp/render.png at 1024x1024
+
+Output ONLY the Python code, no explanations or comments outside the code.`);
+
+    const text = result.response.text();
+    const codeMatch = text.match(/```(?:python)?\n([\s\S]*?)```/);
+    script = codeMatch ? codeMatch[1].trim() : text.trim();
   } catch (e) {
-    console.error("[blender-gen] LLM error:", e);
+    console.error("[blender-gen] Gemini error:", e);
   }
 
   if (!script) {
-    return c.json({ error: "Failed to generate Blender script — is Ollama running?" }, 502);
+    return c.json({ error: "Failed to generate Blender script" }, 502);
   }
 
   // Step 2: Execute on RunPod (headless Blender) — optional