Compare commits

...

2 Commits

Author SHA1 Message Date
Jeff Emmett 19b5356f3d feat: add server-side AI service proxies for fal.ai and RunPod
Add proxy endpoints to Cloudflare Worker for AI services, keeping
API credentials server-side for better security architecture.

Changes:
- Add fal.ai proxy endpoints (/api/fal/*) for image generation
- Add RunPod proxy endpoints (/api/runpod/*) for image, video, text, whisper
- Update client code to use proxy pattern:
  - useLiveImage.tsx (fal.ai live image generation)
  - VideoGenShapeUtil.tsx (video generation)
  - ImageGenShapeUtil.tsx (image generation)
  - runpodApi.ts (whisper transcription)
  - llmUtils.ts (LLM text generation)
- Add Environment types for AI service configuration
- Improve Automerge migration: compare shape counts between formats
  to prevent data loss during format conversion

To deploy, set secrets:
  wrangler secret put FAL_API_KEY
  wrangler secret put RUNPOD_API_KEY

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-25 20:26:04 -05:00
Jeff Emmett 1df612660d Update task task-027 2025-12-25 18:59:45 -05:00
11 changed files with 625 additions and 298 deletions

View File

@@ -4,7 +4,7 @@ title: Implement proper Automerge CRDT sync for offline-first support
status: In Progress status: In Progress
assignee: [] assignee: []
created_date: '2025-12-04 21:06' created_date: '2025-12-04 21:06'
updated_date: '2025-12-25 23:38' updated_date: '2025-12-25 23:59'
labels: labels:
- offline-sync - offline-sync
- crdt - crdt
@@ -110,4 +110,10 @@ The Automerge Repo requires proper peer discovery. The adapter emits `peer-candi
1. Add debug logging to adapter.send() to verify Repo calls 1. Add debug logging to adapter.send() to verify Repo calls
2. Check sync states between local peer and server 2. Check sync states between local peer and server
3. May need to manually trigger sync or fix Repo configuration 3. May need to manually trigger sync or fix Repo configuration
Dec 25: Added debug logging and peer-candidate re-emission fix to CloudflareAdapter.ts
Key fix: Re-emit peer-candidate after documentId is set to trigger Repo sync (timing issue)
Committed and pushed to dev branch - needs testing to verify binary sync is now working
<!-- SECTION:NOTES:END --> <!-- SECTION:NOTES:END -->

View File

@@ -2,12 +2,14 @@
* useLiveImage Hook * useLiveImage Hook
* Captures drawings within a frame shape and sends them to Fal.ai for AI enhancement * Captures drawings within a frame shape and sends them to Fal.ai for AI enhancement
* Based on draw-fast implementation, adapted for canvas-website with Automerge sync * Based on draw-fast implementation, adapted for canvas-website with Automerge sync
*
* SECURITY: All fal.ai API calls go through the Cloudflare Worker proxy
* API keys are stored server-side, never exposed to the browser
*/ */
import React, { createContext, useContext, useEffect, useRef, useCallback, useState } from 'react' import React, { createContext, useContext, useEffect, useRef, useCallback, useState } from 'react'
import { Editor, TLShapeId, Box, exportToBlob } from 'tldraw' import { Editor, TLShapeId, Box, exportToBlob } from 'tldraw'
import { fal } from '@fal-ai/client' import { getFalProxyConfig } from '@/lib/clientConfig'
import { getFalConfig } from '@/lib/clientConfig'
// Fal.ai model endpoints // Fal.ai model endpoints
const FAL_MODEL_LCM = 'fal-ai/lcm-sd15-i2i' // Fast, real-time (~150ms) const FAL_MODEL_LCM = 'fal-ai/lcm-sd15-i2i' // Fast, real-time (~150ms)
@@ -15,7 +17,7 @@ const FAL_MODEL_FLUX_CANNY = 'fal-ai/flux-control-lora-canny/image-to-image' //
interface LiveImageContextValue { interface LiveImageContextValue {
isConnected: boolean isConnected: boolean
apiKey: string | null // Note: apiKey is no longer exposed to the browser
setApiKey: (key: string) => void setApiKey: (key: string) => void
} }
@@ -23,53 +25,31 @@ const LiveImageContext = createContext<LiveImageContextValue | null>(null)
interface LiveImageProviderProps { interface LiveImageProviderProps {
children: React.ReactNode children: React.ReactNode
apiKey?: string apiKey?: string // Deprecated - API keys are now server-side
} }
/** /**
* Provider component that manages Fal.ai connection * Provider component that manages Fal.ai connection
* API keys are now stored server-side and proxied through Cloudflare Worker
*/ */
export function LiveImageProvider({ children, apiKey: initialApiKey }: LiveImageProviderProps) { export function LiveImageProvider({ children }: LiveImageProviderProps) {
// Get default FAL key from clientConfig (includes the hardcoded default) // Fal.ai is always "connected" via the proxy - actual auth happens server-side
const falConfig = getFalConfig() const [isConnected, setIsConnected] = useState(true)
const defaultApiKey = falConfig?.apiKey || null
const [apiKey, setApiKeyState] = useState<string | null>( // Log that we're using the proxy
initialApiKey || import.meta.env.VITE_FAL_API_KEY || defaultApiKey
)
const [isConnected, setIsConnected] = useState(false)
// Configure Fal.ai client when API key is available
useEffect(() => { useEffect(() => {
if (apiKey) { const { proxyUrl } = getFalProxyConfig()
fal.config({ credentials: apiKey }) console.log('LiveImage: Using fal.ai proxy at', proxyUrl || '(same origin)')
setIsConnected(true)
} else {
setIsConnected(false)
}
}, [apiKey])
const setApiKey = useCallback((key: string) => {
setApiKeyState(key)
// Also save to localStorage for persistence
localStorage.setItem('fal_api_key', key)
}, []) }, [])
// Try to load API key from localStorage on mount (but only if no default key) // setApiKey is now a no-op since keys are server-side
useEffect(() => { // Kept for backward compatibility with any code that tries to set a key
if (!apiKey) { const setApiKey = useCallback((_key: string) => {
const storedKey = localStorage.getItem('fal_api_key') console.warn('LiveImage: setApiKey is deprecated. API keys are now stored server-side.')
if (storedKey) { }, [])
setApiKeyState(storedKey)
} else if (defaultApiKey) {
// Use default key from config
setApiKeyState(defaultApiKey)
}
}
}, [defaultApiKey])
return ( return (
<LiveImageContext.Provider value={{ isConnected, apiKey, setApiKey }}> <LiveImageContext.Provider value={{ isConnected, setApiKey }}>
{children} {children}
</LiveImageContext.Provider> </LiveImageContext.Provider>
) )
@@ -177,7 +157,7 @@ export function useLiveImage({
} }
}, [editor, getChildShapes]) }, [editor, getChildShapes])
// Generate AI image from the sketch // Generate AI image from the sketch via proxy
const generateImage = useCallback(async () => { const generateImage = useCallback(async () => {
if (!context?.isConnected || !enabled) { if (!context?.isConnected || !enabled) {
return return
@@ -206,9 +186,13 @@
? `${prompt}, hd, award-winning, impressive, detailed` ? `${prompt}, hd, award-winning, impressive, detailed`
: 'hd, award-winning, impressive, detailed illustration' : 'hd, award-winning, impressive, detailed illustration'
// Use the proxy endpoint instead of calling fal.ai directly
const { proxyUrl } = getFalProxyConfig()
const result = await fal.subscribe(modelEndpoint, { const response = await fetch(`${proxyUrl}/subscribe/${modelEndpoint}`, {
input: { method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
prompt: fullPrompt, prompt: fullPrompt,
image_url: imageDataUrl, image_url: imageDataUrl,
strength: strength, strength: strength,
@@ -217,11 +201,20 @@
num_inference_steps: model === 'lcm' ? 4 : 20, num_inference_steps: model === 'lcm' ? 4 : 20,
guidance_scale: model === 'lcm' ? 1 : 7.5, guidance_scale: model === 'lcm' ? 1 : 7.5,
enable_safety_checks: false, enable_safety_checks: false,
}, })
pollInterval: 1000,
logs: true,
}) })
if (!response.ok) {
const errorData = await response.json().catch(() => ({ error: response.statusText })) as { error?: string }
throw new Error(errorData.error || `Proxy error: ${response.status}`)
}
const data = await response.json() as {
images?: Array<{ url?: string } | string>
image?: { url?: string } | string
output?: { url?: string } | string
}
// Check if this result is still relevant // Check if this result is still relevant
if (currentVersion !== requestVersionRef.current) { if (currentVersion !== requestVersionRef.current) {
return return
@@ -230,15 +223,13 @@
// Extract image URL from result // Extract image URL from result
let imageUrl: string | null = null let imageUrl: string | null = null
if (result.data) { if (data.images && Array.isArray(data.images) && data.images.length > 0) {
const data = result.data as any const firstImage = data.images[0]
if (data.images && Array.isArray(data.images) && data.images.length > 0) { imageUrl = typeof firstImage === 'string' ? firstImage : firstImage?.url || null
imageUrl = data.images[0].url || data.images[0] } else if (data.image) {
} else if (data.image) { imageUrl = typeof data.image === 'string' ? data.image : data.image?.url || null
imageUrl = data.image.url || data.image } else if (data.output) {
} else if (data.output) { imageUrl = typeof data.output === 'string' ? data.output : data.output?.url || null
imageUrl = typeof data.output === 'string' ? data.output : data.output.url
}
} }
if (imageUrl) { if (imageUrl) {

View File

@@ -99,118 +99,114 @@ export function getClientConfig(): ClientConfig {
} }
} }
// Default fal.ai API key - shared for all users // ============================================================================
const DEFAULT_FAL_API_KEY = 'a4125de3-283b-4a2b-a2ef-eeac8eb25d92:45f0c80070ff0fe3ed1d43a82a332442' // IMPORTANT: API keys are now stored server-side only!
// All AI service calls go through the Cloudflare Worker proxy at /api/fal/* and /api/runpod/*
// This prevents exposing API keys in the browser
// ============================================================================
// Default RunPod API key - shared across all endpoints /**
// This allows all users to access AI features without their own API keys * Get the worker API URL for proxied requests
const DEFAULT_RUNPOD_API_KEY = 'rpa_YYOARL5MEBTTKKWGABRKTW2CVHQYRBTOBZNSGIL3lwwfdz' * In production, this will be the same origin as the app
* In development, we need to use the worker's dev port
*/
export function getWorkerApiUrl(): string {
// Check for explicit worker URL override (useful for development)
const workerUrl = import.meta.env.VITE_WORKER_URL
if (workerUrl) {
return workerUrl
}
// Default RunPod endpoint IDs (from CLAUDE.md) // In production, use same origin (worker is served from same domain)
const DEFAULT_RUNPOD_IMAGE_ENDPOINT_ID = 'tzf1j3sc3zufsy' // Automatic1111 for image generation if (typeof window !== 'undefined' && window.location.hostname !== 'localhost') {
const DEFAULT_RUNPOD_VIDEO_ENDPOINT_ID = '4jql4l7l0yw0f3' // Wan2.2 for video generation return '' // Empty string = same origin
const DEFAULT_RUNPOD_TEXT_ENDPOINT_ID = '03g5hz3hlo8gr2' // vLLM for text generation }
const DEFAULT_RUNPOD_WHISPER_ENDPOINT_ID = 'lrtisuv8ixbtub' // Whisper for transcription
// In development, use the worker dev server
// Default to port 5172 as configured in wrangler.toml
return 'http://localhost:5172'
}
/**
* Get RunPod proxy configuration
* All RunPod calls now go through the Cloudflare Worker proxy
* API keys are stored server-side, never exposed to the browser
*/
export function getRunPodProxyConfig(type: 'image' | 'video' | 'text' | 'whisper' = 'image'): {
proxyUrl: string
endpointType: string
} {
const workerUrl = getWorkerApiUrl()
return {
proxyUrl: `${workerUrl}/api/runpod/${type}`,
endpointType: type
}
}
/** /**
* Get RunPod configuration for API calls (defaults to image endpoint) * Get RunPod configuration for API calls (defaults to image endpoint)
* Falls back to pre-configured endpoints if not set via environment * @deprecated Use getRunPodProxyConfig() instead - API keys are now server-side
*/ */
export function getRunPodConfig(): { apiKey: string; endpointId: string } | null { export function getRunPodConfig(): { proxyUrl: string } {
const config = getClientConfig() return { proxyUrl: `${getWorkerApiUrl()}/api/runpod/image` }
const apiKey = config.runpodApiKey || DEFAULT_RUNPOD_API_KEY
const endpointId = config.runpodEndpointId || config.runpodImageEndpointId || DEFAULT_RUNPOD_IMAGE_ENDPOINT_ID
return {
apiKey: apiKey,
endpointId: endpointId
}
} }
/** /**
* Get RunPod configuration for image generation * Get RunPod configuration for image generation
* Falls back to pre-configured Automatic1111 endpoint * @deprecated Use getRunPodProxyConfig('image') instead
*/ */
export function getRunPodImageConfig(): { apiKey: string; endpointId: string } | null { export function getRunPodImageConfig(): { proxyUrl: string } {
const config = getClientConfig() return getRunPodProxyConfig('image')
const apiKey = config.runpodApiKey || DEFAULT_RUNPOD_API_KEY
const endpointId = config.runpodImageEndpointId || config.runpodEndpointId || DEFAULT_RUNPOD_IMAGE_ENDPOINT_ID
return {
apiKey: apiKey,
endpointId: endpointId
}
} }
/** /**
* Get RunPod configuration for video generation * Get RunPod configuration for video generation
* Falls back to pre-configured Wan2.2 endpoint * @deprecated Use getRunPodProxyConfig('video') instead
*/ */
export function getRunPodVideoConfig(): { apiKey: string; endpointId: string } | null { export function getRunPodVideoConfig(): { proxyUrl: string } {
const config = getClientConfig() return getRunPodProxyConfig('video')
const apiKey = config.runpodApiKey || DEFAULT_RUNPOD_API_KEY
const endpointId = config.runpodVideoEndpointId || DEFAULT_RUNPOD_VIDEO_ENDPOINT_ID
return {
apiKey: apiKey,
endpointId: endpointId
}
} }
/** /**
* Get RunPod configuration for text generation (vLLM) * Get RunPod configuration for text generation (vLLM)
* Falls back to pre-configured vLLM endpoint * @deprecated Use getRunPodProxyConfig('text') instead
*/ */
export function getRunPodTextConfig(): { apiKey: string; endpointId: string } | null { export function getRunPodTextConfig(): { proxyUrl: string } {
const config = getClientConfig() return getRunPodProxyConfig('text')
const apiKey = config.runpodApiKey || DEFAULT_RUNPOD_API_KEY
const endpointId = config.runpodTextEndpointId || DEFAULT_RUNPOD_TEXT_ENDPOINT_ID
return {
apiKey: apiKey,
endpointId: endpointId
}
} }
/** /**
* Get RunPod configuration for Whisper transcription * Get RunPod configuration for Whisper transcription
* Falls back to pre-configured Whisper endpoint * @deprecated Use getRunPodProxyConfig('whisper') instead
*/ */
export function getRunPodWhisperConfig(): { apiKey: string; endpointId: string } | null { export function getRunPodWhisperConfig(): { proxyUrl: string } {
const config = getClientConfig() return getRunPodProxyConfig('whisper')
}
const apiKey = config.runpodApiKey || DEFAULT_RUNPOD_API_KEY /**
const endpointId = config.runpodWhisperEndpointId || DEFAULT_RUNPOD_WHISPER_ENDPOINT_ID * Get fal.ai proxy configuration
* All fal.ai calls now go through the Cloudflare Worker proxy
return { * API keys are stored server-side, never exposed to the browser
apiKey: apiKey, */
endpointId: endpointId export function getFalProxyConfig(): { proxyUrl: string } {
} const workerUrl = getWorkerApiUrl()
return { proxyUrl: `${workerUrl}/api/fal` }
} }
/** /**
* Get fal.ai configuration for image and video generation * Get fal.ai configuration for image and video generation
* Falls back to pre-configured API key if not set * @deprecated API keys are now server-side. Use getFalProxyConfig() for proxy URL.
*/ */
export function getFalConfig(): { apiKey: string } | null { export function getFalConfig(): { proxyUrl: string } {
const config = getClientConfig() return getFalProxyConfig()
const apiKey = config.falApiKey || DEFAULT_FAL_API_KEY
return {
apiKey: apiKey
}
} }
/** /**
* Check if fal.ai integration is configured * Check if fal.ai integration is configured
* Now always returns true since the proxy handles configuration
*/ */
export function isFalConfigured(): boolean { export function isFalConfigured(): boolean {
const config = getClientConfig() return true // Proxy is always available, server-side config determines availability
return !!(config.falApiKey || DEFAULT_FAL_API_KEY)
} }
/** /**
@@ -231,10 +227,10 @@ export function getOllamaConfig(): { url: string } | null {
/** /**
* Check if RunPod integration is configured * Check if RunPod integration is configured
* Now always returns true since the proxy handles configuration
*/ */
export function isRunPodConfigured(): boolean { export function isRunPodConfigured(): boolean {
const config = getClientConfig() return true // Proxy is always available, server-side config determines availability
return !!(config.runpodApiKey && config.runpodEndpointId)
} }
/** /**

View File

@@ -1,9 +1,12 @@
/** /**
* RunPod API utility functions * RunPod API utility functions
* Handles communication with RunPod WhisperX endpoints * Handles communication with RunPod WhisperX endpoints
*
* SECURITY: All RunPod calls go through the Cloudflare Worker proxy
* API keys are stored server-side, never exposed to the browser
*/ */
import { getRunPodConfig } from './clientConfig' import { getRunPodProxyConfig } from './clientConfig'
export interface RunPodTranscriptionResponse { export interface RunPodTranscriptionResponse {
id?: string id?: string
@@ -40,18 +43,14 @@
} }
/** /**
* Send transcription request to RunPod endpoint * Send transcription request to RunPod endpoint via proxy
* Handles both synchronous and asynchronous job patterns * Handles both synchronous and asynchronous job patterns
*/ */
export async function transcribeWithRunPod( export async function transcribeWithRunPod(
audioBlob: Blob, audioBlob: Blob,
language?: string language?: string
): Promise<string> { ): Promise<string> {
const config = getRunPodConfig() const { proxyUrl } = getRunPodProxyConfig('whisper')
if (!config) {
throw new Error('RunPod API key or endpoint ID not configured. Please set VITE_RUNPOD_API_KEY and VITE_RUNPOD_ENDPOINT_ID environment variables.')
}
// Check audio blob size (limit to ~10MB to prevent issues) // Check audio blob size (limit to ~10MB to prevent issues)
const maxSize = 10 * 1024 * 1024 // 10MB const maxSize = 10 * 1024 * 1024 // 10MB
@@ -65,7 +64,8 @@ export async function transcribeWithRunPod(
// Detect audio format from blob type // Detect audio format from blob type
const audioFormat = audioBlob.type || 'audio/wav' const audioFormat = audioBlob.type || 'audio/wav'
const url = `https://api.runpod.ai/v2/${config.endpointId}/run` // Use proxy endpoint - API key and endpoint ID are handled server-side
const url = `${proxyUrl}/run`
// Prepare the request payload // Prepare the request payload
// WhisperX typically expects audio as base64 or file URL // WhisperX typically expects audio as base64 or file URL
@@ -89,8 +89,8 @@
const response = await fetch(url, { const response = await fetch(url, {
method: 'POST', method: 'POST',
headers: { headers: {
'Content-Type': 'application/json', 'Content-Type': 'application/json'
'Authorization': `Bearer ${config.apiKey}` // Authorization is handled by the proxy server-side
}, },
body: JSON.stringify(requestBody), body: JSON.stringify(requestBody),
signal: controller.signal signal: controller.signal
@@ -99,13 +99,13 @@
clearTimeout(timeoutId) clearTimeout(timeoutId)
if (!response.ok) { if (!response.ok) {
const errorText = await response.text() const errorData = await response.json().catch(() => ({ error: response.statusText })) as { error?: string; details?: string }
console.error('RunPod API error response:', { console.error('RunPod API error response:', {
status: response.status, status: response.status,
statusText: response.statusText, statusText: response.statusText,
body: errorText error: errorData
}) })
throw new Error(`RunPod API error: ${response.status} - ${errorText}`) throw new Error(`RunPod API error: ${response.status} - ${errorData.error || errorData.details || 'Unknown error'}`)
} }
const data: RunPodTranscriptionResponse = await response.json() const data: RunPodTranscriptionResponse = await response.json()
@@ -113,7 +113,7 @@
// Handle async job pattern (RunPod often returns job IDs) // Handle async job pattern (RunPod often returns job IDs)
if (data.id && (data.status === 'IN_QUEUE' || data.status === 'IN_PROGRESS')) { if (data.id && (data.status === 'IN_QUEUE' || data.status === 'IN_PROGRESS')) {
return await pollRunPodJob(data.id, config.apiKey, config.endpointId) return await pollRunPodJob(data.id, proxyUrl)
} }
// Handle direct response // Handle direct response
@@ -134,8 +134,8 @@
// Check if response has unexpected structure // Check if response has unexpected structure
console.warn('Unexpected RunPod response structure:', data) console.warn('Unexpected RunPod response structure:', data)
throw new Error('No transcription text found in RunPod response. Check endpoint response format.') throw new Error('No transcription text found in RunPod response. Check endpoint response format.')
} catch (error: any) { } catch (error: unknown) {
if (error.name === 'AbortError') { if (error instanceof Error && error.name === 'AbortError') {
throw new Error('RunPod request timed out after 30 seconds') throw new Error('RunPod request timed out after 30 seconds')
} }
console.error('RunPod transcription error:', error) console.error('RunPod transcription error:', error)
@@ -144,16 +144,16 @@
} }
/** /**
* Poll RunPod job status until completion * Poll RunPod job status until completion via proxy
*/ */
async function pollRunPodJob( async function pollRunPodJob(
jobId: string, jobId: string,
apiKey: string, proxyUrl: string,
endpointId: string,
maxAttempts: number = 120, // Increased to 120 attempts (2 minutes at 1s intervals) maxAttempts: number = 120, // Increased to 120 attempts (2 minutes at 1s intervals)
pollInterval: number = 1000 pollInterval: number = 1000
): Promise<string> { ): Promise<string> {
const statusUrl = `https://api.runpod.ai/v2/${endpointId}/status/${jobId}` // Use proxy endpoint for status checks
const statusUrl = `${proxyUrl}/status/${jobId}`
for (let attempt = 0; attempt < maxAttempts; attempt++) { for (let attempt = 0; attempt < maxAttempts; attempt++) {
@@ -164,20 +164,18 @@
const response = await fetch(statusUrl, { const response = await fetch(statusUrl, {
method: 'GET', method: 'GET',
headers: { // Authorization is handled by the proxy server-side
'Authorization': `Bearer ${apiKey}`
},
signal: controller.signal signal: controller.signal
}) })
clearTimeout(timeoutId) clearTimeout(timeoutId)
if (!response.ok) { if (!response.ok) {
const errorText = await response.text() const errorData = await response.json().catch(() => ({ error: response.statusText })) as { error?: string; details?: string }
console.error(`Job status check failed (attempt ${attempt + 1}/${maxAttempts}):`, { console.error(`Job status check failed (attempt ${attempt + 1}/${maxAttempts}):`, {
status: response.status, status: response.status,
statusText: response.statusText, statusText: response.statusText,
body: errorText error: errorData
}) })
// Don't fail immediately on 404 - job might still be processing // Don't fail immediately on 404 - job might still be processing
@@ -186,7 +184,7 @@ async function pollRunPodJob(
continue continue
} }
throw new Error(`Failed to check job status: ${response.status} - ${errorText}`) throw new Error(`Failed to check job status: ${response.status} - ${errorData.error || errorData.details || 'Unknown error'}`)
} }
const data: RunPodTranscriptionResponse = await response.json() const data: RunPodTranscriptionResponse = await response.json()
@@ -216,8 +214,8 @@
if (attempt % 10 === 0) { if (attempt % 10 === 0) {
} }
await new Promise(resolve => setTimeout(resolve, pollInterval)) await new Promise(resolve => setTimeout(resolve, pollInterval))
} catch (error: any) { } catch (error: unknown) {
if (error.name === 'AbortError') { if (error instanceof Error && error.name === 'AbortError') {
console.warn(`Status check timed out (attempt ${attempt + 1}/${maxAttempts})`) console.warn(`Status check timed out (attempt ${attempt + 1}/${maxAttempts})`)
if (attempt < maxAttempts - 1) { if (attempt < maxAttempts - 1) {
await new Promise(resolve => setTimeout(resolve, pollInterval)) await new Promise(resolve => setTimeout(resolve, pollInterval))
@@ -236,4 +234,3 @@
throw new Error(`Job polling timeout after ${maxAttempts} attempts (${(maxAttempts * pollInterval / 1000).toFixed(0)} seconds)`) throw new Error(`Job polling timeout after ${maxAttempts} attempts (${(maxAttempts * pollInterval / 1000).toFixed(0)} seconds)`)
} }

View File

@@ -6,7 +6,7 @@ import {
TLBaseShape, TLBaseShape,
} from "tldraw" } from "tldraw"
import React, { useState } from "react" import React, { useState } from "react"
import { getRunPodConfig } from "@/lib/clientConfig" import { getRunPodProxyConfig } from "@/lib/clientConfig"
import { aiOrchestrator, isAIOrchestratorAvailable } from "@/lib/aiOrchestrator" import { aiOrchestrator, isAIOrchestratorAvailable } from "@/lib/aiOrchestrator"
import { StandardizedToolWrapper } from "@/components/StandardizedToolWrapper" import { StandardizedToolWrapper } from "@/components/StandardizedToolWrapper"
import { usePinnedToView } from "@/hooks/usePinnedToView" import { usePinnedToView } from "@/hooks/usePinnedToView"
@@ -341,10 +341,8 @@ export class ImageGenShape extends BaseBoxShapeUtil<IImageGen> {
}) })
try { try {
// Get RunPod configuration // Get RunPod proxy configuration - API keys are now server-side
const runpodConfig = getRunPodConfig() const { proxyUrl } = getRunPodProxyConfig('image')
const endpointId = shape.props.endpointId || runpodConfig?.endpointId || "tzf1j3sc3zufsy"
const apiKey = runpodConfig?.apiKey
// Mock API mode: Return placeholder image without calling RunPod // Mock API mode: Return placeholder image without calling RunPod
if (USE_MOCK_API) { if (USE_MOCK_API) {
@@ -382,20 +380,18 @@
return return
} }
// Real API mode: Use RunPod // Real API mode: Use RunPod via proxy
if (!apiKey) { // API key and endpoint ID are handled server-side
throw new Error("RunPod API key not configured. Please set VITE_RUNPOD_API_KEY environment variable.")
}
// Use runsync for synchronous execution - returns output directly without polling // Use runsync for synchronous execution - returns output directly without polling
const url = `https://api.runpod.ai/v2/${endpointId}/runsync` const url = `${proxyUrl}/runsync`
const response = await fetch(url, { const response = await fetch(url, {
method: "POST", method: "POST",
headers: { headers: {
"Content-Type": "application/json", "Content-Type": "application/json"
"Authorization": `Bearer ${apiKey}` // Authorization is handled by the proxy server-side
}, },
body: JSON.stringify({ body: JSON.stringify({
input: { input: {

View File

@@ -6,7 +6,7 @@ import {
TLBaseShape, TLBaseShape,
} from "tldraw" } from "tldraw"
import React, { useState, useRef, useEffect } from "react" import React, { useState, useRef, useEffect } from "react"
import { getFalConfig } from "@/lib/clientConfig" import { getFalProxyConfig } from "@/lib/clientConfig"
import { StandardizedToolWrapper } from "@/components/StandardizedToolWrapper" import { StandardizedToolWrapper } from "@/components/StandardizedToolWrapper"
import { usePinnedToView } from "@/hooks/usePinnedToView" import { usePinnedToView } from "@/hooks/usePinnedToView"
import { useMaximize } from "@/hooks/useMaximize" import { useMaximize } from "@/hooks/useMaximize"
@@ -166,16 +166,10 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
} }
} }
// Check fal.ai config // Get fal.ai proxy config
const falConfig = getFalConfig() const { proxyUrl } = getFalProxyConfig()
if (!falConfig) {
setError("fal.ai not configured. Please set VITE_FAL_API_KEY in your .env file.")
return
}
const currentMode = (imageUrl.trim() || imageBase64) ? 'i2v' : 't2v' const currentMode = (imageUrl.trim() || imageBase64) ? 'i2v' : 't2v'
if (currentMode === 'i2v') {
}
// Clear any existing video and set loading state // Clear any existing video and set loading state
setIsGenerating(true) setIsGenerating(true)
@@ -198,14 +192,10 @@
} }
try { try {
const { apiKey } = falConfig
// Choose fal.ai endpoint based on mode // Choose fal.ai endpoint based on mode
// WAN 2.1 models: fast startup, good quality // WAN 2.1 models: fast startup, good quality
const endpoint = currentMode === 'i2v' ? 'fal-ai/wan-i2v' : 'fal-ai/wan-t2v' const endpoint = currentMode === 'i2v' ? 'fal-ai/wan-i2v' : 'fal-ai/wan-t2v'
const submitUrl = `https://queue.fal.run/${endpoint}`
// Build input payload for fal.ai // Build input payload for fal.ai
const inputPayload: Record<string, any> = { const inputPayload: Record<string, any> = {
prompt: prompt, prompt: prompt,
@@ -226,19 +216,16 @@
} }
} }
// Submit to fal.ai queue // Submit to fal.ai queue via proxy
const response = await fetch(submitUrl, { const response = await fetch(`${proxyUrl}/queue/${endpoint}`, {
method: 'POST', method: 'POST',
headers: { headers: { 'Content-Type': 'application/json' },
'Authorization': `Key ${apiKey}`,
'Content-Type': 'application/json'
},
body: JSON.stringify(inputPayload) body: JSON.stringify(inputPayload)
}) })
if (!response.ok) { if (!response.ok) {
const errorText = await response.text() const errorData = await response.json().catch(() => ({ error: response.statusText })) as { error?: string; details?: string }
throw new Error(`fal.ai API error: ${response.status} - ${errorText}`) throw new Error(`fal.ai API error: ${response.status} - ${errorData.error || errorData.details || 'Unknown error'}`)
} }
const jobData = await response.json() as FalQueueResponse const jobData = await response.json() as FalQueueResponse
@@ -247,10 +234,9 @@
throw new Error('No request_id returned from fal.ai') throw new Error('No request_id returned from fal.ai')
} }
// Poll for completion // Poll for completion via proxy
// fal.ai is generally faster than RunPod due to warm instances // fal.ai is generally faster than RunPod due to warm instances
// Typical times: 30-90 seconds for video generation // Typical times: 30-90 seconds for video generation
const statusUrl = `https://queue.fal.run/${endpoint}/requests/${jobData.request_id}/status`
let attempts = 0 let attempts = 0
const maxAttempts = 120 // 4 minutes with 2s intervals const maxAttempts = 120 // 4 minutes with 2s intervals
@@ -258,9 +244,7 @@
await new Promise(resolve => setTimeout(resolve, 2000)) await new Promise(resolve => setTimeout(resolve, 2000))
attempts++ attempts++
const statusResponse = await fetch(statusUrl, { const statusResponse = await fetch(`${proxyUrl}/queue/${endpoint}/status/${jobData.request_id}`)
headers: { 'Authorization': `Key ${apiKey}` }
})
if (!statusResponse.ok) { if (!statusResponse.ok) {
console.warn(`🎬 VideoGen: Poll error (attempt ${attempts}):`, statusResponse.status) console.warn(`🎬 VideoGen: Poll error (attempt ${attempts}):`, statusResponse.status)
@@ -270,11 +254,8 @@
const statusData = await statusResponse.json() as FalQueueResponse const statusData = await statusResponse.json() as FalQueueResponse
if (statusData.status === 'COMPLETED') { if (statusData.status === 'COMPLETED') {
// Fetch the result // Fetch the result via proxy
const resultUrl = `https://queue.fal.run/${endpoint}/requests/${jobData.request_id}` const resultResponse = await fetch(`${proxyUrl}/queue/${endpoint}/result/${jobData.request_id}`)
const resultResponse = await fetch(resultUrl, {
headers: { 'Authorization': `Key ${apiKey}` }
})
if (!resultResponse.ok) { if (!resultResponse.ok) {
throw new Error(`Failed to fetch result: ${resultResponse.status}`) throw new Error(`Failed to fetch result: ${resultResponse.status}`)

View File

@ -1,7 +1,7 @@
import OpenAI from "openai"; import OpenAI from "openai";
import Anthropic from "@anthropic-ai/sdk"; import Anthropic from "@anthropic-ai/sdk";
import { makeRealSettings, AI_PERSONALITIES } from "@/lib/settings"; import { makeRealSettings, AI_PERSONALITIES } from "@/lib/settings";
import { getRunPodConfig, getRunPodTextConfig, getOllamaConfig } from "@/lib/clientConfig"; import { getRunPodProxyConfig, getOllamaConfig } from "@/lib/clientConfig";
export async function llm( export async function llm(
userPrompt: string, userPrompt: string,
@ -170,28 +170,15 @@ function getAvailableProviders(availableKeys: Record<string, string>, settings:
}); });
} }
// PRIORITY 1: Check for RunPod TEXT configuration from environment variables // PRIORITY 1: Add RunPod via proxy - API keys are stored server-side
// RunPod vLLM text endpoint is used as fallback when Ollama is not available // RunPod vLLM text endpoint is used as fallback when Ollama is not available
const runpodTextConfig = getRunPodTextConfig(); const runpodProxyConfig = getRunPodProxyConfig('text');
if (runpodTextConfig && runpodTextConfig.apiKey && runpodTextConfig.endpointId) { // Always add RunPod as a provider - the proxy handles auth server-side
providers.push({ providers.push({
provider: 'runpod', provider: 'runpod',
apiKey: runpodTextConfig.apiKey, proxyUrl: runpodProxyConfig.proxyUrl,
endpointId: runpodTextConfig.endpointId, model: 'default' // RunPod vLLM endpoint
model: 'default' // RunPod vLLM endpoint });
});
} else {
// Fallback to generic RunPod config if text endpoint not configured
const runpodConfig = getRunPodConfig();
if (runpodConfig && runpodConfig.apiKey && runpodConfig.endpointId) {
providers.push({
provider: 'runpod',
apiKey: runpodConfig.apiKey,
endpointId: runpodConfig.endpointId,
model: 'default'
});
}
}
// PRIORITY 2: Then add user-configured keys (they will be tried after RunPod) // PRIORITY 2: Then add user-configured keys (they will be tried after RunPod)
// First, try the preferred provider - support multiple keys if stored as comma-separated // First, try the preferred provider - support multiple keys if stored as comma-separated
@ -503,7 +490,7 @@ async function callProviderAPI(
userPrompt: string, userPrompt: string,
onToken: (partialResponse: string, done?: boolean) => void, onToken: (partialResponse: string, done?: boolean) => void,
settings?: any, settings?: any,
endpointId?: string, _endpointId?: string, // Deprecated - RunPod now uses proxy with server-side endpoint config
customSystemPrompt?: string | null customSystemPrompt?: string | null
) { ) {
let partial = ""; let partial = "";
@ -571,29 +558,18 @@ async function callProviderAPI(
throw error; throw error;
} }
} else if (provider === 'runpod') { } else if (provider === 'runpod') {
// RunPod API integration - uses environment variables for automatic setup // RunPod API integration via proxy - API keys are stored server-side
// Get endpointId from parameter or from config const { proxyUrl } = getRunPodProxyConfig('text');
let runpodEndpointId = endpointId;
if (!runpodEndpointId) {
const runpodConfig = getRunPodConfig();
if (runpodConfig) {
runpodEndpointId = runpodConfig.endpointId;
}
}
if (!runpodEndpointId) {
throw new Error('RunPod endpoint ID not configured');
}
// Try /runsync first for synchronous execution (returns output immediately) // Try /runsync first for synchronous execution (returns output immediately)
// Fall back to /run + polling if /runsync is not available // Fall back to /run + polling if /runsync is not available
const syncUrl = `https://api.runpod.ai/v2/${runpodEndpointId}/runsync`; const syncUrl = `${proxyUrl}/runsync`;
const asyncUrl = `https://api.runpod.ai/v2/${runpodEndpointId}/run`; const asyncUrl = `${proxyUrl}/run`;
// vLLM endpoints typically expect OpenAI-compatible format with messages array // vLLM endpoints typically expect OpenAI-compatible format with messages array
// But some endpoints might accept simple prompt format // But some endpoints might accept simple prompt format
// Try OpenAI-compatible format first, as it's more standard for vLLM // Try OpenAI-compatible format first, as it's more standard for vLLM
const messages = []; const messages: Array<{ role: string; content: string }> = [];
if (systemPrompt) { if (systemPrompt) {
messages.push({ role: 'system', content: systemPrompt }); messages.push({ role: 'system', content: systemPrompt });
} }
@ -615,8 +591,8 @@ async function callProviderAPI(
const syncResponse = await fetch(syncUrl, { const syncResponse = await fetch(syncUrl, {
method: 'POST', method: 'POST',
headers: { headers: {
'Content-Type': 'application/json', 'Content-Type': 'application/json'
'Authorization': `Bearer ${apiKey}` // Authorization is handled by the proxy server-side
}, },
body: JSON.stringify(requestBody) body: JSON.stringify(requestBody)
}); });
@ -654,7 +630,7 @@ async function callProviderAPI(
// If sync endpoint returned a job ID, fall through to async polling // If sync endpoint returned a job ID, fall through to async polling
if (syncData.id && (syncData.status === 'IN_QUEUE' || syncData.status === 'IN_PROGRESS')) { if (syncData.id && (syncData.status === 'IN_QUEUE' || syncData.status === 'IN_PROGRESS')) {
const result = await pollRunPodJob(syncData.id, apiKey, runpodEndpointId); const result = await pollRunPodJob(syncData.id, proxyUrl);
partial = result; partial = result;
onToken(partial, true); onToken(partial, true);
return; return;
@ -668,22 +644,22 @@ async function callProviderAPI(
const response = await fetch(asyncUrl, { const response = await fetch(asyncUrl, {
method: 'POST', method: 'POST',
headers: { headers: {
'Content-Type': 'application/json', 'Content-Type': 'application/json'
'Authorization': `Bearer ${apiKey}` // Authorization is handled by the proxy server-side
}, },
body: JSON.stringify(requestBody) body: JSON.stringify(requestBody)
}); });
if (!response.ok) { if (!response.ok) {
const errorText = await response.text(); const errorData = await response.json().catch(() => ({ error: response.statusText })) as { error?: string; details?: string };
throw new Error(`RunPod API error: ${response.status} - ${errorText}`); throw new Error(`RunPod API error: ${response.status} - ${errorData.error || errorData.details || 'Unknown error'}`);
} }
const data = await response.json() as Record<string, any>; const data = await response.json() as Record<string, any>;
// Handle async job pattern (RunPod often returns job IDs) // Handle async job pattern (RunPod often returns job IDs)
if (data.id && (data.status === 'IN_QUEUE' || data.status === 'IN_PROGRESS')) { if (data.id && (data.status === 'IN_QUEUE' || data.status === 'IN_PROGRESS')) {
const result = await pollRunPodJob(data.id, apiKey, runpodEndpointId); const result = await pollRunPodJob(data.id, proxyUrl);
partial = result; partial = result;
onToken(partial, true); onToken(partial, true);
return; return;
@ -835,28 +811,26 @@ async function callProviderAPI(
onToken(partial, true); onToken(partial, true);
} }
// Helper function to poll RunPod job status until completion // Helper function to poll RunPod job status until completion via proxy
async function pollRunPodJob( async function pollRunPodJob(
jobId: string, jobId: string,
apiKey: string, proxyUrl: string,
endpointId: string,
maxAttempts: number = 60, maxAttempts: number = 60,
pollInterval: number = 1000 pollInterval: number = 1000
): Promise<string> { ): Promise<string> {
const statusUrl = `https://api.runpod.ai/v2/${endpointId}/status/${jobId}`; // Use proxy endpoint for status checks
const statusUrl = `${proxyUrl}/status/${jobId}`;
for (let attempt = 0; attempt < maxAttempts; attempt++) { for (let attempt = 0; attempt < maxAttempts; attempt++) {
try { try {
const response = await fetch(statusUrl, { const response = await fetch(statusUrl, {
method: 'GET', method: 'GET'
headers: { // Authorization is handled by the proxy server-side
'Authorization': `Bearer ${apiKey}`
}
}); });
if (!response.ok) { if (!response.ok) {
const errorText = await response.text(); const errorData = await response.json().catch(() => ({ error: response.statusText })) as { error?: string; details?: string };
throw new Error(`Failed to check job status: ${response.status} - ${errorText}`); throw new Error(`Failed to check job status: ${response.status} - ${errorData.error || errorData.details || 'Unknown error'}`);
} }
const data = await response.json() as Record<string, any>; const data = await response.json() as Record<string, any>;
@ -872,12 +846,10 @@ async function pollRunPodJob(
// After a few retries, try the stream endpoint as fallback // After a few retries, try the stream endpoint as fallback
try { try {
const streamUrl = `https://api.runpod.ai/v2/${endpointId}/stream/${jobId}`; const streamUrl = `${proxyUrl}/stream/${jobId}`;
const streamResponse = await fetch(streamUrl, { const streamResponse = await fetch(streamUrl, {
method: 'GET', method: 'GET'
headers: { // Authorization is handled by the proxy server-side
'Authorization': `Bearer ${apiKey}`
}
}); });
if (streamResponse.ok) { if (streamResponse.ok) {

View File

@ -64,14 +64,27 @@ export class AutomergeSyncManager {
// Try to load existing document from R2 // Try to load existing document from R2
let doc = await this.storage.loadDocument(this.roomId) let doc = await this.storage.loadDocument(this.roomId)
const automergeShapeCount = doc?.store
? Object.values(doc.store).filter((r: any) => r?.typeName === 'shape').length
: 0
if (!doc) { // Always check legacy JSON and compare - this prevents data loss if automerge.bin
// Check if there's a legacy JSON document to migrate // was created with fewer shapes than the legacy JSON
const legacyDoc = await this.loadLegacyJsonDocument() const legacyDoc = await this.loadLegacyJsonDocument()
if (legacyDoc) { const legacyShapeCount = legacyDoc?.store
console.log(`🔄 Found legacy JSON document, migrating to Automerge format`) ? Object.values(legacyDoc.store).filter((r: any) => r?.typeName === 'shape').length
doc = await this.storage.migrateFromJson(this.roomId, legacyDoc) : 0
}
console.log(`📊 Document comparison: automerge.bin has ${automergeShapeCount} shapes, legacy JSON has ${legacyShapeCount} shapes`)
// Use legacy JSON if it has more shapes than the automerge binary
// This handles the case where an empty automerge.bin was created before migration
if (legacyDoc && legacyShapeCount > automergeShapeCount) {
console.log(`🔄 Legacy JSON has more shapes (${legacyShapeCount} vs ${automergeShapeCount}), migrating to Automerge format`)
doc = await this.storage.migrateFromJson(this.roomId, legacyDoc)
} else if (!doc && legacyDoc) {
console.log(`🔄 No automerge.bin found, migrating legacy JSON document`)
doc = await this.storage.migrateFromJson(this.roomId, legacyDoc)
} }
if (!doc) { if (!doc) {

View File

@ -15,6 +15,14 @@ export interface Environment {
APP_URL?: string; APP_URL?: string;
// Admin secret for protected endpoints // Admin secret for protected endpoints
ADMIN_SECRET?: string; ADMIN_SECRET?: string;
// AI Service API keys (stored as secrets, never exposed to client)
FAL_API_KEY?: string;
RUNPOD_API_KEY?: string;
// RunPod endpoint IDs (not secrets, but kept server-side for flexibility)
RUNPOD_IMAGE_ENDPOINT_ID?: string;
RUNPOD_VIDEO_ENDPOINT_ID?: string;
RUNPOD_TEXT_ENDPOINT_ID?: string;
RUNPOD_WHISPER_ENDPOINT_ID?: string;
} }
// CryptID types for auth // CryptID types for auth

View File

@ -1029,6 +1029,366 @@ const router = AutoRouter<IRequest, [env: Environment, ctx: ExecutionContext]>({
.get("/boards/:boardId/editors", (req, env) => .get("/boards/:boardId/editors", (req, env) =>
handleListEditors(req.params.boardId, req, env)) handleListEditors(req.params.boardId, req, env))
// =============================================================================
// AI Service Proxies (fal.ai, RunPod)
// These keep API keys server-side instead of exposing them to the browser
// =============================================================================
// Fal.ai proxy - submit a job to the fal.ai queue.
// The browser posts its payload here; the Worker attaches the server-side
// API key so credentials are never shipped to the client.
.post("/api/fal/queue/:endpoint(*)", async (req, env) => {
  // Every exit path of this handler returns JSON.
  const json = (payload: unknown, status = 200) =>
    new Response(JSON.stringify(payload), {
      status,
      headers: { 'Content-Type': 'application/json' }
    })

  if (!env.FAL_API_KEY) {
    return json({ error: 'FAL_API_KEY not configured' }, 500)
  }

  try {
    // Forward the client's body verbatim to fal.ai's queue API.
    const payload = await req.json()
    const upstream = await fetch(`https://queue.fal.run/${req.params.endpoint}`, {
      method: 'POST',
      headers: {
        'Authorization': `Key ${env.FAL_API_KEY}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(payload)
    })

    if (!upstream.ok) {
      // Mirror the upstream failure (status code + raw body) to the client.
      return json({
        error: `fal.ai API error: ${upstream.status}`,
        details: await upstream.text()
      }, upstream.status)
    }

    return json(await upstream.json())
  } catch (error) {
    console.error('Fal.ai proxy error:', error)
    return json({ error: (error as Error).message }, 500)
  }
})
// Fal.ai proxy - check the status of a queued job.
// Looks up `requestId` on the given fal.ai endpoint using the
// server-side API key and relays fal.ai's JSON status payload.
.get("/api/fal/queue/:endpoint(*)/status/:requestId", async (req, env) => {
  // Every exit path of this handler returns JSON.
  const json = (payload: unknown, status = 200) =>
    new Response(JSON.stringify(payload), {
      status,
      headers: { 'Content-Type': 'application/json' }
    })

  if (!env.FAL_API_KEY) {
    return json({ error: 'FAL_API_KEY not configured' }, 500)
  }

  try {
    const { endpoint, requestId } = req.params
    const upstream = await fetch(
      `https://queue.fal.run/${endpoint}/requests/${requestId}/status`,
      { headers: { 'Authorization': `Key ${env.FAL_API_KEY}` } }
    )

    if (!upstream.ok) {
      // Mirror the upstream failure (status code + raw body) to the client.
      return json({
        error: `fal.ai status error: ${upstream.status}`,
        details: await upstream.text()
      }, upstream.status)
    }

    return json(await upstream.json())
  } catch (error) {
    console.error('Fal.ai status proxy error:', error)
    return json({ error: (error as Error).message }, 500)
  }
})
// Fal.ai proxy - fetch the result of a completed job.
// Retrieves `requestId` from the given fal.ai endpoint using the
// server-side API key and relays fal.ai's JSON result payload.
.get("/api/fal/queue/:endpoint(*)/result/:requestId", async (req, env) => {
  // Every exit path of this handler returns JSON.
  const json = (payload: unknown, status = 200) =>
    new Response(JSON.stringify(payload), {
      status,
      headers: { 'Content-Type': 'application/json' }
    })

  if (!env.FAL_API_KEY) {
    return json({ error: 'FAL_API_KEY not configured' }, 500)
  }

  try {
    const { endpoint, requestId } = req.params
    const upstream = await fetch(
      `https://queue.fal.run/${endpoint}/requests/${requestId}`,
      { headers: { 'Authorization': `Key ${env.FAL_API_KEY}` } }
    )

    if (!upstream.ok) {
      // Mirror the upstream failure (status code + raw body) to the client.
      return json({
        error: `fal.ai result error: ${upstream.status}`,
        details: await upstream.text()
      }, upstream.status)
    }

    return json(await upstream.json())
  } catch (error) {
    console.error('Fal.ai result proxy error:', error)
    return json({ error: (error as Error).message }, 500)
  }
})
// Fal.ai subscribe (synchronous generation) - used by LiveImage.
// Unlike the queue routes, this hits fal.run directly so the call blocks
// until generation finishes; the server-side key is attached here.
.post("/api/fal/subscribe/:endpoint(*)", async (req, env) => {
  // Every exit path of this handler returns JSON.
  const json = (payload: unknown, status = 200) =>
    new Response(JSON.stringify(payload), {
      status,
      headers: { 'Content-Type': 'application/json' }
    })

  if (!env.FAL_API_KEY) {
    return json({ error: 'FAL_API_KEY not configured' }, 500)
  }

  try {
    // Use the direct (non-queue) endpoint for synchronous generation.
    const payload = await req.json()
    const upstream = await fetch(`https://fal.run/${req.params.endpoint}`, {
      method: 'POST',
      headers: {
        'Authorization': `Key ${env.FAL_API_KEY}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(payload)
    })

    if (!upstream.ok) {
      // Mirror the upstream failure (status code + raw body) to the client.
      return json({
        error: `fal.ai API error: ${upstream.status}`,
        details: await upstream.text()
      }, upstream.status)
    }

    return json(await upstream.json())
  } catch (error) {
    console.error('Fal.ai subscribe proxy error:', error)
    return json({ error: (error as Error).message }, 500)
  }
})
// RunPod proxy - run sync (blocking).
//
// Forwards the POST body to RunPod's `/runsync` API for the requested
// endpoint type, attaching the server-side RUNPOD_API_KEY so credentials
// never reach the browser. Responds with RunPod's JSON payload, or a JSON
// error envelope that mirrors the upstream status code.
//
// Fix: the route param was force-cast with `as 'image' | 'video' | 'text'
// | 'whisper'`, which lies to the type checker — the URL segment can be
// any string. The cast is removed; unknown types simply miss the map and
// hit the 400 guard below. Runtime behavior is unchanged.
.post("/api/runpod/:endpointType/runsync", async (req, env) => {
  // Every exit path of this handler returns JSON.
  const json = (payload: unknown, status = 200) =>
    new Response(JSON.stringify(payload), {
      status,
      headers: { 'Content-Type': 'application/json' }
    })

  // Resolve the RunPod endpoint ID for the requested type. Env vars take
  // precedence; the literals are the project's known default endpoints.
  const endpointIds: Record<string, string | undefined> = {
    image: env.RUNPOD_IMAGE_ENDPOINT_ID || 'tzf1j3sc3zufsy',
    video: env.RUNPOD_VIDEO_ENDPOINT_ID || '4jql4l7l0yw0f3',
    text: env.RUNPOD_TEXT_ENDPOINT_ID || '03g5hz3hlo8gr2',
    whisper: env.RUNPOD_WHISPER_ENDPOINT_ID || 'lrtisuv8ixbtub'
  }
  const endpointType = req.params.endpointType
  const endpointId = endpointIds[endpointType]
  if (!endpointId) {
    return json({ error: `Unknown endpoint type: ${endpointType}` }, 400)
  }

  if (!env.RUNPOD_API_KEY) {
    return json({ error: 'RUNPOD_API_KEY not configured' }, 500)
  }

  try {
    const payload = await req.json()
    const upstream = await fetch(`https://api.runpod.ai/v2/${endpointId}/runsync`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${env.RUNPOD_API_KEY}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(payload)
    })

    if (!upstream.ok) {
      // Mirror the upstream failure so the client can distinguish RunPod
      // errors from proxy errors.
      return json({
        error: `RunPod API error: ${upstream.status}`,
        details: await upstream.text()
      }, upstream.status)
    }

    return json(await upstream.json())
  } catch (error) {
    console.error('RunPod runsync proxy error:', error)
    return json({ error: (error as Error).message }, 500)
  }
})
// RunPod proxy - run async (non-blocking).
//
// Forwards the POST body to RunPod's `/run` API for the requested
// endpoint type, attaching the server-side RUNPOD_API_KEY. RunPod
// typically answers with a job ID that the client then polls via the
// status proxy route. Errors are relayed as a JSON envelope mirroring
// the upstream status code.
//
// Fix: removed the unsafe `as 'image' | 'video' | 'text' | 'whisper'`
// assertion on the route param — the URL segment can be any string, and
// the map lookup + 400 guard below already handle unknown values.
.post("/api/runpod/:endpointType/run", async (req, env) => {
  // Every exit path of this handler returns JSON.
  const json = (payload: unknown, status = 200) =>
    new Response(JSON.stringify(payload), {
      status,
      headers: { 'Content-Type': 'application/json' }
    })

  // Env vars take precedence; the literals are the project's known
  // default endpoints.
  const endpointIds: Record<string, string | undefined> = {
    image: env.RUNPOD_IMAGE_ENDPOINT_ID || 'tzf1j3sc3zufsy',
    video: env.RUNPOD_VIDEO_ENDPOINT_ID || '4jql4l7l0yw0f3',
    text: env.RUNPOD_TEXT_ENDPOINT_ID || '03g5hz3hlo8gr2',
    whisper: env.RUNPOD_WHISPER_ENDPOINT_ID || 'lrtisuv8ixbtub'
  }
  const endpointType = req.params.endpointType
  const endpointId = endpointIds[endpointType]
  if (!endpointId) {
    return json({ error: `Unknown endpoint type: ${endpointType}` }, 400)
  }

  if (!env.RUNPOD_API_KEY) {
    return json({ error: 'RUNPOD_API_KEY not configured' }, 500)
  }

  try {
    const payload = await req.json()
    const upstream = await fetch(`https://api.runpod.ai/v2/${endpointId}/run`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${env.RUNPOD_API_KEY}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(payload)
    })

    if (!upstream.ok) {
      // Mirror the upstream failure so the client can distinguish RunPod
      // errors from proxy errors.
      return json({
        error: `RunPod API error: ${upstream.status}`,
        details: await upstream.text()
      }, upstream.status)
    }

    return json(await upstream.json())
  } catch (error) {
    console.error('RunPod run proxy error:', error)
    return json({ error: (error as Error).message }, 500)
  }
})
// RunPod proxy - check job status.
//
// Looks up `jobId` on the resolved RunPod endpoint using the server-side
// RUNPOD_API_KEY and relays RunPod's JSON status payload.
//
// Fix: removed the unsafe `as 'image' | 'video' | 'text' | 'whisper'`
// assertion on the route param — the map lookup + 400 guard handle
// unknown values without lying to the type checker.
//
// NOTE(review): client-side pollRunPodJob also falls back to a
// `${proxyUrl}/stream/:jobId` URL, but no /api/runpod/:endpointType/stream
// route exists in this router — confirm whether a stream proxy route
// should be added so that fallback works.
.get("/api/runpod/:endpointType/status/:jobId", async (req, env) => {
  // Every exit path of this handler returns JSON.
  const json = (payload: unknown, status = 200) =>
    new Response(JSON.stringify(payload), {
      status,
      headers: { 'Content-Type': 'application/json' }
    })

  // Env vars take precedence; the literals are the project's known
  // default endpoints.
  const endpointIds: Record<string, string | undefined> = {
    image: env.RUNPOD_IMAGE_ENDPOINT_ID || 'tzf1j3sc3zufsy',
    video: env.RUNPOD_VIDEO_ENDPOINT_ID || '4jql4l7l0yw0f3',
    text: env.RUNPOD_TEXT_ENDPOINT_ID || '03g5hz3hlo8gr2',
    whisper: env.RUNPOD_WHISPER_ENDPOINT_ID || 'lrtisuv8ixbtub'
  }
  const endpointType = req.params.endpointType
  const endpointId = endpointIds[endpointType]
  if (!endpointId) {
    return json({ error: `Unknown endpoint type: ${endpointType}` }, 400)
  }

  if (!env.RUNPOD_API_KEY) {
    return json({ error: 'RUNPOD_API_KEY not configured' }, 500)
  }

  try {
    const { jobId } = req.params
    const upstream = await fetch(
      `https://api.runpod.ai/v2/${endpointId}/status/${jobId}`,
      { headers: { 'Authorization': `Bearer ${env.RUNPOD_API_KEY}` } }
    )

    if (!upstream.ok) {
      // Mirror the upstream failure so the client can distinguish RunPod
      // errors from proxy errors.
      return json({
        error: `RunPod status error: ${upstream.status}`,
        details: await upstream.text()
      }, upstream.status)
    }

    return json(await upstream.json())
  } catch (error) {
    console.error('RunPod status proxy error:', error)
    return json({ error: (error as Error).message }, 500)
  }
})
/** /**
* Compute SHA-256 hash of content for change detection * Compute SHA-256 hash of content for change detection
*/ */

View File

@ -108,4 +108,11 @@ crons = ["0 0 * * *"] # Run at midnight UTC every day
# DO NOT put these directly in wrangler.toml: # DO NOT put these directly in wrangler.toml:
# - DAILY_API_KEY # - DAILY_API_KEY
# - CLOUDFLARE_API_TOKEN # - CLOUDFLARE_API_TOKEN
# - FAL_API_KEY        # For fal.ai image/video generation proxy
# - RUNPOD_API_KEY # For RunPod AI endpoints proxy
# - RESEND_API_KEY # For email sending
# - ADMIN_SECRET # For admin-only endpoints
#
# To set secrets:
# wrangler secret put FAL_API_KEY
# wrangler secret put RUNPOD_API_KEY