import { BaseBoxShapeUtil, Geometry2d, HTMLContainer, Rectangle2d, TLBaseShape, } from "tldraw" import React, { useState, useRef, useEffect } from "react" import { getFalProxyConfig } from "@/lib/clientConfig" import { StandardizedToolWrapper } from "@/components/StandardizedToolWrapper" import { usePinnedToView } from "@/hooks/usePinnedToView" import { useMaximize } from "@/hooks/useMaximize" // Type for fal.ai queue response interface FalQueueResponse { request_id?: string status?: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' | 'FAILED' logs?: Array<{ message: string; timestamp: string }> error?: string video?: { url: string } // Additional fields for WAN models output?: { video?: { url: string } } } type IVideoGen = TLBaseShape< "VideoGen", { w: number h: number prompt: string imageUrl: string // Input image URL for I2V generation imageBase64: string // Uploaded image as base64 for I2V generation videoUrl: string | null isLoading: boolean error: string | null duration: number // seconds model: string tags: string[] pinnedToView: boolean } > export class VideoGenShape extends BaseBoxShapeUtil { static override type = "VideoGen" as const // Video generation theme color: Purple static readonly PRIMARY_COLOR = "#8B5CF6" getDefaultProps(): IVideoGen['props'] { return { w: 500, h: 540, prompt: "", imageUrl: "", // Input image URL for I2V generation imageBase64: "", // Uploaded image as base64 videoUrl: null, isLoading: false, error: null, duration: 4, model: "wan-i2v", // fal.ai model: wan-i2v, wan-t2v, kling, minimax tags: ['video', 'ai-generated'], pinnedToView: false } } getGeometry(shape: IVideoGen): Geometry2d { // Ensure minimum dimensions for proper hit testing return new Rectangle2d({ width: Math.max(shape.props.w, 1), height: Math.max(shape.props.h, 1), isFilled: true, }) } component(shape: IVideoGen) { // Capture editor reference to avoid stale 'this' during drag operations const editor = this.editor // Debug: log what's in shape props on each render const 
[prompt, setPrompt] = useState(shape.props.prompt) const [imageUrl, setImageUrl] = useState(shape.props.imageUrl) const [imageBase64, setImageBase64] = useState(shape.props.imageBase64) const [isGenerating, setIsGenerating] = useState(shape.props.isLoading) const [error, setError] = useState(shape.props.error) const [videoUrl, setVideoUrl] = useState(shape.props.videoUrl) const [isMinimized, setIsMinimized] = useState(false) const fileInputRef = useRef(null) const isSelected = editor.getSelectedShapeIds().includes(shape.id) // Determine mode based on whether an image is provided const hasImage = imageUrl.trim() || imageBase64 const mode = hasImage ? 'i2v' : 't2v' // Sync video URL from shape props when it changes externally // This ensures the displayed video matches the shape's stored videoUrl useEffect(() => { if (shape.props.videoUrl !== videoUrl) { setVideoUrl(shape.props.videoUrl) } }, [shape.props.videoUrl]) // Pin to view functionality usePinnedToView(editor, shape.id, shape.props.pinnedToView) // Use the maximize hook for fullscreen functionality const { isMaximized, toggleMaximize } = useMaximize({ editor: editor, shapeId: shape.id, currentW: shape.props.w, currentH: shape.props.h, shapeType: 'VideoGen', }) const handlePinToggle = () => { editor.updateShape({ id: shape.id, type: "VideoGen", props: { pinnedToView: !shape.props.pinnedToView }, }) } // Handle file upload const handleFileUpload = (e: React.ChangeEvent) => { const file = e.target.files?.[0] if (!file) return // Validate file type if (!file.type.startsWith('image/')) { setError('Please upload an image file (JPEG, PNG, etc.)') return } // Validate file size (max 10MB) if (file.size > 10 * 1024 * 1024) { setError('Image must be less than 10MB') return } const reader = new FileReader() reader.onload = (event) => { const base64 = event.target?.result as string setImageBase64(base64) setImageUrl('') // Clear URL if uploading setError(null) } reader.onerror = () => { setError('Failed to read image 
file') } reader.readAsDataURL(file) } const handleGenerate = async () => { if (!prompt.trim()) { setError("Please enter a prompt describing the video") return } // Validate image URL if provided if (imageUrl.trim()) { try { new URL(imageUrl) } catch { setError("Please enter a valid image URL (must start with http:// or https://)") return } } // Get fal.ai proxy config const { proxyUrl } = getFalProxyConfig() const currentMode = (imageUrl.trim() || imageBase64) ? 'i2v' : 't2v' // Clear any existing video and set loading state setIsGenerating(true) setError(null) setVideoUrl(null) // Clear old video immediately // Update shape to show loading state and clear old video const currentShape = editor.getShape(shape.id) as IVideoGen | undefined if (currentShape) { editor.updateShape({ id: shape.id, type: shape.type, props: { ...currentShape.props, isLoading: true, error: null, videoUrl: null // Clear old video from shape props } }) } try { // Choose fal.ai endpoint based on mode // WAN 2.1 models: fast startup, good quality const endpoint = currentMode === 'i2v' ? 
'fal-ai/wan-i2v' : 'fal-ai/wan-t2v' // Build input payload for fal.ai const inputPayload: Record = { prompt: prompt, negative_prompt: "blurry, distorted, low quality, static, frozen, watermark", num_frames: 81, // ~4 seconds at 24fps fps: 24, guidance_scale: 5.0, num_inference_steps: 30, } // Add image for I2V mode if (currentMode === 'i2v') { if (imageUrl.trim()) { inputPayload.image_url = imageUrl } else if (imageBase64) { // fal.ai accepts data URLs directly inputPayload.image_url = imageBase64 } } // Submit to fal.ai queue via proxy const response = await fetch(`${proxyUrl}/queue/${endpoint}`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(inputPayload) }) if (!response.ok) { const errorData = await response.json().catch(() => ({ error: response.statusText })) as { error?: string; details?: string } throw new Error(`fal.ai API error: ${response.status} - ${errorData.error || errorData.details || 'Unknown error'}`) } const jobData = await response.json() as FalQueueResponse if (!jobData.request_id) { throw new Error('No request_id returned from fal.ai') } // Poll for completion via proxy // fal.ai is generally faster than RunPod due to warm instances // Typical times: 30-90 seconds for video generation let attempts = 0 const maxAttempts = 120 // 4 minutes with 2s intervals while (attempts < maxAttempts) { await new Promise(resolve => setTimeout(resolve, 2000)) attempts++ const statusResponse = await fetch(`${proxyUrl}/queue/${endpoint}/status/${jobData.request_id}`) if (!statusResponse.ok) { console.warn(`🎬 VideoGen: Poll error (attempt ${attempts}):`, statusResponse.status) continue } const statusData = await statusResponse.json() as FalQueueResponse if (statusData.status === 'COMPLETED') { // Fetch the result via proxy const resultResponse = await fetch(`${proxyUrl}/queue/${endpoint}/result/${jobData.request_id}`) if (!resultResponse.ok) { throw new Error(`Failed to fetch result: ${resultResponse.status}`) } const 
resultData = await resultResponse.json() as { video?: { url: string }; output?: { video?: { url: string } } }

            // Extract video URL from result
            // (fal.ai may return it at the top level or nested under `output` for WAN models)
            const videoResultUrl = resultData.video?.url || resultData.output?.video?.url

            if (videoResultUrl) {
              // Update local state immediately
              setVideoUrl(videoResultUrl)
              setIsGenerating(false)

              // Get fresh shape data to avoid stale props
              const currentShape = editor.getShape(shape.id)
              if (currentShape) {
                // Persist result plus the inputs that produced it onto the shape
                editor.updateShape({
                  id: shape.id,
                  type: shape.type,
                  props: {
                    ...(currentShape as IVideoGen).props,
                    videoUrl: videoResultUrl,
                    isLoading: false,
                    prompt: prompt,
                    imageUrl: imageUrl,
                    imageBase64: imageBase64
                  }
                })
              }
              return
            } else {
              throw new Error('Video generation completed but no video URL returned')
            }
          } else if (statusData.status === 'FAILED') {
            throw new Error(statusData.error || 'Video generation failed')
          }
        }

        // Poll loop exhausted without COMPLETED/FAILED
        throw new Error('Video generation timed out after 4 minutes. Please try again.')
      } catch (error: any) {
        // NOTE(review): `error: any` — `unknown` plus narrowing would be safer; left as-is here.
        const errorMessage = error.message || 'Unknown error during video generation'
        console.error('❌ VideoGen: Generation error:', errorMessage)
        setError(errorMessage)
        setIsGenerating(false)
        // Surface the failure on the shape so it persists across re-renders
        editor.updateShape({
          id: shape.id,
          type: shape.type,
          props: { ...shape.props, isLoading: false, error: errorMessage }
        })
      }
    }

    // Remove the shape from the canvas entirely
    const handleClose = () => { editor.deleteShape(shape.id) }

    // Local-only UI toggle; not persisted to shape props
    const handleMinimize = () => { setIsMinimized(!isMinimized) }

    // Persist edited tags onto the shape
    const handleTagsChange = (newTags: string[]) => { editor.updateShape({ id: shape.id, type: shape.type, props: { ...shape.props, tags: newTags } }) }

    // NOTE(review): the JSX below appears corrupted — angle-bracketed tags/attributes
    // seem to have been stripped from this copy of the file (only text nodes and
    // braced expressions remain). Recover the original markup from version control
    // before editing; it is reproduced verbatim here.
    return ( 🎬 Video Generator Generating... ) : undefined } >
{!videoUrl && ( <> {/* Mode indicator */}
{mode === 'i2v' ? '🖼️ Image-to-Video' : '✨ Text-to-Video'} {mode === 'i2v' ? '(animates your image)' : '(generates from text only)'}
{/* Image Input Section */}
{/* Image preview or upload area */} {(imageUrl || imageBase64) ? (
Preview { (e.target as HTMLImageElement).style.display = 'none' setError('Failed to load image from URL') }} />
) : (
{/* Upload button */}
)} {/* URL input (collapsible) */} {!imageBase64 && ( { setImageUrl(e.target.value) setImageBase64('') }} placeholder="Or paste image URL..." disabled={isGenerating} onPointerDown={(e) => e.stopPropagation()} onTouchStart={(e) => e.stopPropagation()} onMouseDown={(e) => e.stopPropagation()} style={{ width: '100%', padding: '8px 10px', backgroundColor: '#fff', color: '#333', border: '1px solid #ddd', borderRadius: '6px', fontSize: '12px', boxSizing: 'border-box', touchAction: 'manipulation', minHeight: '44px', }} /> )}
{/* Prompt */}