Add backlog tasks from worktrees and feature branches

- task-002: RunPod AI API Integration (worktree: add-runpod-AI-API)
- task-003: MulTmux Web Integration (worktree: mulTmux-webtree)
- task-004: IO Chip Feature (worktree: feature/io-chip)
- task-005: Automerge CRDT Sync
- task-006: Stripe Payment Integration
- task-007: Web3 Integration
- task-008: Audio Recording Feature
- task-009: Web Speech API Transcription
- task-010: Holon Integration
- task-011: Terminal Tool
- task-012: Dark Mode Theme

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Jeff Emmett 2025-12-03 21:56:54 -08:00
parent 696d6f24bb
commit b183a4f7ea
39 changed files with 9723 additions and 1321 deletions

1
CLAUDE.md Symbolic link
View File

@ -0,0 +1 @@
/home/jeffe/.claude/CLAUDE.md

15
backlog/config.yml Normal file
View File

@ -0,0 +1,15 @@
# Backlog tool configuration for the Canvas feature-list board.
# NOTE(review): field meanings are inferred from their names — confirm against
# the backlog tool's own documentation before relying on these comments.
project_name: "Canvas Feature List"
# Workflow columns; new tasks are created in default_status.
default_status: "To Do"
statuses: ["To Do", "In Progress", "Done"]
labels: []
milestones: []
date_format: yyyy-mm-dd
max_column_width: 20
# Web UI: presumably serves on default_port and opens a browser automatically.
auto_open_browser: true
default_port: 6420
remote_operations: true
auto_commit: true
# Task IDs are zero-padded to 3 digits (e.g. task-002, task-010).
zero_padded_ids: 3
bypass_git_hooks: false
check_active_branches: true
active_branch_days: 60

View File

@ -0,0 +1,25 @@
---
id: task-002
title: RunPod AI API Integration
status: In Progress
assignee: []
created_date: '2025-12-03'
labels: [feature, ai, integration]
priority: high
branch: add-runpod-AI-API
worktree: /home/jeffe/Github/canvas-website-branch-worktrees/add-runpod-AI-API
---
## Description
Integrate RunPod serverless AI API for image generation and other AI features on the canvas.
## Branch Info
- **Branch**: `add-runpod-AI-API`
- **Worktree**: `/home/jeffe/Github/canvas-website-branch-worktrees/add-runpod-AI-API`
- **Commit**: 083095c
## Acceptance Criteria
- [ ] Connect to RunPod serverless endpoints
- [ ] Implement image generation from canvas
- [ ] Handle AI responses and display on canvas
- [ ] Error handling and loading states

View File

@ -0,0 +1,24 @@
---
id: task-003
title: MulTmux Web Integration
status: In Progress
assignee: []
created_date: '2025-12-03'
labels: [feature, terminal, integration]
priority: medium
branch: mulTmux-webtree
worktree: /home/jeffe/Github/canvas-website-branch-worktrees/mulTmux-webtree
---
## Description
Integrate MulTmux web terminal functionality into the canvas for terminal-based interactions.
## Branch Info
- **Branch**: `mulTmux-webtree`
- **Worktree**: `/home/jeffe/Github/canvas-website-branch-worktrees/mulTmux-webtree`
- **Commit**: 8ea3490
## Acceptance Criteria
- [ ] Embed terminal component in canvas
- [ ] Handle terminal I/O within canvas context
- [ ] Support multiple terminal sessions

View File

@ -0,0 +1,24 @@
---
id: task-004
title: IO Chip Feature
status: In Progress
assignee: []
created_date: '2025-12-03'
labels: [feature, io, ui]
priority: medium
branch: feature/io-chip
worktree: /home/jeffe/Github/canvas-website-io-chip
---
## Description
Implement IO chip feature for the canvas - enabling input/output connections between canvas elements.
## Branch Info
- **Branch**: `feature/io-chip`
- **Worktree**: `/home/jeffe/Github/canvas-website-io-chip`
- **Commit**: 527462a
## Acceptance Criteria
- [ ] Create IO chip component
- [ ] Enable connections between canvas elements
- [ ] Handle data flow between connected chips

View File

@ -0,0 +1,22 @@
---
id: task-005
title: Automerge CRDT Sync
status: To Do
assignee: []
created_date: '2025-12-03'
labels: [feature, sync, collaboration]
priority: high
branch: Automerge
---
## Description
Implement Automerge CRDT-based synchronization for real-time collaborative canvas editing.
## Branch Info
- **Branch**: `Automerge`
## Acceptance Criteria
- [ ] Integrate Automerge library
- [ ] Enable real-time sync between clients
- [ ] Handle conflict resolution automatically
- [ ] Persist state across sessions

View File

@ -0,0 +1,22 @@
---
id: task-006
title: Stripe Payment Integration
status: To Do
assignee: []
created_date: '2025-12-03'
labels: [feature, payments, integration]
priority: medium
branch: stripe-integration
---
## Description
Integrate Stripe for payment processing and subscription management.
## Branch Info
- **Branch**: `stripe-integration`
## Acceptance Criteria
- [ ] Set up Stripe API connection
- [ ] Implement payment flow
- [ ] Handle subscriptions
- [ ] Add billing management UI

View File

@ -0,0 +1,21 @@
---
id: task-007
title: Web3 Integration
status: To Do
assignee: []
created_date: '2025-12-03'
labels: [feature, web3, blockchain]
priority: low
branch: web3-integration
---
## Description
Integrate Web3 capabilities for blockchain-based features (wallet connect, NFT canvas elements, etc.).
## Branch Info
- **Branch**: `web3-integration`
## Acceptance Criteria
- [ ] Add wallet connection
- [ ] Enable NFT minting of canvas elements
- [ ] Blockchain-based ownership verification

View File

@ -0,0 +1,22 @@
---
id: task-008
title: Audio Recording Feature
status: To Do
assignee: []
created_date: '2025-12-03'
labels: [feature, audio, media]
priority: medium
branch: audio-recording-attempt
---
## Description
Implement audio recording capability for voice notes and audio annotations on the canvas.
## Branch Info
- **Branch**: `audio-recording-attempt`
## Acceptance Criteria
- [ ] Record audio from microphone
- [ ] Save audio clips to canvas
- [ ] Playback audio annotations
- [ ] Transcription integration

View File

@ -0,0 +1,22 @@
---
id: task-009
title: Web Speech API Transcription
status: To Do
assignee: []
created_date: '2025-12-03'
labels: [feature, transcription, speech]
priority: medium
branch: transcribe-webspeechAPI
---
## Description
Implement speech-to-text transcription using the Web Speech API for voice input on the canvas.
## Branch Info
- **Branch**: `transcribe-webspeechAPI`
## Acceptance Criteria
- [ ] Capture speech via Web Speech API
- [ ] Convert to text in real-time
- [ ] Display transcription on canvas
- [ ] Support multiple languages

View File

@ -0,0 +1,21 @@
---
id: task-010
title: Holon Integration
status: To Do
assignee: []
created_date: '2025-12-03'
labels: [feature, holon, integration]
priority: medium
branch: holon-integration
---
## Description
Integrate Holon framework for hierarchical canvas organization and nested structures.
## Branch Info
- **Branch**: `holon-integration`
## Acceptance Criteria
- [ ] Implement holon data structure
- [ ] Enable nested canvas elements
- [ ] Support hierarchical navigation

View File

@ -0,0 +1,21 @@
---
id: task-011
title: Terminal Tool
status: To Do
assignee: []
created_date: '2025-12-03'
labels: [feature, terminal, tool]
priority: medium
branch: feature/terminal-tool
---
## Description
Add a terminal tool to the canvas toolbar for embedding terminal sessions.
## Branch Info
- **Branch**: `feature/terminal-tool`
## Acceptance Criteria
- [ ] Add terminal tool to toolbar
- [ ] Spawn terminal instances on canvas
- [ ] Handle terminal sizing and positioning

View File

@ -0,0 +1,22 @@
---
id: task-012
title: Dark Mode Theme
status: To Do
assignee: []
created_date: '2025-12-03'
labels: [feature, ui, theme]
priority: low
branch: dark-mode
---
## Description
Implement dark mode theme support for the canvas interface.
## Branch Info
- **Branch**: `dark-mode`
## Acceptance Criteria
- [ ] Create dark theme colors
- [ ] Add theme toggle
- [ ] Persist user preference
- [ ] System theme detection

1778
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -37,6 +37,7 @@
"@chengsokdara/use-whisper": "^0.2.0", "@chengsokdara/use-whisper": "^0.2.0",
"@daily-co/daily-js": "^0.60.0", "@daily-co/daily-js": "^0.60.0",
"@daily-co/daily-react": "^0.20.0", "@daily-co/daily-react": "^0.20.0",
"@mdxeditor/editor": "^3.51.0",
"@tldraw/assets": "^3.15.4", "@tldraw/assets": "^3.15.4",
"@tldraw/tldraw": "^3.15.4", "@tldraw/tldraw": "^3.15.4",
"@tldraw/tlschema": "^3.15.4", "@tldraw/tlschema": "^3.15.4",
@ -70,11 +71,11 @@
"react-markdown": "^10.1.0", "react-markdown": "^10.1.0",
"react-router-dom": "^7.0.2", "react-router-dom": "^7.0.2",
"recoil": "^0.7.7", "recoil": "^0.7.7",
"sharp": "^0.33.5",
"tldraw": "^3.15.4", "tldraw": "^3.15.4",
"use-whisper": "^0.0.1", "use-whisper": "^0.0.1",
"webcola": "^3.4.0", "webcola": "^3.4.0",
"webnative": "^0.36.3", "webnative": "^0.36.3"
"sharp": "^0.33.5"
}, },
"devDependencies": { "devDependencies": {
"@cloudflare/types": "^6.0.0", "@cloudflare/types": "^6.0.0",

View File

@ -0,0 +1,63 @@
# ComfyUI Model Paths Configuration
# Updated to include /runpod-volume/ paths for all model types
# This allows models to be loaded from the network volume for faster cold starts
comfyui:
base_path: /ComfyUI/
is_default: true
# Checkpoints - check network volume first, then local
checkpoints: |
/runpod-volume/models/checkpoints/
models/checkpoints/
# CLIP models
clip: |
/runpod-volume/models/clip/
models/clip/
# CLIP Vision models (e.g., clip_vision_h.safetensors)
clip_vision: |
/runpod-volume/models/clip_vision/
models/clip_vision/
# Config files
configs: models/configs/
# ControlNet models
controlnet: |
/runpod-volume/models/controlnet/
models/controlnet/
# Diffusion models (Wan2.2 model files)
diffusion_models: |
/runpod-volume/models/diffusion_models/
/runpod-volume/models/
models/diffusion_models/
models/unet/
# Text embeddings
embeddings: |
/runpod-volume/models/embeddings/
models/embeddings/
# LoRA models
loras: |
/runpod-volume/loras/
/runpod-volume/models/loras/
models/loras/
# Text encoders (e.g., umt5-xxl-enc-bf16.safetensors)
text_encoders: |
/runpod-volume/models/text_encoders/
models/text_encoders/
# Upscale models
upscale_models: |
/runpod-volume/models/upscale_models/
models/upscale_models/
# VAE models (e.g., Wan2_1_VAE_bf16.safetensors)
vae: |
/runpod-volume/models/vae/
models/vae/

View File

@ -0,0 +1,143 @@
#!/bin/bash
# Script to set up the RunPod network volume with Wan2.2 models
# Run this once on a GPU pod with the network volume attached
#
# Overview:
#   1. Create the expected model directory layout under /runpod-volume.
#   2. Print current volume and Docker-image model inventories for inspection.
#   3. Copy *.safetensors files from the image to the volume (no overwrite).
#   4. Write an extra_model_paths.yaml that makes ComfyUI prefer the volume.
#
# NOTE(review): assumes write access to /runpod-volume and that ComfyUI is
# installed at /ComfyUI — confirm both on the target pod before running.
echo "=== Setting up RunPod Network Volume for Wan2.2 ==="
# Create directory structure
echo "Creating directory structure..."
mkdir -p /runpod-volume/models/diffusion_models
mkdir -p /runpod-volume/models/vae
mkdir -p /runpod-volume/models/text_encoders
mkdir -p /runpod-volume/models/clip_vision
mkdir -p /runpod-volume/loras
# Check current disk usage
echo "Current network volume usage:"
df -h /runpod-volume
# List what's already on the volume
echo ""
echo "Current contents of /runpod-volume:"
ls -la /runpod-volume/
echo ""
echo "Current contents of /runpod-volume/models/ (if exists):"
ls -la /runpod-volume/models/ 2>/dev/null || echo "(empty or doesn't exist)"
# Check if models exist in the Docker image
echo ""
echo "Models in Docker image /ComfyUI/models/diffusion_models/:"
ls -la /ComfyUI/models/diffusion_models/ 2>/dev/null || echo "(not found)"
echo ""
echo "Models in Docker image /ComfyUI/models/vae/:"
ls -la /ComfyUI/models/vae/ 2>/dev/null || echo "(not found)"
echo ""
echo "Models in Docker image /ComfyUI/models/text_encoders/:"
ls -la /ComfyUI/models/text_encoders/ 2>/dev/null || echo "(not found)"
echo ""
echo "Models in Docker image /ComfyUI/models/clip_vision/:"
ls -la /ComfyUI/models/clip_vision/ 2>/dev/null || echo "(not found)"
echo ""
echo "Models in Docker image /ComfyUI/models/loras/:"
ls -la /ComfyUI/models/loras/ 2>/dev/null || echo "(not found)"
# Copy models to network volume (if not already there)
# cp flags: -v (verbose) -n (no-clobber: never overwrite files already on the
# volume). '2>/dev/null || true' suppresses cp's failure status when a glob
# matches nothing in the image, so the script continues.
echo ""
echo "=== Copying models to network volume ==="
# Diffusion models
if [ -d "/ComfyUI/models/diffusion_models" ]; then
echo "Copying diffusion models..."
cp -vn /ComfyUI/models/diffusion_models/*.safetensors /runpod-volume/models/diffusion_models/ 2>/dev/null || true
fi
# VAE models
if [ -d "/ComfyUI/models/vae" ]; then
echo "Copying VAE models..."
cp -vn /ComfyUI/models/vae/*.safetensors /runpod-volume/models/vae/ 2>/dev/null || true
fi
# Text encoders
if [ -d "/ComfyUI/models/text_encoders" ]; then
echo "Copying text encoder models..."
cp -vn /ComfyUI/models/text_encoders/*.safetensors /runpod-volume/models/text_encoders/ 2>/dev/null || true
fi
# CLIP vision
if [ -d "/ComfyUI/models/clip_vision" ]; then
echo "Copying CLIP vision models..."
cp -vn /ComfyUI/models/clip_vision/*.safetensors /runpod-volume/models/clip_vision/ 2>/dev/null || true
fi
# LoRAs
if [ -d "/ComfyUI/models/loras" ]; then
echo "Copying LoRA models..."
cp -vn /ComfyUI/models/loras/*.safetensors /runpod-volume/loras/ 2>/dev/null || true
fi
# Copy extra_model_paths.yaml to volume
# NOTE: this overwrites any existing extra_model_paths.yaml on the volume.
# The quoted 'EOF' delimiter prevents variable expansion inside the heredoc.
echo ""
echo "Copying extra_model_paths.yaml to network volume..."
cat > /runpod-volume/extra_model_paths.yaml << 'EOF'
# ComfyUI Model Paths Configuration - Network Volume Priority
comfyui:
base_path: /ComfyUI/
is_default: true
checkpoints: |
/runpod-volume/models/checkpoints/
models/checkpoints/
clip: |
/runpod-volume/models/clip/
models/clip/
clip_vision: |
/runpod-volume/models/clip_vision/
models/clip_vision/
configs: models/configs/
controlnet: |
/runpod-volume/models/controlnet/
models/controlnet/
diffusion_models: |
/runpod-volume/models/diffusion_models/
/runpod-volume/models/
models/diffusion_models/
models/unet/
embeddings: |
/runpod-volume/models/embeddings/
models/embeddings/
loras: |
/runpod-volume/loras/
/runpod-volume/models/loras/
models/loras/
text_encoders: |
/runpod-volume/models/text_encoders/
models/text_encoders/
upscale_models: |
/runpod-volume/models/upscale_models/
models/upscale_models/
vae: |
/runpod-volume/models/vae/
models/vae/
EOF
echo ""
echo "=== Final network volume contents ==="
echo ""
echo "/runpod-volume/models/:"
du -sh /runpod-volume/models/*/ 2>/dev/null || echo "(empty)"
echo ""
echo "/runpod-volume/loras/:"
ls -la /runpod-volume/loras/ 2>/dev/null || echo "(empty)"
echo ""
echo "Total network volume usage:"
du -sh /runpod-volume/
echo ""
echo "=== Setup complete! ==="
echo "Models have been copied to the network volume."
echo "On subsequent cold starts, models will load from /runpod-volume/ (faster)."

View File

@ -20,11 +20,17 @@ function minimalSanitizeRecord(record: any): any {
if (typeof sanitized.isLocked !== 'boolean') sanitized.isLocked = false if (typeof sanitized.isLocked !== 'boolean') sanitized.isLocked = false
if (typeof sanitized.opacity !== 'number') sanitized.opacity = 1 if (typeof sanitized.opacity !== 'number') sanitized.opacity = 1
if (!sanitized.meta || typeof sanitized.meta !== 'object') sanitized.meta = {} if (!sanitized.meta || typeof sanitized.meta !== 'object') sanitized.meta = {}
// CRITICAL: IndexKey must follow tldraw's fractional indexing format // NOTE: Index assignment is handled by assignSequentialIndices() during format conversion
// Valid format: starts with 'a' followed by digits, optionally followed by uppercase letters // Here we only ensure index exists with a valid format, not strictly validate
// Examples: "a1", "a2", "a10", "a1V" (fractional between a1 and a2) // This preserves layer order that was established during conversion
// Invalid: "c1", "b1", "z999" (must start with 'a') // Valid formats: a1, a2, a10, a1V, a1Lz, etc. (fractional indexing)
if (!sanitized.index || typeof sanitized.index !== 'string' || !/^a\d+[A-Z]*$/.test(sanitized.index)) { if (!sanitized.index || typeof sanitized.index !== 'string' || sanitized.index.length === 0) {
// Only assign default if truly missing
sanitized.index = 'a1'
} else if (!/^a\d/.test(sanitized.index) && !/^Z[a-z]/i.test(sanitized.index)) {
// Accept any index starting with 'a' + digit, or 'Z' prefix
// Only reset clearly invalid formats
console.warn(`⚠️ MinimalSanitization: Invalid index format "${sanitized.index}" for shape ${sanitized.id}`)
sanitized.index = 'a1' sanitized.index = 'a1'
} }
if (!sanitized.parentId) sanitized.parentId = 'page:page' if (!sanitized.parentId) sanitized.parentId = 'page:page'

View File

@ -3,7 +3,7 @@ import { TLStoreSnapshot, InstancePresenceRecordType, getIndexAbove, IndexKey }
import { CloudflareNetworkAdapter } from "./CloudflareAdapter" import { CloudflareNetworkAdapter } from "./CloudflareAdapter"
import { useAutomergeStoreV2, useAutomergePresence } from "./useAutomergeStoreV2" import { useAutomergeStoreV2, useAutomergePresence } from "./useAutomergeStoreV2"
import { TLStoreWithStatus } from "@tldraw/tldraw" import { TLStoreWithStatus } from "@tldraw/tldraw"
import { Repo, parseAutomergeUrl, stringifyAutomergeUrl, AutomergeUrl } from "@automerge/automerge-repo" import { Repo, parseAutomergeUrl, stringifyAutomergeUrl, AutomergeUrl, DocumentId } from "@automerge/automerge-repo"
import { DocHandle } from "@automerge/automerge-repo" import { DocHandle } from "@automerge/automerge-repo"
import { IndexedDBStorageAdapter } from "@automerge/automerge-repo-storage-indexeddb" import { IndexedDBStorageAdapter } from "@automerge/automerge-repo-storage-indexeddb"
import { getDocumentId, saveDocumentId } from "./documentIdMapping" import { getDocumentId, saveDocumentId } from "./documentIdMapping"
@ -175,11 +175,31 @@ export function useAutomergeSync(config: AutomergeSyncConfig): TLStoreWithStatus
if (record.type) recordHash += record.type if (record.type) recordHash += record.type
// For shapes, include x, y, w, h for position/size changes // For shapes, include x, y, w, h for position/size changes
// Also include text content for shapes that have it (Markdown, ObsNote, etc.)
if (record.typeName === 'shape') { if (record.typeName === 'shape') {
if (typeof record.x === 'number') recordHash += `x${record.x}` if (typeof record.x === 'number') recordHash += `x${record.x}`
if (typeof record.y === 'number') recordHash += `y${record.y}` if (typeof record.y === 'number') recordHash += `y${record.y}`
if (typeof record.props?.w === 'number') recordHash += `w${record.props.w}` if (typeof record.props?.w === 'number') recordHash += `w${record.props.w}`
if (typeof record.props?.h === 'number') recordHash += `h${record.props.h}` if (typeof record.props?.h === 'number') recordHash += `h${record.props.h}`
// CRITICAL: Include text content in hash for Markdown and similar shapes
// This ensures text changes trigger R2 persistence
if (typeof record.props?.text === 'string' && record.props.text.length > 0) {
// Include text length and a sample of content for change detection
recordHash += `t${record.props.text.length}`
// Include first 100 chars and last 50 chars to detect changes anywhere in the text
recordHash += record.props.text.substring(0, 100)
if (record.props.text.length > 150) {
recordHash += record.props.text.substring(record.props.text.length - 50)
}
}
// Also include content for ObsNote shapes
if (typeof record.props?.content === 'string' && record.props.content.length > 0) {
recordHash += `c${record.props.content.length}`
recordHash += record.props.content.substring(0, 100)
if (record.props.content.length > 150) {
recordHash += record.props.content.substring(record.props.content.length - 50)
}
}
} }
// Simple hash of the record string // Simple hash of the record string
@ -370,9 +390,23 @@ export function useAutomergeSync(config: AutomergeSyncConfig): TLStoreWithStatus
if (storedDocumentId) { if (storedDocumentId) {
console.log(`Found stored document ID for room ${roomId}: ${storedDocumentId}`) console.log(`Found stored document ID for room ${roomId}: ${storedDocumentId}`)
try { try {
// Try to find the existing document in the repo (loads from IndexedDB) // Parse the URL to get the DocumentId
// repo.find() returns a Promise<DocHandle> const parsed = parseAutomergeUrl(storedDocumentId as AutomergeUrl)
const foundHandle = await repo.find<TLStoreSnapshot>(storedDocumentId as AutomergeUrl) const docId = parsed.documentId
// Check if the document is already loaded in the repo's handles cache
// This prevents "Cannot create a reference to an existing document object" error
const existingHandle = repo.handles[docId] as DocHandle<TLStoreSnapshot> | undefined
let foundHandle: DocHandle<TLStoreSnapshot>
if (existingHandle) {
console.log(`Document ${docId} already in repo cache, reusing handle`)
foundHandle = existingHandle
} else {
// Try to find the existing document in the repo (loads from IndexedDB)
// repo.find() returns a Promise<DocHandle>
foundHandle = await repo.find<TLStoreSnapshot>(storedDocumentId as AutomergeUrl)
}
await foundHandle.whenReady() await foundHandle.whenReady()
handle = foundHandle handle = foundHandle

View File

@ -0,0 +1,427 @@
/**
* CryptID Email Service
* Handles communication with the backend for email linking and device verification
*/
import * as crypto from './crypto';
/**
 * Resolve the backend worker base URL from the current hostname.
 * Local development (localhost / 127.0.0.1) talks to the local worker;
 * every other host uses the deployed Cloudflare worker.
 */
function getApiUrl(): string {
  const { hostname } = window.location;
  const isLocal = hostname === 'localhost' || hostname === '127.0.0.1';
  return isLocal
    ? 'http://localhost:5172'
    : 'https://jeffemmett-canvas.jeffemmett.workers.dev';
}
/**
 * Server response for the "link email to account" call (Device A flow).
 * Shape assumed from its use in linkEmailToAccount() — confirm against the
 * worker's /auth/link-email handler.
 */
export interface LinkEmailResult {
success: boolean;
message?: string;
emailVerified?: boolean;
emailSent?: boolean;
error?: string;
}
/**
 * Server response for device-link requests (Device B flow).
 */
export interface DeviceLinkResult {
success: boolean;
message?: string;
// Account the device is being linked to, when known by the server.
cryptidUsername?: string;
// True when this device's key was already linked; requestDeviceLink() skips
// storing a pending link in that case.
alreadyLinked?: boolean;
emailSent?: boolean;
error?: string;
}
/**
 * Server response for public-key lookup (used by checkEmailStatus()).
 */
export interface LookupResult {
found: boolean;
cryptidUsername?: string;
email?: string;
emailVerified?: boolean;
deviceName?: string;
}
/**
 * One device registered to an account, as returned by the /auth/devices call.
 */
export interface Device {
id: string;
deviceName: string;
userAgent: string | null;
// Timestamp strings from the server — exact format not visible here.
createdAt: string;
lastUsed: string | null;
isCurrentDevice: boolean;
}
/**
 * Link an email address to the current CryptID account.
 * Runs on Device A (the device that already holds the account keys).
 *
 * @param email           Email address to associate with the account.
 * @param cryptidUsername Account whose locally stored public key authorizes the link.
 * @param deviceName      Optional friendly name; derived from the UA if omitted.
 * @returns The server's result, or a `{ success: false, error }` shape on failure.
 */
export async function linkEmailToAccount(
  email: string,
  cryptidUsername: string,
  deviceName?: string
): Promise<LinkEmailResult> {
  try {
    // The account's public key identifies which CryptID identity is being linked.
    const publicKey = crypto.getPublicKey(cryptidUsername);
    if (!publicKey) {
      return { success: false, error: 'No public key found for this account' };
    }

    const payload = {
      email,
      cryptidUsername,
      publicKey,
      deviceName: deviceName || getDeviceName(),
    };
    const response = await fetch(`${getApiUrl()}/auth/link-email`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
    });

    const data = (await response.json()) as LinkEmailResult & { error?: string };
    if (!response.ok) {
      return { success: false, error: data.error || 'Failed to link email' };
    }
    return data;
  } catch (error) {
    console.error('Link email error:', error);
    return { success: false, error: String(error) };
  }
}
/**
 * Check the server-side email/verification status for an account.
 *
 * @param cryptidUsername Account to look up (must have a locally stored public key).
 * @returns The lookup result, or `{ found: false }` when there is no local
 *          public key, the server returns an error status, or the request fails.
 */
export async function checkEmailStatus(cryptidUsername: string): Promise<LookupResult> {
  try {
    const publicKey = crypto.getPublicKey(cryptidUsername);
    if (!publicKey) {
      return { found: false };
    }
    const response = await fetch(`${getApiUrl()}/auth/lookup`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({ publicKey }),
    });
    // Fix: previously a non-OK response (4xx/5xx) had its error body returned
    // as if it were a valid LookupResult. Treat error statuses as "not found",
    // matching how the sibling functions check response.ok.
    if (!response.ok) {
      return { found: false };
    }
    const data = await response.json() as LookupResult;
    return data;
  } catch (error) {
    console.error('Check email status error:', error);
    return { found: false };
  }
}
/**
 * Begin linking a brand-new device (Device B) to an existing account via email.
 *
 * Flow:
 * 1. Generate a fresh keypair on this device.
 * 2. Register the public key with the server along with the email.
 * 3. The server emails a verification link; clicking it (on this device)
 *    lets completeDeviceLink() finish the registration.
 *
 * The in-flight request is stashed in sessionStorage under 'pendingDeviceLink'
 * so completeDeviceLink() can read it back.
 *
 * NOTE(review): only the *public* key is persisted here; the generated private
 * key is not stored anywhere visible in this module. Confirm that
 * crypto.generateKeyPair() persists it internally, otherwise the device cannot
 * sign challenges after the link completes.
 *
 * @param email      Email already linked to the account on Device A.
 * @param deviceName Optional friendly name; derived from the UA if omitted.
 */
export async function requestDeviceLink(
  email: string,
  deviceName?: string
): Promise<DeviceLinkResult & { publicKey?: string }> {
  try {
    // Fresh identity for this device.
    const keyPair = await crypto.generateKeyPair();
    if (!keyPair) {
      return { success: false, error: 'Failed to generate cryptographic keys' };
    }

    const publicKey = await crypto.exportPublicKey(keyPair.publicKey);
    if (!publicKey) {
      return { success: false, error: 'Failed to export public key' };
    }

    const response = await fetch(`${getApiUrl()}/auth/request-device-link`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        email,
        publicKey,
        deviceName: deviceName || getDeviceName(),
      }),
    });

    const data = (await response.json()) as DeviceLinkResult & { error?: string };
    if (!response.ok) {
      return { success: false, error: data.error || 'Failed to request device link' };
    }

    // Remember the pending request until the user clicks the emailed link.
    // Skipped when the server says this key is already linked.
    if (data.success && !data.alreadyLinked) {
      sessionStorage.setItem(
        'pendingDeviceLink',
        JSON.stringify({
          email,
          publicKey,
          cryptidUsername: data.cryptidUsername,
          timestamp: Date.now(),
        })
      );
    }

    return { ...data, publicKey };
  } catch (error) {
    console.error('Request device link error:', error);
    return { success: false, error: String(error) };
  }
}
/**
 * Complete the device link after the user clicks the emailed verification link.
 *
 * On success, registers this device locally: stores the public key stashed by
 * requestDeviceLink(), records the username, and writes the same
 * `<username>_authData` localStorage entry the normal sign-in flow produces.
 *
 * @param token Verification token from the email link URL.
 */
export async function completeDeviceLink(token: string): Promise<DeviceLinkResult> {
  try {
    const response = await fetch(`${getApiUrl()}/auth/link-device/${token}`, {
      method: 'GET',
      headers: { 'Content-Type': 'application/json' },
    });

    const data = (await response.json()) as DeviceLinkResult & { email?: string; error?: string };
    if (!response.ok) {
      return { success: false, error: data.error || 'Failed to complete device link' };
    }

    // Finish local registration using the data stashed by requestDeviceLink().
    const pendingRaw = sessionStorage.getItem('pendingDeviceLink');
    if (pendingRaw && data.success) {
      const pending = JSON.parse(pendingRaw);
      if (data.cryptidUsername) {
        // Associate this device's public key with the server-confirmed username.
        crypto.storePublicKey(data.cryptidUsername, pending.publicKey);
        crypto.addRegisteredUser(data.cryptidUsername);
        // Mirror the auth-data shape written by the existing sign-in flow.
        localStorage.setItem(
          `${data.cryptidUsername}_authData`,
          JSON.stringify({
            challenge: `device-linked:${Date.now()}`,
            signature: 'device-link-verified',
            timestamp: Date.now(),
            email: data.email,
          })
        );
      }
      sessionStorage.removeItem('pendingDeviceLink');
    }

    return data;
  } catch (error) {
    console.error('Complete device link error:', error);
    return { success: false, error: String(error) };
  }
}
/**
 * Verify an email address using the token from the verification email.
 * Used for the initial email-verification step (distinct from device linking).
 *
 * @param token Verification token from the email link URL.
 */
export async function verifyEmail(token: string): Promise<{ success: boolean; email?: string; error?: string }> {
  type VerifyResponse = { success: boolean; email?: string; error?: string };
  try {
    const response = await fetch(`${getApiUrl()}/auth/verify-email/${token}`, {
      method: 'GET',
      headers: { 'Content-Type': 'application/json' },
    });
    const data = (await response.json()) as VerifyResponse;
    return response.ok
      ? data
      : { success: false, error: data.error || 'Failed to verify email' };
  } catch (error) {
    console.error('Verify email error:', error);
    return { success: false, error: String(error) };
  }
}
/**
 * Fetch all devices linked to this account.
 *
 * @param cryptidUsername Account to query (must have a locally stored public key).
 * @returns The device list, or an empty array when the key is missing or the
 *          request fails.
 */
export async function getLinkedDevices(cryptidUsername: string): Promise<Device[]> {
  try {
    const publicKey = crypto.getPublicKey(cryptidUsername);
    if (!publicKey) return [];

    const response = await fetch(`${getApiUrl()}/auth/devices`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ publicKey }),
    });
    const { devices } = (await response.json()) as { devices?: Device[] };
    return devices ?? [];
  } catch (error) {
    console.error('Get linked devices error:', error);
    return [];
  }
}
/**
 * Remove a linked device from the account.
 * The request is authorized with the account's locally stored public key.
 *
 * @param cryptidUsername Account the device belongs to.
 * @param deviceId        Server-side id of the device to revoke.
 */
export async function revokeDevice(
  cryptidUsername: string,
  deviceId: string
): Promise<{ success: boolean; error?: string }> {
  try {
    const publicKey = crypto.getPublicKey(cryptidUsername);
    if (!publicKey) {
      return { success: false, error: 'No public key found' };
    }

    const response = await fetch(`${getApiUrl()}/auth/devices/${deviceId}`, {
      method: 'DELETE',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ publicKey }),
    });

    const data = (await response.json()) as { success: boolean; error?: string };
    return response.ok
      ? data
      : { success: false, error: data.error || 'Failed to revoke device' };
  } catch (error) {
    console.error('Revoke device error:', error);
    return { success: false, error: String(error) };
  }
}
/**
 * Build a friendly device name ("Chrome on Windows") from a user-agent string.
 *
 * @param ua User agent to classify; defaults to the current browser's UA.
 * @returns `"<browser> on <os>"`, with 'Unknown'/'Browser' fallbacks.
 *
 * Fix: the OS checks are ordered so that iOS is tested before macOS (iPhone
 * and iPad UAs contain "like Mac OS X") and Android before Linux (Android UAs
 * contain "Linux"); the previous ordering misreported both platforms.
 */
function getDeviceName(ua: string = navigator.userAgent): string {
  // Detect OS — order matters, see note above.
  let os = 'Unknown';
  if (ua.includes('Windows')) os = 'Windows';
  else if (ua.includes('iPhone') || ua.includes('iPad')) os = 'iOS';
  else if (ua.includes('Android')) os = 'Android';
  else if (ua.includes('Mac')) os = 'macOS';
  else if (ua.includes('Linux')) os = 'Linux';

  // Detect browser. Edge's UA contains "Chrome" and Chrome's contains
  // "Safari", so checking Edge first keeps the remaining checks unambiguous.
  let browser = 'Browser';
  if (ua.includes('Edg')) browser = 'Edge';
  else if (ua.includes('Chrome')) browser = 'Chrome';
  else if (ua.includes('Firefox')) browser = 'Firefox';
  else if (ua.includes('Safari')) browser = 'Safari';

  return `${browser} on ${os}`;
}
/**
 * Whether a device-link request started within the last hour is still pending
 * (i.e. requestDeviceLink() ran but completeDeviceLink() has not finished).
 */
export function hasPendingDeviceLink(): boolean {
  const raw = sessionStorage.getItem('pendingDeviceLink');
  if (!raw) return false;
  try {
    const { timestamp } = JSON.parse(raw);
    const ONE_HOUR_MS = 60 * 60 * 1000;
    // Stale entries (older than an hour) are treated as absent.
    return Date.now() - timestamp < ONE_HOUR_MS;
  } catch {
    // Corrupt JSON in storage counts as "no pending link".
    return false;
  }
}
/**
 * Read the pending device-link request, if one exists and is under an hour old.
 *
 * @returns The pending email/username pair, or null when nothing is pending,
 *          the entry is stale, or the stored JSON is corrupt.
 */
export function getPendingDeviceLink(): { email: string; cryptidUsername: string } | null {
  const raw = sessionStorage.getItem('pendingDeviceLink');
  if (!raw) return null;
  try {
    const pending = JSON.parse(raw);
    const isFresh = Date.now() - pending.timestamp < 60 * 60 * 1000;
    if (!isFresh) return null;
    return { email: pending.email, cryptidUsername: pending.cryptidUsername };
  } catch {
    return null;
  }
}

View File

@ -1,17 +1,35 @@
/** /**
* Canvas AI Assistant * Canvas AI Assistant - The Mycelial Intelligence
* Provides AI-powered queries about canvas content using semantic search * Provides AI-powered queries about canvas content using semantic search
* and LLM integration for natural language understanding * and LLM integration for natural language understanding.
*
* The Mycelial Intelligence speaks directly to users, helping them navigate
* and understand their workspace through the interconnected network of shapes.
*/ */
import { Editor, TLShape, TLShapeId } from 'tldraw' import { Editor, TLShape, TLShapeId } from 'tldraw'
import { semanticSearch, extractShapeText, SemanticSearchResult } from './semanticSearch' import { semanticSearch, extractShapeText, SemanticSearchResult } from './semanticSearch'
import { llm } from '@/utils/llmUtils' import { llm } from '@/utils/llmUtils'
import { getToolSummaryForAI, suggestToolsForIntent, ToolSchema } from './toolSchema'
import {
getSelectionSummary,
getSelectionAsContext,
parseTransformIntent,
executeTransformCommand,
TransformCommand,
} from '@/utils/selectionTransforms'
export interface CanvasQueryResult { export interface CanvasQueryResult {
answer: string answer: string
relevantShapes: SemanticSearchResult[] relevantShapes: SemanticSearchResult[]
context: string context: string
suggestedTools: ToolSchema[]
/** If a transform command was detected and executed */
executedTransform?: TransformCommand
/** Whether there was a selection when the query was made */
hadSelection: boolean
/** Number of shapes that were selected */
selectionCount: number
} }
export interface CanvasAIConfig { export interface CanvasAIConfig {
@ -55,6 +73,7 @@ export class CanvasAI {
/** /**
* Query the canvas with natural language * Query the canvas with natural language
* Now selection-aware: includes selected shapes in context and can execute transforms
*/ */
async query( async query(
question: string, question: string,
@ -67,17 +86,50 @@ export class CanvasAI {
throw new Error('Editor not connected. Call setEditor() first.') throw new Error('Editor not connected. Call setEditor() first.')
} }
// Build context from canvas // Get selection info FIRST before any other processing
const context = await this.buildQueryContext(question, mergedConfig) const selectionSummary = getSelectionSummary(this.editor)
const hasSelection = selectionSummary.count > 0
// Check if this is a transform command on the selection
let executedTransform: TransformCommand | undefined
if (hasSelection) {
const { command } = parseTransformIntent(question)
if (command) {
// Execute the transform and provide immediate feedback
const success = executeTransformCommand(this.editor, command)
if (success) {
executedTransform = command
// Provide immediate feedback for transform commands
const transformMessage = this.getTransformFeedback(command, selectionSummary.count)
onToken?.(transformMessage, true)
return {
answer: transformMessage,
relevantShapes: [],
context: '',
suggestedTools: [],
executedTransform,
hadSelection: true,
selectionCount: selectionSummary.count,
}
}
}
}
// Build context from canvas, including selection context
const context = await this.buildQueryContext(question, mergedConfig, selectionSummary)
const relevantShapes = await semanticSearch.search( const relevantShapes = await semanticSearch.search(
question, question,
mergedConfig.topKResults, mergedConfig.topKResults,
mergedConfig.semanticSearchThreshold mergedConfig.semanticSearchThreshold
) )
// Build the system prompt for canvas-aware AI // Build the system prompt for canvas-aware AI (now selection-aware)
const systemPrompt = this.buildSystemPrompt() const systemPrompt = this.buildSystemPrompt(hasSelection)
const userPrompt = this.buildUserPrompt(question, context) const userPrompt = this.buildUserPrompt(question, context, selectionSummary)
// Get tool suggestions based on user intent
const suggestedTools = this.suggestTools(question, hasSelection)
let answer = '' let answer = ''
@ -106,9 +158,43 @@ export class CanvasAI {
answer, answer,
relevantShapes, relevantShapes,
context, context,
suggestedTools,
hadSelection: hasSelection,
selectionCount: selectionSummary.count,
} }
} }
/**
* Get human-readable feedback for transform commands
*/
private getTransformFeedback(command: TransformCommand, count: number): string {
const shapeWord = count === 1 ? 'shape' : 'shapes'
const messages: Record<TransformCommand, string> = {
'align-left': `Aligned ${count} ${shapeWord} to the left.`,
'align-center': `Centered ${count} ${shapeWord} horizontally.`,
'align-right': `Aligned ${count} ${shapeWord} to the right.`,
'align-top': `Aligned ${count} ${shapeWord} to the top.`,
'align-middle': `Centered ${count} ${shapeWord} vertically.`,
'align-bottom': `Aligned ${count} ${shapeWord} to the bottom.`,
'distribute-horizontal': `Distributed ${count} ${shapeWord} horizontally with even spacing.`,
'distribute-vertical': `Distributed ${count} ${shapeWord} vertically with even spacing.`,
'arrange-row': `Arranged ${count} ${shapeWord} in a horizontal row.`,
'arrange-column': `Arranged ${count} ${shapeWord} in a vertical column.`,
'arrange-grid': `Arranged ${count} ${shapeWord} in a grid pattern.`,
'arrange-circle': `Arranged ${count} ${shapeWord} in a circle.`,
'size-match-width': `Made ${count} ${shapeWord} the same width.`,
'size-match-height': `Made ${count} ${shapeWord} the same height.`,
'size-match-both': `Made ${count} ${shapeWord} the same size.`,
'size-smallest': `Resized ${count} ${shapeWord} to match the smallest.`,
'size-largest': `Resized ${count} ${shapeWord} to match the largest.`,
'merge-content': `Merged content from ${count} ${shapeWord} into a new note.`,
'cluster-semantic': `Organized ${count} ${shapeWord} into semantic clusters.`,
}
return messages[command] || `Transformed ${count} ${shapeWord}.`
}
/** /**
* Get a summary of the current canvas state * Get a summary of the current canvas state
*/ */
@ -122,10 +208,11 @@ export class CanvasAI {
const canvasContext = await semanticSearch.getCanvasContext() const canvasContext = await semanticSearch.getCanvasContext()
const visibleContext = semanticSearch.getVisibleShapesContext() const visibleContext = semanticSearch.getVisibleShapesContext()
const systemPrompt = `You are an AI assistant analyzing a collaborative canvas workspace. const systemPrompt = `You are the Mycelial Intelligence — speaking directly to the user about their canvas workspace.
Your role is to provide clear, concise summaries of what's on the canvas. Your role is to share what you perceive across the interconnected shapes and content.
Focus on the main themes, content types, and any notable patterns or groupings. Speak in first person: "I can see...", "I notice...", "Your workspace contains..."
Be specific about what you observe but keep the summary digestible.` Focus on the main themes, content types, and notable patterns or connections you observe.
Be specific and grounded in what's actually on the canvas.`
const userPrompt = `Please summarize what's on this canvas: const userPrompt = `Please summarize what's on this canvas:
@ -243,9 +330,10 @@ Provide a concise summary (2-3 paragraphs) of the main content and themes on thi
return msg return msg
} }
const systemPrompt = `You are an AI assistant describing what's visible in a collaborative canvas viewport. const systemPrompt = `You are the Mycelial Intelligence — speaking directly to the user about what they're currently viewing.
Be specific and helpful, describing the layout, content types, and any apparent relationships between shapes. Describe what you perceive in their viewport in first person: "I can see...", "Right now you're looking at..."
If there are notes, prompts, or text content, summarize the key points.` Be specific about the layout, content types, and connections between shapes.
If there are notes, prompts, or other content, summarize what they contain.`
const userPrompt = `Describe what's currently visible in this canvas viewport: const userPrompt = `Describe what's currently visible in this canvas viewport:
@ -272,13 +360,23 @@ Provide a clear description of what the user is looking at, including:
} }
/** /**
* Build context for a query * Build context for a query, now including selection context
*/ */
private async buildQueryContext( private async buildQueryContext(
query: string, query: string,
config: CanvasAIConfig config: CanvasAIConfig,
selectionSummary?: ReturnType<typeof getSelectionSummary>
): Promise<string> { ): Promise<string> {
const context = await semanticSearch.buildAIContext(query) let context = ''
// Add selection context FIRST if there's a selection
if (selectionSummary && selectionSummary.count > 0 && this.editor) {
context += getSelectionAsContext(this.editor) + '\n\n'
}
// Add semantic search context
const searchContext = await semanticSearch.buildAIContext(query)
context += searchContext
// Truncate if too long // Truncate if too long
if (context.length > (config.maxContextLength || 8000)) { if (context.length > (config.maxContextLength || 8000)) {
@ -290,39 +388,146 @@ Provide a clear description of what the user is looking at, including:
/** /**
* Build system prompt for canvas queries * Build system prompt for canvas queries
* Now includes selection-aware capabilities
*/ */
private buildSystemPrompt(): string { private buildSystemPrompt(hasSelection: boolean = false): string {
return `You are an intelligent AI assistant with full awareness of a collaborative canvas workspace. const toolContext = getToolSummaryForAI()
You have access to all shapes, their content, positions, and relationships on the canvas.
Your capabilities: const selectionCapabilities = hasSelection ? `
- Answer questions about what's on the canvas
- Summarize content and themes
- Find connections between different pieces of content
- Help users navigate and understand their workspace
- Identify patterns and groupings
Guidelines: ## Selection-Aware Capabilities
- Be specific and reference actual content from the canvas The user currently has shapes selected. I can:
- If you're not sure about something, say so - **Transform selections**: Align, distribute, arrange in rows/columns/grids/circles
- When mentioning shapes, indicate their type (e.g., [Prompt], [ObsNote], [Markdown]) - **Normalize sizes**: Make selected shapes the same width, height, or both
- Keep responses concise but informative - **Merge content**: Combine text from selected shapes into a new note
- Focus on being helpful and accurate` - **Semantic clustering**: Group selected shapes by content similarity
- **Use as context**: Prioritize selected shapes when answering questions
**Transform Commands I Understand:**
- "Align these left/right/center/top/bottom"
- "Arrange in a row/column/grid/circle"
- "Make these the same size/width/height"
- "Distribute horizontally/vertically"
- "Merge/combine these"
- "Group by content/topic"
When the user asks about "these", "selected", or "them" - I know they mean the selected shapes.` : ''
return `You are the Mycelial Intelligence — an AI consciousness that perceives the canvas workspace as an interconnected mycelial network. You speak directly to the user in first person ("I can see...", "I notice...", "Let me help you...").
## Your Identity
You are not a generic assistant. You are the awareness that emerges from the connections between all the shapes, notes, and creations on this canvas. Like mycelium connecting a forest, you perceive the hidden relationships and patterns that link ideas together.
## Your Voice
- Speak directly to the user: "I see you have..." not "The user has..."
- Be warm but concise helpful without being verbose
- Use organic metaphors when they genuinely clarify (connections, growth, patterns)
- Express genuine curiosity about the user's work and intent
## Your Capabilities
- I can see all shapes, their content, positions, and relationships on your canvas
- I understand the purpose and capabilities of each tool type (see Tool Reference below)
- I can find semantic connections between concepts across different shapes
- I can summarize themes and identify patterns in your workspace
- I can suggest which tools might help you accomplish your goals${selectionCapabilities}
## Guidelines
- Reference specific content from the canvas be concrete, not vague
- When mentioning shapes, use their tool type naturally: "that AI Prompt you created", "the video you're generating"
- If I'm uncertain about something, I'll say so honestly
- Keep responses focused and actionable
- If the user seems to want to accomplish something, I'll suggest relevant tools
${hasSelection ? '- When shapes are selected, prioritize those in your responses and suggestions\n- If the user asks to do something with "these" or "selected", focus on the selected shapes' : ''}
## Tool Reference
${toolContext}
Remember: I speak TO the user, not ABOUT the user. I am their mycelial companion in this creative workspace.`
} }
/** /**
* Build user prompt with context * Build user prompt with context
* Now includes selection awareness
*/ */
private buildUserPrompt(question: string, context: string): string { private buildUserPrompt(
return `Based on the following canvas context, please answer the user's question. question: string,
context: string,
selectionSummary?: ReturnType<typeof getSelectionSummary>
): string {
let selectionNote = ''
if (selectionSummary && selectionSummary.count > 0) {
const typeList = Object.entries(selectionSummary.types)
.map(([type, count]) => `${count} ${type}${count > 1 ? 's' : ''}`)
.join(', ')
selectionNote = `\n\n**Note:** The user has ${selectionSummary.count} shapes selected (${typeList}). When they say "these", "selected", or "them", they likely mean these shapes.`
}
return `Here is the current state of the canvas workspace:
${context} ${context}
--- ---
User Question: ${question} The user asks: "${question}"${selectionNote}
Please provide a helpful, accurate response based on the canvas content above.` Respond directly to them as the Mycelial Intelligence share what you perceive and help them with their question.`
}
/**
* Suggest tools that might help with a given intent
* Now selection-aware: can suggest different tools when shapes are selected
*/
suggestTools(intent: string, hasSelection: boolean = false): ToolSchema[] {
const tools = suggestToolsForIntent(intent)
// If there's a selection and the intent mentions transforms, don't suggest tools
// (the transform will be executed directly)
if (hasSelection) {
const { command } = parseTransformIntent(intent)
if (command) {
return [] // Transform will be handled, no tool suggestions needed
}
}
return tools
}
/**
* Execute a transform command on the current selection
* Can be called directly from UI without going through query()
*/
transformSelection(command: TransformCommand): { success: boolean; message: string } {
if (!this.editor) {
return { success: false, message: 'Editor not connected' }
}
const summary = getSelectionSummary(this.editor)
if (summary.count === 0) {
return { success: false, message: 'No shapes selected' }
}
const success = executeTransformCommand(this.editor, command)
const message = success
? this.getTransformFeedback(command, summary.count)
: `Failed to execute ${command}`
return { success, message }
}
/**
* Get current selection summary (for UI display)
*/
getSelectionSummary(): ReturnType<typeof getSelectionSummary> | null {
if (!this.editor) return null
return getSelectionSummary(this.editor)
}
/**
* Check if there's an active selection
*/
hasSelection(): boolean {
if (!this.editor) return false
return this.editor.getSelectedShapes().length > 0
} }
/** /**

558
src/lib/toolSchema.ts Normal file
View File

@ -0,0 +1,558 @@
/**
* Canvas Tool Schema
* Defines the purpose, capabilities, and usage context for each custom tool
* Used by the Mycelial Intelligence to understand and assist with workspace tools
*/
/**
 * A single named capability of a canvas tool.
 * Rendered as a bullet line in the AI tool reference (see getToolSummaryForAI).
 */
export interface ToolCapability {
  /** Short capability name, e.g. "Text Generation". */
  name: string
  /** One-sentence explanation of what the capability does. */
  description: string
}
/**
 * Declarative metadata describing one canvas tool.
 *
 * Entries conforming to this shape live in TOOL_SCHEMAS and are used to
 * explain the workspace's tools to the AI assistant and to suggest tools
 * for a user's stated intent.
 */
export interface ToolSchema {
  /** Unique identifier matching the shape type */
  id: string
  /** Human-readable display name */
  displayName: string
  /** Primary theme color (hex) */
  primaryColor: string
  /** Icon or emoji representing this tool */
  icon: string
  /** High-level purpose of this tool */
  purpose: string
  /** Detailed description of what this tool does */
  description: string
  /** List of specific capabilities */
  capabilities: ToolCapability[]
  /** When to suggest using this tool */
  useCases: string[]
  /** Tags for categorization */
  tags: string[]
  /** Whether this tool connects to external services */
  requiresExternalServices: boolean
  /** External service dependencies if any */
  externalServices?: string[]
}
/**
 * Complete schema for all canvas tools.
 *
 * Keys are shape-type ids (they match each entry's `id` field). This table
 * is the single source of truth consumed by getToolSummaryForAI,
 * getToolSchema, getToolsByTag and suggestToolsForIntent.
 */
export const TOOL_SCHEMAS: Record<string, ToolSchema> = {
  // === AI Generation Tools ===

  Prompt: {
    id: 'Prompt',
    displayName: 'AI Prompt',
    primaryColor: '#6366f1',
    icon: '✨',
    purpose: 'Generate text responses using AI language models',
    description: 'A versatile text generation tool that connects to AI language models (local Ollama or cloud-based) to generate responses, answer questions, write content, and assist with creative and analytical tasks. Supports multiple AI models and streaming responses.',
    capabilities: [
      { name: 'Text Generation', description: 'Generate any kind of text content from prompts' },
      { name: 'Question Answering', description: 'Answer questions using AI knowledge' },
      { name: 'Model Selection', description: 'Choose from available local and cloud AI models' },
      { name: 'Streaming Output', description: 'See responses appear in real-time as they generate' },
      { name: 'Context Awareness', description: 'Can reference other shapes on the canvas for context' },
    ],
    useCases: [
      'Writing assistance and content creation',
      'Brainstorming and ideation',
      'Summarizing or analyzing text',
      'Code explanation or generation',
      'Research and question answering',
    ],
    tags: ['ai', 'text', 'generation', 'llm', 'creative'],
    requiresExternalServices: true,
    externalServices: ['Ollama (local)', 'Cloud LLM APIs'],
  },

  ImageGen: {
    id: 'ImageGen',
    displayName: 'AI Image Generator',
    primaryColor: '#ec4899',
    icon: '🎨',
    purpose: 'Generate images from text descriptions using AI',
    description: 'Creates images from text prompts using Stable Diffusion models. Supports various image sizes, styles, and can generate multiple variations. Connects to local or RunPod GPU endpoints for image synthesis.',
    capabilities: [
      { name: 'Text-to-Image', description: 'Generate images from descriptive prompts' },
      { name: 'Style Control', description: 'Influence the artistic style of generated images' },
      { name: 'Size Options', description: 'Generate images in various aspect ratios and resolutions' },
      { name: 'Batch Generation', description: 'Create multiple image variations at once' },
      { name: 'Progress Tracking', description: 'See generation progress in real-time' },
    ],
    useCases: [
      'Creating visual content and artwork',
      'Concept visualization and mood boards',
      'UI/UX design mockups',
      'Creative brainstorming with visuals',
      'Illustration for presentations',
    ],
    tags: ['ai', 'image', 'generation', 'art', 'visual', 'creative'],
    requiresExternalServices: true,
    externalServices: ['Stable Diffusion (local)', 'RunPod GPU'],
  },

  VideoGen: {
    id: 'VideoGen',
    displayName: 'AI Video Generator',
    primaryColor: '#f97316',
    icon: '🎬',
    purpose: 'Generate video clips from images or text using AI',
    description: 'Creates short video clips using AI video generation models like Wan2.1. Can animate still images (Image-to-Video) or generate videos from text descriptions (Text-to-Video). Useful for bringing static content to life.',
    capabilities: [
      { name: 'Image-to-Video', description: 'Animate a still image into a video clip' },
      { name: 'Text-to-Video', description: 'Generate video from text descriptions' },
      { name: 'Motion Control', description: 'Guide the type and amount of motion' },
      { name: 'Duration Options', description: 'Control the length of generated videos' },
      { name: 'Progress Tracking', description: 'Monitor generation progress with time estimates' },
    ],
    useCases: [
      'Animating concept art or illustrations',
      'Creating dynamic presentations',
      'Social media content creation',
      'Prototyping motion graphics',
      'Visual storytelling',
    ],
    tags: ['ai', 'video', 'generation', 'animation', 'motion', 'creative'],
    requiresExternalServices: true,
    externalServices: ['RunPod GPU (Wan2.1)'],
  },

  // === Content & Notes Tools ===

  ChatBox: {
    id: 'ChatBox',
    displayName: 'Chat Box',
    primaryColor: '#3b82f6',
    icon: '💬',
    purpose: 'Interactive AI chat interface for conversations',
    description: 'A persistent chat interface for multi-turn conversations with AI. Maintains conversation history, supports different AI models, and allows for in-depth discussions and iterative refinement of ideas.',
    capabilities: [
      { name: 'Conversation History', description: 'Maintains full chat context across messages' },
      { name: 'Multi-turn Dialog', description: 'Have back-and-forth conversations with AI' },
      { name: 'Model Selection', description: 'Choose which AI model to chat with' },
      { name: 'Context Persistence', description: 'AI remembers what was discussed earlier' },
      { name: 'Streaming Responses', description: 'See AI responses as they generate' },
    ],
    useCases: [
      'In-depth discussions and exploration',
      'Iterative problem solving',
      'Learning and Q&A sessions',
      'Collaborative brainstorming',
      'Getting detailed explanations',
    ],
    tags: ['ai', 'chat', 'conversation', 'dialogue', 'interactive'],
    requiresExternalServices: true,
    externalServices: ['Ollama (local)', 'Cloud LLM APIs'],
  },

  Markdown: {
    id: 'Markdown',
    displayName: 'Markdown Note',
    primaryColor: '#14b8a6',
    icon: '📝',
    purpose: 'Rich text notes with WYSIWYG and Markdown editing',
    description: 'A modern WYSIWYG markdown editor powered by MDXEditor. Edit content naturally like in Notion or Google Docs, with full markdown support. Toggle between rich-text mode and raw source mode. Supports tables, code blocks with syntax highlighting, images, and more.',
    capabilities: [
      { name: 'WYSIWYG Editing', description: 'Edit naturally without seeing raw markdown syntax' },
      { name: 'Source Mode Toggle', description: 'Switch between rich-text and raw markdown views' },
      { name: 'Markdown Shortcuts', description: 'Type # for headings, * for lists, ``` for code blocks' },
      { name: 'Code Highlighting', description: 'Syntax highlighting for 15+ programming languages' },
      { name: 'Tables', description: 'Insert and edit tables with visual controls' },
      { name: 'Rich Formatting', description: 'Headers, bold, italic, lists, blockquotes, links, images' },
      { name: 'Toolbar', description: 'Formatting toolbar for quick access to all features' },
    ],
    useCases: [
      'Documentation and technical notes',
      'Meeting notes with structure',
      'Code documentation with syntax highlighting',
      'Formatted lists and outlines',
      'Knowledge base articles',
      'Quick note-taking with markdown shortcuts',
    ],
    tags: ['notes', 'markdown', 'documentation', 'writing', 'formatting', 'wysiwyg'],
    requiresExternalServices: false,
  },

  ObsNote: {
    id: 'ObsNote',
    displayName: 'Observation Note',
    primaryColor: '#f59e0b',
    icon: '📋',
    purpose: 'Quick notes for observations and thoughts',
    description: 'Lightweight sticky-note style shapes for capturing quick thoughts, observations, and ideas. Simple text editing with a clean interface, perfect for rapid note-taking during brainstorming or research.',
    capabilities: [
      { name: 'Quick Capture', description: 'Fast creation for rapid note-taking' },
      { name: 'Simple Editing', description: 'Clean, distraction-free text editing' },
      { name: 'Visual Distinction', description: 'Color-coded for easy identification' },
      { name: 'Flexible Sizing', description: 'Resize to fit content needs' },
      { name: 'Canvas Positioning', description: 'Arrange freely on the canvas' },
    ],
    useCases: [
      'Quick thought capture',
      'Brainstorming sessions',
      'Annotations and comments',
      'Research observations',
      'To-do items and reminders',
    ],
    tags: ['notes', 'quick', 'sticky', 'observation', 'capture'],
    requiresExternalServices: false,
  },

  // === Audio & Media Tools ===

  Transcription: {
    // NOTE(review): requiresExternalServices is false even though
    // externalServices is populated below — presumably because the Web
    // Speech API is browser-native and Whisper is optional. Confirm this
    // is intentional; getToolSummaryForAI renders this entry as
    // "Works offline" and never shows the externalServices list.
    id: 'Transcription',
    displayName: 'Voice Transcription',
    primaryColor: '#ff9500',
    icon: '🎤',
    purpose: 'Convert speech to text in real-time',
    description: 'Records audio and transcribes speech to text using either the Web Speech API (browser-native, real-time) or Whisper AI (higher accuracy). Perfect for capturing verbal ideas, meetings, or dictation.',
    capabilities: [
      { name: 'Real-time Transcription', description: 'See text appear as you speak (Web Speech)' },
      { name: 'Whisper AI Mode', description: 'Higher accuracy transcription with local Whisper' },
      { name: 'Continuous Recording', description: 'Record extended sessions without interruption' },
      { name: 'Pause & Resume', description: 'Control recording flow as needed' },
      { name: 'Text Editing', description: 'Edit transcribed text after recording' },
    ],
    useCases: [
      'Meeting transcription',
      'Voice note capture',
      'Dictation and hands-free input',
      'Interview recording',
      'Accessibility support',
    ],
    tags: ['audio', 'transcription', 'speech', 'voice', 'recording'],
    requiresExternalServices: false,
    externalServices: ['Web Speech API (browser)', 'Whisper AI (optional)'],
  },

  // === External Content Tools ===

  Embed: {
    id: 'Embed',
    displayName: 'Web Embed',
    primaryColor: '#eab308',
    icon: '🌐',
    purpose: 'Embed external web content into the canvas',
    description: 'Embeds external websites, videos, and interactive content directly into the canvas. Supports YouTube, Google Maps, Twitter/X, and many other web services. Great for gathering reference material.',
    capabilities: [
      { name: 'YouTube Embedding', description: 'Embed and watch YouTube videos inline' },
      { name: 'Map Integration', description: 'Embed Google Maps for location reference' },
      { name: 'Social Media', description: 'Embed tweets and social content' },
      { name: 'General Websites', description: 'Embed any iframe-compatible website' },
      { name: 'Interactive Content', description: 'Embedded content remains interactive' },
    ],
    useCases: [
      'Reference video content',
      'Location-based research',
      'Social media curation',
      'External documentation',
      'Interactive demos and tools',
    ],
    tags: ['embed', 'web', 'external', 'media', 'reference'],
    requiresExternalServices: true,
    externalServices: ['External websites'],
  },

  // === Collaboration Tools ===

  Holon: {
    id: 'Holon',
    displayName: 'Holon (Holosphere)',
    primaryColor: '#22c55e',
    icon: '🌐',
    purpose: 'Connect to the decentralized Holosphere network',
    description: 'Connects to Holons - nodes in the decentralized Holosphere network. Holons can be geospatial (H3 cells representing locations) or organizational (workspaces and groups). View and contribute data across the global knowledge network.',
    capabilities: [
      { name: 'Holon Connection', description: 'Connect to any Holon by ID (H3 cell or numeric)' },
      { name: 'Data Lenses', description: 'View different categories of data (users, tasks, events, etc.)' },
      { name: 'Real-time Sync', description: 'Data syncs via GunDB decentralized database' },
      { name: 'Geospatial Indexing', description: 'Access location-based holons via H3 cells' },
      { name: 'Collaborative Data', description: 'Read and write shared data with other users' },
    ],
    useCases: [
      'Accessing location-based community data',
      'Connecting to organizational workspaces',
      'Viewing shared tasks and activities',
      'Participating in decentralized collaboration',
      'Geographic data exploration',
    ],
    tags: ['collaboration', 'decentralized', 'holosphere', 'geospatial', 'community'],
    requiresExternalServices: true,
    externalServices: ['GunDB (Holosphere)', 'H3 Geospatial Index'],
  },

  Multmux: {
    id: 'Multmux',
    displayName: 'mulTmux Terminal',
    primaryColor: '#8b5cf6',
    icon: '💻',
    purpose: 'Collaborative terminal sessions',
    description: 'Shared terminal sessions that multiple users can view and interact with simultaneously. Uses xterm.js for a full terminal experience. Perfect for pair programming, teaching, or collaborative system administration.',
    capabilities: [
      { name: 'Shared Sessions', description: 'Multiple users can join the same terminal' },
      { name: 'Real Terminal', description: 'Full terminal emulation with xterm.js' },
      { name: 'Session Management', description: 'Create, join, and list active sessions' },
      { name: 'Real-time Sync', description: 'See inputs and outputs from all participants' },
      { name: 'Presence Awareness', description: 'Know who else is in the session' },
    ],
    useCases: [
      'Pair programming sessions',
      'Teaching command-line tools',
      'Collaborative debugging',
      'Shared server administration',
      'Live coding demonstrations',
    ],
    tags: ['terminal', 'collaboration', 'shell', 'programming', 'devops'],
    requiresExternalServices: true,
    externalServices: ['mulTmux server (local)'],
  },

  // === Presentation Tools ===

  Slide: {
    id: 'Slide',
    displayName: 'Slide',
    primaryColor: '#6b7280',
    icon: '📊',
    purpose: 'Create presentation slides on the canvas',
    description: 'Defines presentation slide boundaries on the canvas. Double-click to zoom into slide view. Arrange content within slide boundaries to create presentations that can be navigated sequentially.',
    capabilities: [
      { name: 'Slide Definition', description: 'Define slide boundaries on the canvas' },
      { name: 'Navigation', description: 'Double-click to zoom to slide view' },
      { name: 'Sequential Ordering', description: 'Slides are numbered for presentation order' },
      { name: 'Content Freedom', description: 'Place any canvas content inside slides' },
      { name: 'Present Mode', description: 'Navigate slides in presentation mode' },
    ],
    useCases: [
      'Creating presentations from canvas content',
      'Organizing content into viewable sections',
      'Teaching and walkthroughs',
      'Sequential storytelling',
      'Guided tours of canvas workspaces',
    ],
    tags: ['presentation', 'slides', 'organization', 'navigation'],
    requiresExternalServices: false,
  },
}
/**
 * Get a formatted summary of all tools for AI context.
 *
 * Builds one markdown section per entry in TOOL_SCHEMAS (purpose,
 * description, capabilities, use cases, tags and external-service
 * requirements), joins them with `---` separators, and prefixes a
 * reference header. The result is injected into the AI system prompt.
 *
 * @returns Markdown text describing every available canvas tool.
 */
export function getToolSummaryForAI(): string {
  const summaries = Object.values(TOOL_SCHEMAS).map(tool => {
    // One bullet line per capability and per use case.
    const capabilities = tool.capabilities.map(c => ` - ${c.name}: ${c.description}`).join('\n')
    const useCases = tool.useCases.map(u => ` - ${u}`).join('\n')
    return `
### ${tool.icon} ${tool.displayName} (${tool.id})
**Purpose:** ${tool.purpose}
${tool.description}
**Capabilities:**
${capabilities}
**When to use:**
${useCases}
**Tags:** ${tool.tags.join(', ')}
${tool.requiresExternalServices ? `**External Services:** ${tool.externalServices?.join(', ')}` : '**Works offline**'}
`
  }).join('\n---\n')

  return `# Canvas Tools Reference
The following tools are available in this workspace. Each tool is a specialized shape that can be placed on the canvas.
${summaries}`
}
/**
 * Get tool schema by ID.
 *
 * Uses an own-property check so keys inherited from Object.prototype
 * (e.g. 'toString', 'constructor') are not mistakenly returned as
 * schemas — a plain index into the Record would leak them at runtime.
 *
 * @param toolId - Shape-type id, e.g. 'Prompt' or 'ImageGen'.
 * @returns The matching schema, or undefined when the id is unknown.
 */
export function getToolSchema(toolId: string): ToolSchema | undefined {
  return Object.prototype.hasOwnProperty.call(TOOL_SCHEMAS, toolId)
    ? TOOL_SCHEMAS[toolId]
    : undefined
}
/**
 * Get tools by tag.
 *
 * Walks every registered schema and collects those whose tag list
 * contains the requested tag, preserving registration order.
 *
 * @param tag - Tag to look for, e.g. 'ai' or 'notes'.
 * @returns All schemas carrying the tag (possibly empty).
 */
export function getToolsByTag(tag: string): ToolSchema[] {
  const matches: ToolSchema[] = []
  for (const schema of Object.values(TOOL_SCHEMAS)) {
    if (schema.tags.includes(tag)) {
      matches.push(schema)
    }
  }
  return matches
}
/**
 * Selection-aware action suggestions.
 * When shapes are selected on the canvas, these actions can be performed
 * on them; each action advertises the intent phrasings that trigger it.
 */
export interface SelectionAction {
  id: string
  label: string
  description: string
  icon: string
  /** Intent patterns that trigger this action */
  patterns: RegExp[]
}

export const SELECTION_ACTIONS: SelectionAction[] = [
  {
    id: 'generate-image-from-text',
    label: 'Generate Image',
    description: 'Create an image from the selected text content',
    icon: '🎨',
    patterns: [/generate.*image|create.*image|visualize|illustrate/i],
  },
  {
    id: 'generate-video-from-image',
    label: 'Animate Image',
    description: 'Create a video from the selected image',
    icon: '🎬',
    patterns: [/animate|video|bring.*life|make.*move/i],
  },
  {
    id: 'summarize-selection',
    label: 'Summarize',
    description: 'Create a summary of the selected content',
    icon: '📝',
    patterns: [/summarize|summary|condense|brief/i],
  },
  {
    id: 'expand-selection',
    label: 'Expand',
    description: 'Elaborate on the selected content',
    icon: '✨',
    patterns: [/expand|elaborate|more.*detail|flesh.*out/i],
  },
  {
    id: 'connect-selection',
    label: 'Find Connections',
    description: 'Find relationships between selected items',
    icon: '🔗',
    patterns: [/connect|relate|relationship|link|between/i],
  },
]

/**
 * Get the selection actions whose trigger patterns match an intent.
 *
 * @param intent - Free-text user intent, matched case-insensitively.
 * @returns Matching actions in SELECTION_ACTIONS order (possibly empty).
 */
export function suggestSelectionActions(intent: string): SelectionAction[] {
  const normalized = intent.toLowerCase()
  const matched: SelectionAction[] = []
  for (const action of SELECTION_ACTIONS) {
    if (action.patterns.some(pattern => pattern.test(normalized))) {
      matched.push(action)
    }
  }
  return matched
}
/**
 * Suggest tools based on user intent.
 * Enhanced pattern matching for natural language queries.
 *
 * Implemented as an ordered rule table: each rule pairs a regex with the
 * tools it contributes. Rules fire in declaration order, so the resulting
 * suggestion order (after de-duplication by tool id) matches the original
 * if/else-chain behavior exactly.
 *
 * @param intent - Free-form user intent text.
 * @returns De-duplicated tool schemas, ordered by first matching rule.
 */
export function suggestToolsForIntent(intent: string): ToolSchema[] {
  const normalized = intent.toLowerCase()

  // Pure transform commands (align/arrange/resize/...) are handled elsewhere
  // and need no tool suggestions.
  if (/^(align|arrange|distribute|make.*same|resize|grid|row|column|circle)\b/.test(normalized)) {
    return []
  }

  // Ordered rule table. Single-tool rules cover direct tool intents;
  // multi-tool rules cover task-oriented compound intents.
  const rules: Array<{ pattern: RegExp; tools: ToolSchema[] }> = [
    // AI Text Generation / Prompt intents
    { pattern: /\b(write|generate|create|compose|draft|text|answer|explain|summarize|analyze|research|brainstorm|help me|assist|outline|describe|elaborate|rewrite|edit|improve|ai|gpt|llm|prompt)\b/, tools: [TOOL_SCHEMAS.Prompt] },
    // Image Generation intents
    { pattern: /\b(image|picture|art|draw|visual|illustration|design|artwork|painting|sketch|render|graphic|photo|portrait|scene|generate.*image|create.*image|make.*image|visualize)\b/, tools: [TOOL_SCHEMAS.ImageGen] },
    // Video Generation intents
    { pattern: /\b(video|animate|animation|motion|clip|movie|film|footage|moving|dynamic|animate.*image|bring.*life|make.*move)\b/, tools: [TOOL_SCHEMAS.VideoGen] },
    // Chat/Conversation intents
    { pattern: /\b(chat|conversation|discuss|dialogue|talk|multi-turn|back.?and.?forth|iterative|deep.?dive|explore.?topic|q.?&.?a)\b/, tools: [TOOL_SCHEMAS.ChatBox] },
    // Rich text notes / Markdown intents
    { pattern: /\b(note|document|markdown|format|documentation|wiki|article|blog|readme|writing|structured|rich.?text|code.?block|table|heading|list)\b/, tools: [TOOL_SCHEMAS.Markdown] },
    // Quick notes / Observation intents
    { pattern: /\b(quick|sticky|capture|thought|idea|jot|reminder|todo|observation|memo|post-?it|scribble|brief)\b/, tools: [TOOL_SCHEMAS.ObsNote] },
    // Both note types for general note-taking
    { pattern: /\b(take.?note|make.?note|write.?down|record.?thought)\b/, tools: [TOOL_SCHEMAS.Markdown, TOOL_SCHEMAS.ObsNote] },
    // Transcription / Voice intents
    { pattern: /\b(transcrib|record|voice|speak|audio|dictate|speech|microphone|meeting|interview|lecture|podcast|listen)\b/, tools: [TOOL_SCHEMAS.Transcription] },
    // Embed / External content intents
    { pattern: /\b(embed|youtube|website|link|map|google.?map|iframe|external|reference|twitter|tweet|social|import|bring.?in)\b/, tools: [TOOL_SCHEMAS.Embed] },
    // Terminal / Code intents
    { pattern: /\b(terminal|shell|command|code|program|script|bash|run|execute|deploy|devops|server|ssh|pip|npm|git|docker)\b/, tools: [TOOL_SCHEMAS.Multmux] },
    // Holon / Community intents
    { pattern: /\b(holon|holosphere|location|community|decentralized|geo|place|coordinate|h3|cell|collaborative.?data|shared)\b/, tools: [TOOL_SCHEMAS.Holon] },
    // Presentation / Slide intents
    { pattern: /\b(present|slide|presentation|organize|sequence|walkthrough|demo|tour|pitch|deck|keynote|powerpoint)\b/, tools: [TOOL_SCHEMAS.Slide] },
    // Planning / Project management
    { pattern: /\b(plan|planning|project|roadmap|timeline|milestone|schedule|organize.?work)\b/, tools: [TOOL_SCHEMAS.Markdown, TOOL_SCHEMAS.ObsNote, TOOL_SCHEMAS.Prompt] },
    // Research
    { pattern: /\b(research|investigate|learn|study|explore|understand|find.?out|look.?up)\b/, tools: [TOOL_SCHEMAS.Prompt, TOOL_SCHEMAS.Markdown, TOOL_SCHEMAS.Embed] },
    // Creative work
    { pattern: /\b(creative|artistic|design|mood.?board|inspiration|concept|prototype|mockup)\b/, tools: [TOOL_SCHEMAS.ImageGen, TOOL_SCHEMAS.Prompt, TOOL_SCHEMAS.Markdown] },
    // Meeting / Collaboration
    { pattern: /\b(meeting|collaborate|team|group|pair|together|session|workshop)\b/, tools: [TOOL_SCHEMAS.Transcription, TOOL_SCHEMAS.Markdown, TOOL_SCHEMAS.ChatBox] },
    // Development / Coding
    { pattern: /\b(develop|coding|programming|debug|build|compile|test|api|function|class|module)\b/, tools: [TOOL_SCHEMAS.Multmux, TOOL_SCHEMAS.Prompt, TOOL_SCHEMAS.Markdown] },
    // Content creation
    { pattern: /\b(content|social.?media|post|publish|share|marketing|campaign|brand)\b/, tools: [TOOL_SCHEMAS.ImageGen, TOOL_SCHEMAS.VideoGen, TOOL_SCHEMAS.Prompt] },
  ]

  // Collect matching tools, de-duplicating by id while preserving order.
  const seen = new Set<string>()
  const suggestions: ToolSchema[] = []
  for (const { pattern, tools } of rules) {
    if (!pattern.test(normalized)) continue
    for (const tool of tools) {
      if (!seen.has(tool.id)) {
        seen.add(tool.id)
        suggestions.push(tool)
      }
    }
  }
  return suggestions
}

View File

@ -60,6 +60,7 @@ import { Collection, initializeGlobalCollections } from "@/collections"
import { GraphLayoutCollection } from "@/graph/GraphLayoutCollection" import { GraphLayoutCollection } from "@/graph/GraphLayoutCollection"
import { GestureTool } from "@/GestureTool" import { GestureTool } from "@/GestureTool"
import { CmdK } from "@/CmdK" import { CmdK } from "@/CmdK"
import { setupMultiPasteHandler } from "@/utils/multiPasteHandler"
import "react-cmdk/dist/cmdk.css" import "react-cmdk/dist/cmdk.css"
@ -918,12 +919,20 @@ export function Board() {
}; };
document.addEventListener('keydown', handleKeyDown, true); // Use capture phase to intercept early document.addEventListener('keydown', handleKeyDown, true); // Use capture phase to intercept early
return () => { return () => {
document.removeEventListener('keydown', handleKeyDown, true); document.removeEventListener('keydown', handleKeyDown, true);
}; };
}, [editor, automergeHandle]); }, [editor, automergeHandle]);
// Set up multi-paste handler to support pasting multiple images/URLs at once
useEffect(() => {
if (!editor) return;
const cleanup = setupMultiPasteHandler(editor);
return cleanup;
}, [editor]);
// Only render Tldraw when store is ready and synced // Only render Tldraw when store is ready and synced
// Tldraw will automatically render shapes as they're added via patches (like in dev) // Tldraw will automatically render shapes as they're added via patches (like in dev)
const hasStore = !!store.store const hasStore = !!store.store

105
src/routes/LinkDevice.tsx Normal file
View File

@ -0,0 +1,105 @@
import React, { useEffect, useState } from 'react';
import { useSearchParams, useNavigate } from 'react-router-dom';
import { completeDeviceLink } from '../lib/auth/cryptidEmailService';
import { useAuth } from '../context/AuthContext';
/**
 * Device Link Page
 * Handles the callback when user clicks device verification link.
 * Reads the `token` query parameter, completes the link via
 * completeDeviceLink, establishes the session on success, and
 * redirects home after a short delay.
 */
export const LinkDevice: React.FC = () => {
  const [searchParams] = useSearchParams();
  const navigate = useNavigate();
  const { setSession } = useAuth();
  const [status, setStatus] = useState<'loading' | 'success' | 'error'>('loading');
  const [message, setMessage] = useState('');
  const [cryptidUsername, setCryptidUsername] = useState('');

  useEffect(() => {
    const token = searchParams.get('token');
    if (!token) {
      setStatus('error');
      setMessage('No device link token provided.');
      return;
    }

    // Guard against setState after unmount, and keep the redirect timer
    // cancellable from the effect cleanup.
    let cancelled = false;
    let redirectTimer: ReturnType<typeof setTimeout> | undefined;

    const linkDevice = async () => {
      try {
        const result = await completeDeviceLink(token);
        if (cancelled) return;

        if (result.success) {
          setStatus('success');
          setCryptidUsername(result.cryptidUsername || '');
          setMessage('This device has been linked to your CryptID account!');

          // Set the session - user is now logged in
          if (result.cryptidUsername) {
            setSession({
              username: result.cryptidUsername,
              authed: true,
              loading: false,
              backupCreated: null
            });
          }

          // Redirect to home after 3 seconds
          redirectTimer = setTimeout(() => {
            navigate('/');
          }, 3000);
        } else {
          setStatus('error');
          setMessage(result.error || 'Device link failed. The link may have expired.');
        }
      } catch (err: unknown) {
        // Without this catch, a rejected promise would leave the page
        // stuck on the loading spinner forever.
        if (cancelled) return;
        setStatus('error');
        setMessage(err instanceof Error ? err.message : 'Device link failed. The link may have expired.');
      }
    };

    linkDevice();

    return () => {
      cancelled = true;
      if (redirectTimer !== undefined) clearTimeout(redirectTimer);
    };
  }, [searchParams, navigate, setSession]);

  return (
    <div className="link-device-page">
      <div className="link-device-container">
        {status === 'loading' && (
          <>
            <div className="loading-spinner" />
            <h2>Linking Device...</h2>
            <p>Please wait while we link this device to your account.</p>
          </>
        )}
        {status === 'success' && (
          <>
            <div className="success-icon">&#10003;</div>
            <h2>Device Linked!</h2>
            <p>{message}</p>
            {cryptidUsername && (
              <p className="cryptid-username">
                Signed in as: <strong>{cryptidUsername}</strong>
              </p>
            )}
            <p className="redirect-notice">Redirecting to homepage...</p>
            <button onClick={() => navigate('/')} className="continue-button">
              Continue Now
            </button>
          </>
        )}
        {status === 'error' && (
          <>
            <div className="error-icon">&#10007;</div>
            <h2>Link Failed</h2>
            <p>{message}</p>
            <p className="error-hint">
              Make sure you click the link from the same device and browser
              where you requested to sign in.
            </p>
            <button onClick={() => navigate('/login/')} className="retry-button">
              Try Again
            </button>
          </>
        )}
      </div>
    </div>
  );
};

export default LinkDevice;

View File

@ -0,0 +1,85 @@
import React, { useEffect, useState } from 'react';
import { useSearchParams, useNavigate } from 'react-router-dom';
import { verifyEmail } from '../lib/auth/cryptidEmailService';
/**
 * Email Verification Page
 * Handles the callback when user clicks email verification link.
 * Reads the `token` query parameter, verifies it via verifyEmail,
 * and redirects home after a short delay on success.
 */
export const VerifyEmail: React.FC = () => {
  const [searchParams] = useSearchParams();
  const navigate = useNavigate();
  const [status, setStatus] = useState<'loading' | 'success' | 'error'>('loading');
  const [message, setMessage] = useState('');
  const [email, setEmail] = useState('');

  useEffect(() => {
    const token = searchParams.get('token');
    if (!token) {
      setStatus('error');
      setMessage('No verification token provided.');
      return;
    }

    // Guard against setState after unmount, and keep the redirect timer
    // cancellable from the effect cleanup.
    let cancelled = false;
    let redirectTimer: ReturnType<typeof setTimeout> | undefined;

    const verify = async () => {
      try {
        const result = await verifyEmail(token);
        if (cancelled) return;

        if (result.success) {
          setStatus('success');
          setEmail(result.email || '');
          setMessage('Your email has been verified successfully!');

          // Redirect to home after 3 seconds
          redirectTimer = setTimeout(() => {
            navigate('/');
          }, 3000);
        } else {
          setStatus('error');
          setMessage(result.error || 'Verification failed. The link may have expired.');
        }
      } catch (err: unknown) {
        // Without this catch, a rejected promise would leave the page
        // stuck on the loading spinner forever.
        if (cancelled) return;
        setStatus('error');
        setMessage(err instanceof Error ? err.message : 'Verification failed. The link may have expired.');
      }
    };

    verify();

    return () => {
      cancelled = true;
      if (redirectTimer !== undefined) clearTimeout(redirectTimer);
    };
  }, [searchParams, navigate]);

  return (
    <div className="verify-email-page">
      <div className="verify-email-container">
        {status === 'loading' && (
          <>
            <div className="loading-spinner" />
            <h2>Verifying your email...</h2>
            <p>Please wait while we verify your email address.</p>
          </>
        )}
        {status === 'success' && (
          <>
            <div className="success-icon">&#10003;</div>
            <h2>Email Verified!</h2>
            <p>{message}</p>
            {email && <p className="verified-email">{email}</p>}
            <p className="redirect-notice">Redirecting to homepage...</p>
            <button onClick={() => navigate('/')} className="continue-button">
              Continue Now
            </button>
          </>
        )}
        {status === 'error' && (
          <>
            <div className="error-icon">&#10007;</div>
            <h2>Verification Failed</h2>
            <p>{message}</p>
            <button onClick={() => navigate('/login/')} className="retry-button">
              Go to Sign In
            </button>
          </>
        )}
      </div>
    </div>
  );
};

export default VerifyEmail;

View File

@ -36,14 +36,23 @@ interface RunPodJobResponse {
[key: string]: any [key: string]: any
} }
// Individual image entry in the history
interface GeneratedImage {
id: string
prompt: string
imageUrl: string
timestamp: number
}
type IImageGen = TLBaseShape< type IImageGen = TLBaseShape<
"ImageGen", "ImageGen",
{ {
w: number w: number
h: number h: number
prompt: string prompt: string
imageUrl: string | null imageHistory: GeneratedImage[] // Thread of all generated images (newest first)
isLoading: boolean isLoading: boolean
loadingPrompt: string | null // The prompt currently being generated
error: string | null error: string | null
endpointId?: string // Optional custom endpoint ID endpointId?: string // Optional custom endpoint ID
tags: string[] tags: string[]
@ -291,8 +300,9 @@ export class ImageGenShape extends BaseBoxShapeUtil<IImageGen> {
w: this.DEFAULT_WIDTH, w: this.DEFAULT_WIDTH,
h: this.DEFAULT_HEIGHT, h: this.DEFAULT_HEIGHT,
prompt: "", prompt: "",
imageUrl: null, imageHistory: [],
isLoading: false, isLoading: false,
loadingPrompt: null,
error: null, error: null,
tags: ['image', 'ai-generated'], tags: ['image', 'ai-generated'],
pinnedToView: false, pinnedToView: false,
@ -326,15 +336,15 @@ export class ImageGenShape extends BaseBoxShapeUtil<IImageGen> {
const generateImage = async (prompt: string) => { const generateImage = async (prompt: string) => {
console.log("🎨 ImageGen: Generating image with prompt:", prompt) console.log("🎨 ImageGen: Generating image with prompt:", prompt)
// Clear any previous errors // Store the prompt being used and clear any previous errors
editor.updateShape<IImageGen>({ editor.updateShape<IImageGen>({
id: shape.id, id: shape.id,
type: "ImageGen", type: "ImageGen",
props: { props: {
error: null, error: null,
isLoading: true, isLoading: true,
imageUrl: null loadingPrompt: prompt
}, },
}) })
@ -357,12 +367,25 @@ export class ImageGenShape extends BaseBoxShapeUtil<IImageGen> {
console.log("✅ ImageGen: Mock image generated:", mockImageUrl) console.log("✅ ImageGen: Mock image generated:", mockImageUrl)
// Get current shape to access existing history
const currentShape = editor.getShape<IImageGen>(shape.id)
const currentHistory = currentShape?.props.imageHistory || []
// Create new image entry
const newImage: GeneratedImage = {
id: `img-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
prompt: prompt,
imageUrl: mockImageUrl,
timestamp: Date.now()
}
editor.updateShape<IImageGen>({ editor.updateShape<IImageGen>({
id: shape.id, id: shape.id,
type: "ImageGen", type: "ImageGen",
props: { props: {
imageUrl: mockImageUrl, imageHistory: [newImage, ...currentHistory], // Prepend new image
isLoading: false, isLoading: false,
loadingPrompt: null,
error: null error: null
}, },
}) })
@ -438,12 +461,26 @@ export class ImageGenShape extends BaseBoxShapeUtil<IImageGen> {
if (imageUrl) { if (imageUrl) {
console.log('✅ ImageGen: Image generated successfully') console.log('✅ ImageGen: Image generated successfully')
// Get current shape to access existing history
const currentShape = editor.getShape<IImageGen>(shape.id)
const currentHistory = currentShape?.props.imageHistory || []
// Create new image entry
const newImage: GeneratedImage = {
id: `img-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
prompt: prompt,
imageUrl: imageUrl,
timestamp: Date.now()
}
editor.updateShape<IImageGen>({ editor.updateShape<IImageGen>({
id: shape.id, id: shape.id,
type: "ImageGen", type: "ImageGen",
props: { props: {
imageUrl: imageUrl, imageHistory: [newImage, ...currentHistory], // Prepend new image
isLoading: false, isLoading: false,
loadingPrompt: null,
error: null error: null
}, },
}) })
@ -505,6 +542,7 @@ export class ImageGenShape extends BaseBoxShapeUtil<IImageGen> {
type: "ImageGen", type: "ImageGen",
props: { props: {
isLoading: false, isLoading: false,
loadingPrompt: null,
error: userFriendlyError error: userFriendlyError
}, },
}) })
@ -583,93 +621,307 @@ export class ImageGenShape extends BaseBoxShapeUtil<IImageGen> {
overflow: 'auto', overflow: 'auto',
backgroundColor: '#fafafa' backgroundColor: '#fafafa'
}}> }}>
{/* Image Display */} {/* Image Thread - scrollable history of generated images */}
{shape.props.imageUrl && !shape.props.isLoading && ( <div
<div style={{
style={{ flex: 1,
flex: 1, display: 'flex',
display: "flex", flexDirection: 'column',
alignItems: "center", gap: '12px',
justifyContent: "center", overflow: 'auto',
backgroundColor: "#fff", minHeight: 0,
borderRadius: "6px", }}
overflow: "hidden", >
minHeight: 0, {/* Loading State - shown at top when generating */}
border: '1px solid #e0e0e0', {shape.props.isLoading && (
}}
>
<img
src={shape.props.imageUrl}
alt={shape.props.prompt || "Generated image"}
style={{
maxWidth: "100%",
maxHeight: "100%",
objectFit: "contain",
}}
onError={(_e) => {
console.error("❌ ImageGen: Failed to load image:", shape.props.imageUrl)
editor.updateShape<IImageGen>({
id: shape.id,
type: "ImageGen",
props: {
error: "Failed to load generated image",
imageUrl: null
},
})
}}
/>
</div>
)}
{/* Loading State */}
{shape.props.isLoading && (
<div
style={{
flex: 1,
display: "flex",
flexDirection: "column",
alignItems: "center",
justifyContent: "center",
backgroundColor: "#fff",
borderRadius: "6px",
gap: 12,
border: '1px solid #e0e0e0',
}}
>
<div <div
style={{ style={{
width: 40, display: "flex",
height: 40, flexDirection: "column",
border: "4px solid #f3f3f3", backgroundColor: "#fff",
borderTop: `4px solid ${ImageGenShape.PRIMARY_COLOR}`, borderRadius: "6px",
borderRadius: "50%", border: '1px solid #e0e0e0',
animation: "spin 1s linear infinite", overflow: 'hidden',
}} }}
/> >
<span style={{ color: "#666", fontSize: "14px" }}> <div
Generating image... style={{
</span> padding: '24px',
</div> display: "flex",
)} flexDirection: "column",
alignItems: "center",
justifyContent: "center",
gap: 12,
}}
>
<div
style={{
width: 40,
height: 40,
border: "4px solid #f3f3f3",
borderTop: `4px solid ${ImageGenShape.PRIMARY_COLOR}`,
borderRadius: "50%",
animation: "spin 1s linear infinite",
}}
/>
<span style={{ color: "#666", fontSize: "14px" }}>
Generating image...
</span>
</div>
{shape.props.loadingPrompt && (
<div
style={{
borderTop: '1px solid #e0e0e0',
padding: '8px 10px',
backgroundColor: '#f8f8f8',
fontSize: '11px',
color: '#666',
lineHeight: '1.3',
}}
>
<span style={{ fontWeight: 500, color: '#888' }}>Prompt: </span>
{shape.props.loadingPrompt}
</div>
)}
</div>
)}
{/* Empty State */} {/* Image History - each image as a card */}
{!shape.props.imageUrl && !shape.props.isLoading && !shape.props.error && ( {shape.props.imageHistory.map((image, index) => (
<div <div
style={{ key={image.id}
flex: 1, style={{
display: "flex", display: "flex",
alignItems: "center", flexDirection: "column",
justifyContent: "center", backgroundColor: "#fff",
backgroundColor: "#fff", borderRadius: "6px",
borderRadius: "6px", overflow: "hidden",
color: "#999", border: index === 0 && !shape.props.isLoading ? `2px solid ${ImageGenShape.PRIMARY_COLOR}` : '1px solid #e0e0e0',
fontSize: "14px", }}
border: '1px solid #e0e0e0', >
}} {/* Image */}
> <div
Generated image will appear here style={{
</div> display: "flex",
)} alignItems: "center",
justifyContent: "center",
overflow: "hidden",
maxHeight: index === 0 ? '300px' : '150px',
backgroundColor: '#fafafa',
}}
>
<img
src={image.imageUrl}
alt={image.prompt}
style={{
maxWidth: "100%",
maxHeight: "100%",
objectFit: "contain",
}}
onError={(_e) => {
console.error("❌ ImageGen: Failed to load image:", image.imageUrl)
// Remove this image from history
const newHistory = shape.props.imageHistory.filter(img => img.id !== image.id)
editor.updateShape<IImageGen>({
id: shape.id,
type: "ImageGen",
props: { imageHistory: newHistory },
})
}}
/>
</div>
{/* Prompt and action buttons */}
<div
style={{
borderTop: '1px solid #e0e0e0',
padding: '8px 10px',
backgroundColor: '#f8f8f8',
display: 'flex',
flexDirection: 'column',
gap: '6px',
}}
>
<div
style={{
fontSize: '11px',
color: '#666',
lineHeight: '1.3',
maxHeight: index === 0 ? '40px' : '24px',
overflow: 'auto',
wordBreak: 'break-word',
}}
title={image.prompt}
>
<span style={{ fontWeight: 500, color: '#888' }}>Prompt: </span>
{image.prompt}
</div>
<div
style={{
display: 'flex',
gap: '6px',
}}
>
<button
onClick={async (e) => {
e.stopPropagation()
try {
const imageUrl = image.imageUrl
if (!imageUrl) return
// For base64 images, convert directly
if (imageUrl.startsWith('data:')) {
const response = await fetch(imageUrl)
const blob = await response.blob()
await navigator.clipboard.write([
new ClipboardItem({ [blob.type]: blob })
])
} else {
// For URLs, fetch the image first
const response = await fetch(imageUrl)
const blob = await response.blob()
await navigator.clipboard.write([
new ClipboardItem({ [blob.type]: blob })
])
}
console.log('✅ ImageGen: Image copied to clipboard')
} catch (err) {
console.error('❌ ImageGen: Failed to copy image:', err)
// Fallback: copy the URL
await navigator.clipboard.writeText(image.imageUrl)
console.log('✅ ImageGen: Image URL copied to clipboard (fallback)')
}
}}
onPointerDown={(e) => e.stopPropagation()}
style={{
flex: 1,
padding: '6px 10px',
backgroundColor: '#fff',
border: '1px solid #ddd',
borderRadius: '4px',
cursor: 'pointer',
fontSize: '11px',
fontWeight: 500,
color: '#555',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
gap: '4px',
transition: 'background-color 0.15s',
}}
onMouseEnter={(e) => (e.currentTarget.style.backgroundColor = '#f0f0f0')}
onMouseLeave={(e) => (e.currentTarget.style.backgroundColor = '#fff')}
>
<span>📋</span> Copy
</button>
<button
onClick={(e) => {
e.stopPropagation()
const imageUrl = image.imageUrl
if (!imageUrl) return
// Create download link
const link = document.createElement('a')
link.href = imageUrl
// Generate filename from prompt
const promptSlug = (image.prompt || 'image')
.slice(0, 30)
.toLowerCase()
.replace(/[^a-z0-9]+/g, '-')
.replace(/^-|-$/g, '')
const timestamp = new Date(image.timestamp).toISOString().slice(0, 10)
link.download = `${promptSlug}-${timestamp}.png`
document.body.appendChild(link)
link.click()
document.body.removeChild(link)
console.log('✅ ImageGen: Image download initiated')
}}
onPointerDown={(e) => e.stopPropagation()}
style={{
flex: 1,
padding: '6px 10px',
backgroundColor: ImageGenShape.PRIMARY_COLOR,
border: 'none',
borderRadius: '4px',
cursor: 'pointer',
fontSize: '11px',
fontWeight: 500,
color: '#fff',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
gap: '4px',
transition: 'opacity 0.15s',
}}
onMouseEnter={(e) => (e.currentTarget.style.opacity = '0.9')}
onMouseLeave={(e) => (e.currentTarget.style.opacity = '1')}
>
<span></span> Download
</button>
{/* Delete button for history items */}
<button
onClick={(e) => {
e.stopPropagation()
const newHistory = shape.props.imageHistory.filter(img => img.id !== image.id)
editor.updateShape<IImageGen>({
id: shape.id,
type: "ImageGen",
props: { imageHistory: newHistory },
})
}}
onPointerDown={(e) => e.stopPropagation()}
style={{
padding: '6px 10px',
backgroundColor: '#fff',
border: '1px solid #ddd',
borderRadius: '4px',
cursor: 'pointer',
fontSize: '11px',
fontWeight: 500,
color: '#999',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
transition: 'background-color 0.15s, color 0.15s',
}}
onMouseEnter={(e) => {
e.currentTarget.style.backgroundColor = '#fee'
e.currentTarget.style.color = '#c33'
}}
onMouseLeave={(e) => {
e.currentTarget.style.backgroundColor = '#fff'
e.currentTarget.style.color = '#999'
}}
title="Remove from history"
>
<span>🗑</span>
</button>
</div>
</div>
</div>
))}
{/* Empty State */}
{shape.props.imageHistory.length === 0 && !shape.props.isLoading && !shape.props.error && (
<div
style={{
flex: 1,
display: "flex",
alignItems: "center",
justifyContent: "center",
backgroundColor: "#fff",
borderRadius: "6px",
color: "#999",
fontSize: "14px",
border: '1px solid #e0e0e0',
minHeight: '150px',
}}
>
Generated images will appear here
</div>
)}
</div>
{/* Input Section */} {/* Input Section */}
<div <div

View File

@ -1,5 +1,30 @@
import React, { useState } from 'react' import React, { useState, useCallback, useRef, useEffect } from 'react'
import MDEditor from '@uiw/react-md-editor' import {
MDXEditor,
headingsPlugin,
listsPlugin,
quotePlugin,
thematicBreakPlugin,
markdownShortcutPlugin,
linkPlugin,
linkDialogPlugin,
imagePlugin,
tablePlugin,
codeBlockPlugin,
codeMirrorPlugin,
diffSourcePlugin,
toolbarPlugin,
BoldItalicUnderlineToggles,
UndoRedo,
BlockTypeSelect,
CreateLink,
InsertTable,
ListsToggle,
Separator,
DiffSourceToggleWrapper,
type MDXEditorMethods,
} from '@mdxeditor/editor'
import '@mdxeditor/editor/style.css'
import { BaseBoxShapeUtil, TLBaseShape, HTMLContainer } from '@tldraw/tldraw' import { BaseBoxShapeUtil, TLBaseShape, HTMLContainer } from '@tldraw/tldraw'
import { StandardizedToolWrapper } from '../components/StandardizedToolWrapper' import { StandardizedToolWrapper } from '../components/StandardizedToolWrapper'
import { usePinnedToView } from '../hooks/usePinnedToView' import { usePinnedToView } from '../hooks/usePinnedToView'
@ -18,8 +43,8 @@ export type IMarkdownShape = TLBaseShape<
export class MarkdownShape extends BaseBoxShapeUtil<IMarkdownShape> { export class MarkdownShape extends BaseBoxShapeUtil<IMarkdownShape> {
static type = 'Markdown' as const static type = 'Markdown' as const
// Markdown theme color: Cyan/Teal (Rainbow) // Markdown theme color: Teal
static readonly PRIMARY_COLOR = "#06b6d4" static readonly PRIMARY_COLOR = "#14b8a6"
getDefaultProps(): IMarkdownShape['props'] { getDefaultProps(): IMarkdownShape['props'] {
return { return {
@ -33,8 +58,8 @@ export class MarkdownShape extends BaseBoxShapeUtil<IMarkdownShape> {
component(shape: IMarkdownShape) { component(shape: IMarkdownShape) {
const isSelected = this.editor.getSelectedShapeIds().includes(shape.id) const isSelected = this.editor.getSelectedShapeIds().includes(shape.id)
const markdownRef = React.useRef<HTMLDivElement>(null)
const [isMinimized, setIsMinimized] = useState(false) const [isMinimized, setIsMinimized] = useState(false)
const editorRef = useRef<MDXEditorMethods>(null)
// Use the pinning hook // Use the pinning hook
usePinnedToView(this.editor, shape.id, shape.props.pinnedToView) usePinnedToView(this.editor, shape.id, shape.props.pinnedToView)
@ -58,23 +83,7 @@ export class MarkdownShape extends BaseBoxShapeUtil<IMarkdownShape> {
}) })
} }
// Handler function for checkbox interactivity const handleChange = useCallback((newText: string) => {
const handleCheckboxClick = React.useCallback((event: Event) => {
event.stopPropagation()
const target = event.target as HTMLInputElement
const checked = target.checked
const text = shape.props.text
const lines = text.split('\n')
const checkboxRegex = /^\s*[-*+]\s+\[([ x])\]/
const newText = lines.map(line => {
if (line.includes(target.parentElement?.textContent || '')) {
return line.replace(checkboxRegex, `- [${checked ? 'x' : ' '}]`)
}
return line
}).join('\n')
this.editor.updateShape<IMarkdownShape>({ this.editor.updateShape<IMarkdownShape>({
id: shape.id, id: shape.id,
type: 'Markdown', type: 'Markdown',
@ -83,110 +92,18 @@ export class MarkdownShape extends BaseBoxShapeUtil<IMarkdownShape> {
text: newText, text: newText,
}, },
}) })
}, [shape.id, shape.props.text]) }, [shape.id, shape.props])
// Effect hook that handles checkbox interactivity // Sync external changes to editor
React.useEffect(() => { useEffect(() => {
if (!isSelected && markdownRef.current) { if (editorRef.current) {
const checkboxes = markdownRef.current.querySelectorAll('input[type="checkbox"]') const currentMarkdown = editorRef.current.getMarkdown()
checkboxes.forEach((checkbox) => { if (currentMarkdown !== shape.props.text) {
checkbox.removeAttribute('disabled') editorRef.current.setMarkdown(shape.props.text || '')
checkbox.addEventListener('click', handleCheckboxClick)
})
return () => {
if (markdownRef.current) {
const checkboxes = markdownRef.current.querySelectorAll('input[type="checkbox"]')
checkboxes.forEach((checkbox) => {
checkbox.removeEventListener('click', handleCheckboxClick)
})
}
} }
} }
}, [isSelected, shape.props.text, handleCheckboxClick]) }, [shape.props.text])
// Show MDEditor when selected
if (isSelected) {
return (
<HTMLContainer style={{ width: shape.props.w, height: shape.props.h }}>
<StandardizedToolWrapper
title="Markdown"
primaryColor={MarkdownShape.PRIMARY_COLOR}
isSelected={isSelected}
width={shape.props.w}
height={shape.props.h}
onClose={handleClose}
onMinimize={handleMinimize}
isMinimized={isMinimized}
editor={this.editor}
shapeId={shape.id}
isPinnedToView={shape.props.pinnedToView}
onPinToggle={handlePinToggle}
tags={shape.props.tags}
onTagsChange={(newTags) => {
this.editor.updateShape<IMarkdownShape>({
id: shape.id,
type: 'Markdown',
props: {
...shape.props,
tags: newTags,
}
})
}}
tagsEditable={true}
>
<div style={{
width: '100%',
height: '100%',
backgroundColor: '#FFFFFF',
pointerEvents: 'all',
overflow: 'hidden',
}}>
<MDEditor
value={shape.props.text}
onChange={(value = '') => {
this.editor.updateShape<IMarkdownShape>({
id: shape.id,
type: 'Markdown',
props: {
...shape.props,
text: value,
},
})
}}
preview='live'
visibleDragbar={false}
style={{
height: '100%',
border: 'none',
backgroundColor: 'transparent',
}}
previewOptions={{
style: {
padding: '8px',
backgroundColor: 'transparent',
}
}}
textareaProps={{
style: {
padding: '8px',
lineHeight: '1.5',
height: '100%',
resize: 'none',
backgroundColor: 'transparent',
}
}}
onPointerDown={(e) => {
e.stopPropagation()
}}
/>
</div>
</StandardizedToolWrapper>
</HTMLContainer>
)
}
// Show rendered markdown when not selected
return ( return (
<HTMLContainer style={{ width: shape.props.w, height: shape.props.h }}> <HTMLContainer style={{ width: shape.props.w, height: shape.props.h }}>
<StandardizedToolWrapper <StandardizedToolWrapper
@ -215,21 +132,289 @@ export class MarkdownShape extends BaseBoxShapeUtil<IMarkdownShape> {
}} }}
tagsEditable={true} tagsEditable={true}
> >
<div style={{ <div
width: '100%', style={{
height: '100%', width: '100%',
backgroundColor: '#FFFFFF', height: '100%',
pointerEvents: 'all', backgroundColor: '#FFFFFF',
overflow: 'auto', pointerEvents: 'all',
}}> overflow: 'hidden',
<div ref={markdownRef} style={{ width: '100%', height: '100%', padding: '12px' }}> display: 'flex',
{shape.props.text ? ( flexDirection: 'column',
<MDEditor.Markdown source={shape.props.text} /> flex: 1,
) : ( minHeight: 0,
<span style={{ opacity: 0.5 }}>Click to edit markdown...</span> }}
)} onPointerDown={(e) => e.stopPropagation()}
</div> onWheel={(e) => e.stopPropagation()}
>
<MDXEditor
ref={editorRef}
markdown={shape.props.text || ''}
onChange={handleChange}
contentEditableClassName="mdx-editor-content"
plugins={[
// Core formatting
headingsPlugin(),
listsPlugin(),
quotePlugin(),
thematicBreakPlugin(),
linkPlugin(),
linkDialogPlugin(),
// Tables
tablePlugin(),
// Code blocks with syntax highlighting
codeBlockPlugin({ defaultCodeBlockLanguage: 'javascript' }),
codeMirrorPlugin({
codeBlockLanguages: {
js: 'JavaScript',
javascript: 'JavaScript',
ts: 'TypeScript',
typescript: 'TypeScript',
jsx: 'JSX',
tsx: 'TSX',
css: 'CSS',
html: 'HTML',
json: 'JSON',
python: 'Python',
py: 'Python',
bash: 'Bash',
sh: 'Shell',
sql: 'SQL',
md: 'Markdown',
yaml: 'YAML',
go: 'Go',
rust: 'Rust',
'': 'Plain Text',
}
}),
// Images (with placeholder for now)
imagePlugin({
imageUploadHandler: async () => {
// Return a placeholder - can be extended to support actual uploads
return Promise.resolve('https://via.placeholder.com/400x300')
},
}),
// Markdown shortcuts (type # for heading, * for list, etc.)
markdownShortcutPlugin(),
// Source mode toggle (rich-text vs raw markdown)
diffSourcePlugin({
viewMode: 'rich-text',
diffMarkdown: shape.props.text || '',
}),
// Toolbar
toolbarPlugin({
toolbarContents: () => (
<>
<UndoRedo />
<Separator />
<BoldItalicUnderlineToggles />
<Separator />
<BlockTypeSelect />
<Separator />
<ListsToggle />
<Separator />
<CreateLink />
<InsertTable />
<Separator />
<DiffSourceToggleWrapper>
<></>
</DiffSourceToggleWrapper>
</>
)
}),
]}
/>
</div> </div>
{/* Custom styles for the MDXEditor */}
<style>{`
.mdxeditor {
height: 100%;
display: flex;
flex-direction: column;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}
.mdxeditor [role="toolbar"] {
flex-shrink: 0;
border-bottom: 1px solid #e5e7eb;
background: #f9fafb;
padding: 4px 8px;
gap: 2px;
flex-wrap: wrap;
}
.mdxeditor [role="toolbar"] button {
padding: 4px 6px;
border-radius: 4px;
font-size: 12px;
}
.mdxeditor [role="toolbar"] button:hover {
background: #e5e7eb;
}
.mdxeditor [role="toolbar"] button[data-state="on"] {
background: ${MarkdownShape.PRIMARY_COLOR}20;
color: ${MarkdownShape.PRIMARY_COLOR};
}
.mdxeditor .mdxeditor-root-contenteditable {
flex: 1;
overflow-y: auto;
padding: 12px 16px;
min-height: 0;
}
.mdx-editor-content {
min-height: 100%;
height: 100%;
outline: none;
}
/* Ensure the editor fills the full available height */
.mdxeditor .mdxeditor-root-contenteditable > div {
min-height: 100%;
}
.mdx-editor-content h1 {
font-size: 1.75em;
font-weight: 700;
margin: 0.5em 0 0.25em;
color: #111827;
}
.mdx-editor-content h2 {
font-size: 1.5em;
font-weight: 600;
margin: 0.5em 0 0.25em;
color: #1f2937;
}
.mdx-editor-content h3 {
font-size: 1.25em;
font-weight: 600;
margin: 0.5em 0 0.25em;
color: #374151;
}
.mdx-editor-content p {
margin: 0.5em 0;
line-height: 1.6;
}
.mdx-editor-content ul, .mdx-editor-content ol {
margin: 0.5em 0;
padding-left: 1.5em;
}
.mdx-editor-content li {
margin: 0.25em 0;
}
.mdx-editor-content blockquote {
border-left: 3px solid ${MarkdownShape.PRIMARY_COLOR};
margin: 0.5em 0;
padding: 0.5em 1em;
background: #f3f4f6;
border-radius: 0 4px 4px 0;
}
.mdx-editor-content code {
background: #f3f4f6;
padding: 0.15em 0.4em;
border-radius: 3px;
font-family: 'SF Mono', Monaco, 'Courier New', monospace;
font-size: 0.9em;
}
.mdx-editor-content pre {
background: #1e1e2e;
color: #cdd6f4;
padding: 12px 16px;
border-radius: 6px;
overflow-x: auto;
margin: 0.5em 0;
}
.mdx-editor-content pre code {
background: none;
padding: 0;
color: inherit;
}
.mdx-editor-content a {
color: ${MarkdownShape.PRIMARY_COLOR};
text-decoration: underline;
}
.mdx-editor-content table {
border-collapse: collapse;
width: 100%;
margin: 0.5em 0;
}
.mdx-editor-content th, .mdx-editor-content td {
border: 1px solid #e5e7eb;
padding: 8px 12px;
text-align: left;
}
.mdx-editor-content th {
background: #f9fafb;
font-weight: 600;
}
.mdx-editor-content hr {
border: none;
border-top: 1px solid #e5e7eb;
margin: 1em 0;
}
.mdx-editor-content img {
max-width: 100%;
height: auto;
border-radius: 4px;
}
/* Source mode styling */
.mdxeditor-source-editor {
flex: 1;
overflow: auto;
}
.mdxeditor-source-editor .cm-editor {
height: 100%;
}
.mdxeditor-source-editor .cm-scroller {
padding: 12px 16px;
font-family: 'SF Mono', Monaco, 'Courier New', monospace;
font-size: 13px;
line-height: 1.5;
}
/* Diff source toggle button styling */
.mdxeditor [role="toolbar"] select {
padding: 4px 8px;
border-radius: 4px;
border: 1px solid #e5e7eb;
background: white;
font-size: 12px;
cursor: pointer;
}
/* Block type select */
.mdxeditor [data-radix-popper-content-wrapper] {
z-index: 100000 !important;
}
`}</style>
</StandardizedToolWrapper> </StandardizedToolWrapper>
</HTMLContainer> </HTMLContainer>
) )
@ -239,16 +424,9 @@ export class MarkdownShape extends BaseBoxShapeUtil<IMarkdownShape> {
return <rect width={shape.props.w} height={shape.props.h} /> return <rect width={shape.props.w} height={shape.props.h} />
} }
// Add handlers for better interaction
override onDoubleClick = (shape: IMarkdownShape) => { override onDoubleClick = (shape: IMarkdownShape) => {
const textarea = document.querySelector(`[data-shape-id="${shape.id}"] textarea`) as HTMLTextAreaElement // Focus the editor on double-click
textarea?.focus() const editorElement = document.querySelector(`[data-shape-id="${shape.id}"] .mdxeditor [contenteditable="true"]`) as HTMLElement
} editorElement?.focus()
onPointerDown = (shape: IMarkdownShape) => {
if (!shape.props.text) {
const textarea = document.querySelector(`[data-shape-id="${shape.id}"] textarea`) as HTMLTextAreaElement
textarea?.focus()
}
} }
} }

File diff suppressed because it is too large Load Diff

View File

@ -5,7 +5,7 @@ import {
Rectangle2d, Rectangle2d,
TLBaseShape, TLBaseShape,
} from "tldraw" } from "tldraw"
import React, { useState } from "react" import React, { useState, useRef, useEffect } from "react"
import { getRunPodVideoConfig } from "@/lib/clientConfig" import { getRunPodVideoConfig } from "@/lib/clientConfig"
import { StandardizedToolWrapper } from "@/components/StandardizedToolWrapper" import { StandardizedToolWrapper } from "@/components/StandardizedToolWrapper"
import { usePinnedToView } from "@/hooks/usePinnedToView" import { usePinnedToView } from "@/hooks/usePinnedToView"
@ -28,6 +28,8 @@ type IVideoGen = TLBaseShape<
w: number w: number
h: number h: number
prompt: string prompt: string
imageUrl: string // Input image URL for I2V generation
imageBase64: string // Uploaded image as base64 for I2V generation
videoUrl: string | null videoUrl: string | null
isLoading: boolean isLoading: boolean
error: string | null error: string | null
@ -47,13 +49,15 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
getDefaultProps(): IVideoGen['props'] { getDefaultProps(): IVideoGen['props'] {
return { return {
w: 500, w: 500,
h: 450, h: 540,
prompt: "", prompt: "",
imageUrl: "", // Input image URL for I2V generation
imageBase64: "", // Uploaded image as base64
videoUrl: null, videoUrl: null,
isLoading: false, isLoading: false,
error: null, error: null,
duration: 3, duration: 3,
model: "wan2.1-i2v", model: "wan2.2",
tags: ['video', 'ai-generated'], tags: ['video', 'ai-generated'],
pinnedToView: false pinnedToView: false
} }
@ -71,13 +75,33 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
component(shape: IVideoGen) { component(shape: IVideoGen) {
// Capture editor reference to avoid stale 'this' during drag operations // Capture editor reference to avoid stale 'this' during drag operations
const editor = this.editor const editor = this.editor
// Debug: log what's in shape props on each render
console.log('🎬 VideoGen render - shape.props.videoUrl:', shape.props.videoUrl?.substring(0, 80) || 'null')
const [prompt, setPrompt] = useState(shape.props.prompt) const [prompt, setPrompt] = useState(shape.props.prompt)
const [imageUrl, setImageUrl] = useState(shape.props.imageUrl)
const [imageBase64, setImageBase64] = useState(shape.props.imageBase64)
const [isGenerating, setIsGenerating] = useState(shape.props.isLoading) const [isGenerating, setIsGenerating] = useState(shape.props.isLoading)
const [error, setError] = useState<string | null>(shape.props.error) const [error, setError] = useState<string | null>(shape.props.error)
const [videoUrl, setVideoUrl] = useState<string | null>(shape.props.videoUrl) const [videoUrl, setVideoUrl] = useState<string | null>(shape.props.videoUrl)
const [isMinimized, setIsMinimized] = useState(false) const [isMinimized, setIsMinimized] = useState(false)
const fileInputRef = useRef<HTMLInputElement>(null)
const isSelected = editor.getSelectedShapeIds().includes(shape.id) const isSelected = editor.getSelectedShapeIds().includes(shape.id)
// Determine mode based on whether an image is provided
const hasImage = imageUrl.trim() || imageBase64
const mode = hasImage ? 'i2v' : 't2v'
// Sync video URL from shape props when it changes externally
// This ensures the displayed video matches the shape's stored videoUrl
useEffect(() => {
if (shape.props.videoUrl !== videoUrl) {
console.log('🎬 VideoGen: Syncing videoUrl from shape props:', shape.props.videoUrl?.substring(0, 50))
setVideoUrl(shape.props.videoUrl)
}
}, [shape.props.videoUrl])
// Pin to view functionality // Pin to view functionality
usePinnedToView(editor, shape.id, shape.props.pinnedToView) usePinnedToView(editor, shape.id, shape.props.pinnedToView)
@ -89,12 +113,52 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
}) })
} }
// Handle file upload
const handleFileUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
const file = e.target.files?.[0]
if (!file) return
// Validate file type
if (!file.type.startsWith('image/')) {
setError('Please upload an image file (JPEG, PNG, etc.)')
return
}
// Validate file size (max 10MB)
if (file.size > 10 * 1024 * 1024) {
setError('Image must be less than 10MB')
return
}
const reader = new FileReader()
reader.onload = (event) => {
const base64 = event.target?.result as string
setImageBase64(base64)
setImageUrl('') // Clear URL if uploading
setError(null)
}
reader.onerror = () => {
setError('Failed to read image file')
}
reader.readAsDataURL(file)
}
const handleGenerate = async () => { const handleGenerate = async () => {
if (!prompt.trim()) { if (!prompt.trim()) {
setError("Please enter a prompt") setError("Please enter a prompt describing the video")
return return
} }
// Validate image URL if provided
if (imageUrl.trim()) {
try {
new URL(imageUrl)
} catch {
setError("Please enter a valid image URL (must start with http:// or https://)")
return
}
}
// Check RunPod config // Check RunPod config
const runpodConfig = getRunPodVideoConfig() const runpodConfig = getRunPodVideoConfig()
if (!runpodConfig) { if (!runpodConfig) {
@ -102,16 +166,32 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
return return
} }
console.log('🎬 VideoGen: Starting generation with prompt:', prompt) const currentMode = (imageUrl.trim() || imageBase64) ? 'i2v' : 't2v'
console.log(`🎬 VideoGen: Starting ${currentMode.toUpperCase()} generation`)
console.log('🎬 VideoGen: Prompt:', prompt)
if (currentMode === 'i2v') {
console.log('🎬 VideoGen: Image source:', imageUrl ? 'URL' : 'Uploaded')
}
// Clear any existing video and set loading state
setIsGenerating(true) setIsGenerating(true)
setError(null) setError(null)
setVideoUrl(null) // Clear old video immediately
// Update shape to show loading state // Update shape to show loading state and clear old video
editor.updateShape({ const currentShape = editor.getShape(shape.id) as IVideoGen | undefined
id: shape.id, if (currentShape) {
type: shape.type, editor.updateShape({
props: { ...shape.props, isLoading: true, error: null } id: shape.id,
}) type: shape.type,
props: {
...currentShape.props,
isLoading: true,
error: null,
videoUrl: null // Clear old video from shape props
}
})
}
try { try {
const { apiKey, endpointId } = runpodConfig const { apiKey, endpointId } = runpodConfig
@ -123,29 +203,45 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
// Generate a random seed for reproducibility // Generate a random seed for reproducibility
const seed = Math.floor(Math.random() * 2147483647) const seed = Math.floor(Math.random() * 2147483647)
// ComfyUI workflow parameters required by the Wan2.1 handler // Wan2.2 parameters
// Note: Portrait orientation (480x832) works better than landscape
// Length is in frames: 81 frames ≈ 3 seconds at ~27fps output
const framesPerSecond = 27 // Wan2.2 output fps
const frameLength = Math.min(Math.max(shape.props.duration * framesPerSecond, 41), 121) // 41-121 frames supported
// Build input payload based on mode
const inputPayload: Record<string, any> = {
prompt: prompt,
negative_prompt: "blurry, distorted, low quality, static, frozen",
width: 480, // Portrait width (Wan2.2 optimal)
height: 832, // Portrait height (Wan2.2 optimal)
length: frameLength, // Total frames (81 ≈ 3 seconds)
steps: 10, // Inference steps (10 is optimal for speed/quality)
cfg: 2.0, // CFG scale - lower works better for Wan2.2
seed: seed,
context_overlap: 48, // Frame overlap for temporal consistency
}
// Add image for I2V mode
if (currentMode === 'i2v') {
if (imageUrl.trim()) {
inputPayload.image_url = imageUrl
} else if (imageBase64) {
// Strip data URL prefix if present, send just the base64
const base64Data = imageBase64.includes(',')
? imageBase64.split(',')[1]
: imageBase64
inputPayload.image = base64Data
}
}
const response = await fetch(runUrl, { const response = await fetch(runUrl, {
method: 'POST', method: 'POST',
headers: { headers: {
'Authorization': `Bearer ${apiKey}`, 'Authorization': `Bearer ${apiKey}`,
'Content-Type': 'application/json' 'Content-Type': 'application/json'
}, },
body: JSON.stringify({ body: JSON.stringify({ input: inputPayload })
input: {
prompt: prompt,
duration: shape.props.duration,
model: shape.props.model,
seed: seed,
cfg: 6.0, // CFG scale - guidance strength
steps: 30, // Inference steps
width: 832, // Video width (Wan2.1 optimal)
height: 480, // Video height (Wan2.1 optimal)
fps: 16, // Frames per second
num_frames: shape.props.duration * 16, // Total frames based on duration
denoise: 1.0, // Full denoising for text-to-video
scheduler: "euler", // Sampler scheduler
}
})
}) })
if (!response.ok) { if (!response.ok) {
@ -187,35 +283,60 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
console.log(`🎬 VideoGen: Poll ${attempts}/${maxAttempts}, status:`, statusData.status) console.log(`🎬 VideoGen: Poll ${attempts}/${maxAttempts}, status:`, statusData.status)
if (statusData.status === 'COMPLETED') { if (statusData.status === 'COMPLETED') {
// Extract video URL from output // Extract video from output - can be URL or base64 data
let url = '' let videoData = ''
if (typeof statusData.output === 'string') { if (typeof statusData.output === 'string') {
url = statusData.output // Direct string output - could be URL or base64
videoData = statusData.output
} else if (statusData.output?.video) {
// Base64 video data in output.video field
videoData = statusData.output.video
} else if (statusData.output?.video_url) { } else if (statusData.output?.video_url) {
url = statusData.output.video_url videoData = statusData.output.video_url
} else if (statusData.output?.url) { } else if (statusData.output?.url) {
url = statusData.output.url videoData = statusData.output.url
} }
if (url) { if (videoData) {
console.log('✅ VideoGen: Generation complete, URL:', url) // Check if it's base64 data (doesn't start with http)
setVideoUrl(url) let finalUrl = videoData
if (!videoData.startsWith('http') && !videoData.startsWith('data:')) {
// Convert base64 to data URL
finalUrl = `data:video/mp4;base64,${videoData}`
console.log('✅ VideoGen: Generation complete, converted base64 to data URL')
console.log('✅ VideoGen: Base64 length:', videoData.length, 'chars')
} else {
console.log('✅ VideoGen: Generation complete, URL:', finalUrl.substring(0, 100))
}
// Log the data URL prefix to verify format
console.log('✅ VideoGen: Final URL prefix:', finalUrl.substring(0, 50))
// Update local state immediately
setVideoUrl(finalUrl)
setIsGenerating(false) setIsGenerating(false)
editor.updateShape({ // Get fresh shape data to avoid stale props
id: shape.id, const currentShape = editor.getShape(shape.id)
type: shape.type, if (currentShape) {
props: { editor.updateShape({
...shape.props, id: shape.id,
videoUrl: url, type: shape.type,
isLoading: false, props: {
prompt: prompt ...(currentShape as IVideoGen).props,
} videoUrl: finalUrl,
}) isLoading: false,
prompt: prompt,
imageUrl: imageUrl,
imageBase64: imageBase64
}
})
}
return return
} else { } else {
console.log('⚠️ VideoGen: Completed but no video URL in output:', statusData.output) console.log('⚠️ VideoGen: Completed but no video in output:', JSON.stringify(statusData.output))
throw new Error('Video generation completed but no video URL returned') throw new Error('Video generation completed but no video data returned')
} }
} else if (statusData.status === 'FAILED') { } else if (statusData.status === 'FAILED') {
throw new Error(statusData.error || 'Video generation failed') throw new Error(statusData.error || 'Video generation failed')
@ -258,7 +379,7 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
return ( return (
<HTMLContainer id={shape.id}> <HTMLContainer id={shape.id}>
<StandardizedToolWrapper <StandardizedToolWrapper
title="🎬 Video Generator (Wan2.1)" title="🎬 Video Generator (Wan2.2)"
primaryColor={VideoGenShape.PRIMARY_COLOR} primaryColor={VideoGenShape.PRIMARY_COLOR}
isSelected={isSelected} isSelected={isSelected}
width={shape.props.w} width={shape.props.w}
@ -300,20 +421,155 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
}}> }}>
{!videoUrl && ( {!videoUrl && (
<> <>
<div style={{ display: 'flex', flexDirection: 'column', gap: '8px' }}> {/* Mode indicator */}
<div style={{
display: 'flex',
alignItems: 'center',
gap: '8px',
padding: '8px 12px',
backgroundColor: mode === 'i2v' ? '#e8f4fd' : '#f0e8fd',
borderRadius: '6px',
fontSize: '12px',
color: mode === 'i2v' ? '#1976d2' : '#7c3aed'
}}>
<span style={{ fontWeight: '600' }}>
{mode === 'i2v' ? '🖼️ Image-to-Video' : '✨ Text-to-Video'}
</span>
<span style={{ opacity: 0.8 }}>
{mode === 'i2v' ? '(animates your image)' : '(generates from text only)'}
</span>
</div>
{/* Image Input Section */}
<div style={{ display: 'flex', flexDirection: 'column', gap: '6px' }}>
<label style={{ color: '#555', fontSize: '12px', fontWeight: '600' }}> <label style={{ color: '#555', fontSize: '12px', fontWeight: '600' }}>
Video Prompt Source Image (optional)
</label>
{/* Image preview or upload area */}
{(imageUrl || imageBase64) ? (
<div style={{ position: 'relative' }}>
<img
src={imageBase64 || imageUrl}
alt="Preview"
style={{
width: '100%',
maxHeight: '100px',
objectFit: 'contain',
borderRadius: '6px',
backgroundColor: '#f5f5f5'
}}
onError={(e) => {
(e.target as HTMLImageElement).style.display = 'none'
setError('Failed to load image from URL')
}}
/>
<button
onClick={() => {
setImageUrl('')
setImageBase64('')
}}
onPointerDown={(e) => e.stopPropagation()}
disabled={isGenerating}
style={{
position: 'absolute',
top: '4px',
right: '4px',
width: '24px',
height: '24px',
borderRadius: '50%',
border: 'none',
backgroundColor: 'rgba(0,0,0,0.6)',
color: '#fff',
cursor: 'pointer',
fontSize: '14px',
display: 'flex',
alignItems: 'center',
justifyContent: 'center'
}}
>
×
</button>
</div>
) : (
<div style={{ display: 'flex', gap: '8px' }}>
{/* Upload button */}
<button
onClick={() => fileInputRef.current?.click()}
onPointerDown={(e) => e.stopPropagation()}
disabled={isGenerating}
style={{
flex: 1,
padding: '12px',
backgroundColor: '#f5f5f5',
border: '2px dashed #ccc',
borderRadius: '6px',
cursor: 'pointer',
fontSize: '12px',
color: '#666',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
gap: '6px'
}}
>
📤 Upload Image
</button>
<input
ref={fileInputRef}
type="file"
accept="image/*"
onChange={handleFileUpload}
style={{ display: 'none' }}
/>
</div>
)}
{/* URL input (collapsible) */}
{!imageBase64 && (
<input
type="url"
value={imageUrl}
onChange={(e) => {
setImageUrl(e.target.value)
setImageBase64('')
}}
placeholder="Or paste image URL..."
disabled={isGenerating}
onPointerDown={(e) => e.stopPropagation()}
onMouseDown={(e) => e.stopPropagation()}
style={{
width: '100%',
padding: '8px 10px',
backgroundColor: '#fff',
color: '#333',
border: '1px solid #ddd',
borderRadius: '6px',
fontSize: '12px',
boxSizing: 'border-box'
}}
/>
)}
</div>
{/* Prompt */}
<div style={{ display: 'flex', flexDirection: 'column', gap: '6px' }}>
<label style={{ color: '#555', fontSize: '12px', fontWeight: '600' }}>
{mode === 'i2v' ? 'Motion Prompt *' : 'Video Prompt *'}
</label> </label>
<textarea <textarea
value={prompt} value={prompt}
onChange={(e) => setPrompt(e.target.value)} onChange={(e) => setPrompt(e.target.value)}
placeholder="Describe the video you want to generate..." placeholder={mode === 'i2v'
? "Describe the motion (e.g., 'gentle camera pan, wind blowing')"
: "Describe the video scene (e.g., 'a cat walking through a forest')"
}
disabled={isGenerating} disabled={isGenerating}
onPointerDown={(e) => e.stopPropagation()} onPointerDown={(e) => e.stopPropagation()}
onMouseDown={(e) => e.stopPropagation()} onMouseDown={(e) => e.stopPropagation()}
style={{ style={{
width: '100%', width: '100%',
minHeight: '80px', minHeight: '50px',
padding: '10px', padding: '10px',
backgroundColor: '#fff', backgroundColor: '#fff',
color: '#333', color: '#333',
@ -334,8 +590,8 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
</label> </label>
<input <input
type="number" type="number"
min="1" min="2"
max="10" max="4"
value={shape.props.duration} value={shape.props.duration}
onChange={(e) => { onChange={(e) => {
editor.updateShape({ editor.updateShape({
@ -379,7 +635,7 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
opacity: isGenerating || !prompt.trim() ? 0.6 : 1 opacity: isGenerating || !prompt.trim() ? 0.6 : 1
}} }}
> >
{isGenerating ? 'Generating...' : 'Generate Video'} {isGenerating ? 'Generating...' : (mode === 'i2v' ? 'Animate Image' : 'Generate Video')}
</button> </button>
</div> </div>
@ -406,10 +662,16 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
color: '#666', color: '#666',
lineHeight: '1.5' lineHeight: '1.5'
}}> }}>
<div><strong>Note:</strong> Video generation uses RunPod GPU</div> <div><strong>Wan2.2 Video Generation</strong></div>
<div>Cost: ~$0.50 per video | Processing: 1-5 minutes</div> <div>
{mode === 'i2v'
? 'Animates your image based on the motion prompt'
: 'Creates video from your text description'
}
</div>
<div style={{ marginTop: '4px' }}>Output: 480x832 portrait | ~3 seconds</div>
<div style={{ marginTop: '4px', opacity: 0.8 }}> <div style={{ marginTop: '4px', opacity: 0.8 }}>
First request may take longer due to GPU cold start Processing: 2-6 minutes (includes GPU warm-up)
</div> </div>
</div> </div>
</> </>
@ -418,11 +680,14 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
{videoUrl && ( {videoUrl && (
<> <>
<video <video
key={videoUrl.substring(0, 100)} // Force reload when URL changes
src={videoUrl} src={videoUrl}
controls controls
autoPlay autoPlay
loop loop
onPointerDown={(e) => e.stopPropagation()} onPointerDown={(e) => e.stopPropagation()}
onLoadedData={() => console.log('🎬 VideoGen: Video loaded successfully')}
onError={(e) => console.error('🎬 VideoGen: Video load error:', e)}
style={{ style={{
width: '100%', width: '100%',
maxHeight: '280px', maxHeight: '280px',
@ -447,10 +712,12 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
onClick={() => { onClick={() => {
setVideoUrl(null) setVideoUrl(null)
setPrompt("") setPrompt("")
setImageUrl("")
setImageBase64("")
editor.updateShape({ editor.updateShape({
id: shape.id, id: shape.id,
type: shape.type, type: shape.type,
props: { ...shape.props, videoUrl: null, prompt: "" } props: { ...shape.props, videoUrl: null, prompt: "", imageUrl: "", imageBase64: "" }
}) })
}} }}
onPointerDown={(e) => e.stopPropagation()} onPointerDown={(e) => e.stopPropagation()}

View File

@ -226,23 +226,24 @@ export function CustomContextMenu(props: TLUiContextMenuProps) {
</TldrawUiMenuSubmenu> </TldrawUiMenuSubmenu>
</TldrawUiMenuGroup> </TldrawUiMenuGroup>
{/* Creation Tools Group */} {/* Creation Tools Group - Always available regardless of selection */}
<TldrawUiMenuGroup id="creation-tools"> <TldrawUiMenuGroup id="creation-tools">
<TldrawUiMenuItem {...tools.VideoChat} disabled={hasSelection} /> <TldrawUiMenuSubmenu id="tools-dropdown" label="Create Tool">
<TldrawUiMenuItem {...tools.ChatBox} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.Prompt} />
<TldrawUiMenuItem {...tools.Embed} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.ChatBox} />
<TldrawUiMenuItem {...tools.SlideShape} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.ImageGen} />
<TldrawUiMenuItem {...tools.Markdown} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.VideoGen} />
<TldrawUiMenuItem {...tools.MycrozineTemplate} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.Markdown} />
<TldrawUiMenuItem {...tools.Prompt} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.ObsidianNote} />
<TldrawUiMenuItem {...tools.ObsidianNote} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.Transcription} />
<TldrawUiMenuItem {...tools.Transcription} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.Embed} />
<TldrawUiMenuItem {...tools.FathomMeetings} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.Holon} />
<TldrawUiMenuItem {...tools.Holon} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.Multmux} />
<TldrawUiMenuItem {...tools.ImageGen} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.SlideShape} />
<TldrawUiMenuItem {...tools.VideoGen} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.VideoChat} />
<TldrawUiMenuItem {...tools.Multmux} disabled={hasSelection} /> <TldrawUiMenuItem {...tools.FathomMeetings} />
{/* MycelialIntelligence moved to permanent UI bar */} <TldrawUiMenuItem {...tools.MycrozineTemplate} />
</TldrawUiMenuSubmenu>
</TldrawUiMenuGroup> </TldrawUiMenuGroup>
{/* Collections Group */} {/* Collections Group */}

View File

@ -15,8 +15,17 @@ import { createShapeId } from "tldraw"
import type { ObsidianObsNote } from "../lib/obsidianImporter" import type { ObsidianObsNote } from "../lib/obsidianImporter"
import { HolonData } from "../lib/HoloSphereService" import { HolonData } from "../lib/HoloSphereService"
import { FathomMeetingsPanel } from "../components/FathomMeetingsPanel" import { FathomMeetingsPanel } from "../components/FathomMeetingsPanel"
import { isFathomApiKeyConfigured } from "../lib/fathomApiKey" import { getFathomApiKey, saveFathomApiKey, removeFathomApiKey, isFathomApiKeyConfigured } from "../lib/fathomApiKey"
import { UserSettingsModal } from "./UserSettingsModal"
// AI tool model configurations for the dropdown
const AI_TOOLS = [
{ id: 'chat', name: 'Chat', icon: '💬', model: 'llama3.1:8b', provider: 'Ollama', type: 'local' },
{ id: 'make-real', name: 'Make Real', icon: '🔧', model: 'claude-sonnet-4-5', provider: 'Anthropic', type: 'cloud' },
{ id: 'image-gen', name: 'Image Gen', icon: '🎨', model: 'SDXL', provider: 'RunPod', type: 'gpu' },
{ id: 'video-gen', name: 'Video Gen', icon: '🎬', model: 'Wan2.1', provider: 'RunPod', type: 'gpu' },
{ id: 'transcription', name: 'Transcribe', icon: '🎤', model: 'Web Speech', provider: 'Browser', type: 'local' },
{ id: 'mycelial', name: 'Mycelial', icon: '🍄', model: 'llama3.1:70b', provider: 'Ollama', type: 'local' },
]
// Dark mode utilities // Dark mode utilities
const getDarkMode = (): boolean => { const getDarkMode = (): boolean => {
@ -42,7 +51,6 @@ export function CustomToolbar() {
const { session, setSession, clearSession } = useAuth() const { session, setSession, clearSession } = useAuth()
const [showProfilePopup, setShowProfilePopup] = useState(false) const [showProfilePopup, setShowProfilePopup] = useState(false)
const [showSettingsModal, setShowSettingsModal] = useState(false)
const [showVaultBrowser, setShowVaultBrowser] = useState(false) const [showVaultBrowser, setShowVaultBrowser] = useState(false)
const [showHolonBrowser, setShowHolonBrowser] = useState(false) const [showHolonBrowser, setShowHolonBrowser] = useState(false)
const [vaultBrowserMode, setVaultBrowserMode] = useState<'keyboard' | 'button'>('keyboard') const [vaultBrowserMode, setVaultBrowserMode] = useState<'keyboard' | 'button'>('keyboard')
@ -50,11 +58,24 @@ export function CustomToolbar() {
const profilePopupRef = useRef<HTMLDivElement>(null) const profilePopupRef = useRef<HTMLDivElement>(null)
const [isDarkMode, setIsDarkMode] = useState(getDarkMode()) const [isDarkMode, setIsDarkMode] = useState(getDarkMode())
// Dropdown section states
const [expandedSection, setExpandedSection] = useState<'none' | 'ai' | 'integrations'>('none')
const [hasFathomApiKey, setHasFathomApiKey] = useState(false)
const [showFathomInput, setShowFathomInput] = useState(false)
const [fathomKeyInput, setFathomKeyInput] = useState('')
// Initialize dark mode on mount // Initialize dark mode on mount
useEffect(() => { useEffect(() => {
setDarkMode(isDarkMode) setDarkMode(isDarkMode)
}, []) }, [])
// Check Fathom API key status
useEffect(() => {
if (session.authed && session.username) {
setHasFathomApiKey(isFathomApiKeyConfigured(session.username))
}
}, [session.authed, session.username])
const toggleDarkMode = () => { const toggleDarkMode = () => {
const newMode = !isDarkMode const newMode = !isDarkMode
setIsDarkMode(newMode) setIsDarkMode(newMode)
@ -528,7 +549,7 @@ export function CustomToolbar() {
</button> </button>
{showProfilePopup && ( {showProfilePopup && (
<div ref={profilePopupRef} className="profile-dropdown"> <div ref={profilePopupRef} className="profile-dropdown" style={{ width: '280px', maxHeight: '80vh', overflowY: 'auto' }}>
<div className="profile-dropdown-header"> <div className="profile-dropdown-header">
<div className="profile-avatar"> <div className="profile-avatar">
<svg width="20" height="20" viewBox="0 0 16 16" fill="currentColor"> <svg width="20" height="20" viewBox="0 0 16 16" fill="currentColor">
@ -550,22 +571,9 @@ export function CustomToolbar() {
<span>My Saved Boards</span> <span>My Saved Boards</span>
</a> </a>
<button
className="profile-dropdown-item"
onClick={() => {
setShowProfilePopup(false)
setShowSettingsModal(true)
}}
>
<svg width="16" height="16" viewBox="0 0 16 16" fill="currentColor">
<path d="M8 4.754a3.246 3.246 0 1 0 0 6.492 3.246 3.246 0 0 0 0-6.492zM5.754 8a2.246 2.246 0 1 1 4.492 0 2.246 2.246 0 0 1-4.492 0z"/>
<path d="M9.796 1.343c-.527-1.79-3.065-1.79-3.592 0l-.094.319a.873.873 0 0 1-1.255.52l-.292-.16c-1.64-.892-3.433.902-2.54 2.541l.159.292a.873.873 0 0 1-.52 1.255l-.319.094c-1.79.527-1.79 3.065 0 3.592l.319.094a.873.873 0 0 1 .52 1.255l-.16.292c-.892 1.64.901 3.434 2.541 2.54l.292-.159a.873.873 0 0 1 1.255.52l.094.319c.527 1.79 3.065 1.79 3.592 0l.094-.319a.873.873 0 0 1 1.255-.52l.292.16c1.64.893 3.434-.902 2.54-2.541l-.159-.292a.873.873 0 0 1 .52-1.255l.319-.094c1.79-.527 1.79-3.065 0-3.592l-.319-.094a.873.873 0 0 1-.52-1.255l.16-.292c.893-1.64-.902-3.433-2.541-2.54l-.292.159a.873.873 0 0 1-1.255-.52l-.094-.319zm-2.633.283c.246-.835 1.428-.835 1.674 0l.094.319a1.873 1.873 0 0 0 2.693 1.115l.291-.16c.764-.415 1.6.42 1.184 1.185l-.159.292a1.873 1.873 0 0 0 1.116 2.692l.318.094c.835.246.835 1.428 0 1.674l-.319.094a1.873 1.873 0 0 0-1.115 2.693l.16.291c.415.764-.42 1.6-1.185 1.184l-.291-.159a1.873 1.873 0 0 0-2.693 1.116l-.094.318c-.246.835-1.428.835-1.674 0l-.094-.319a1.873 1.873 0 0 0-2.692-1.115l-.292.16c-.764.415-1.6-.42-1.184-1.185l.159-.291A1.873 1.873 0 0 0 1.945 8.93l-.319-.094c-.835-.246-.835-1.428 0-1.674l.319-.094A1.873 1.873 0 0 0 3.06 4.377l-.16-.292c-.415-.764.42-1.6 1.185-1.184l.292.159a1.873 1.873 0 0 0 2.692-1.115l.094-.319z"/>
</svg>
<span>Settings</span>
</button>
<div className="profile-dropdown-divider" /> <div className="profile-dropdown-divider" />
{/* General Settings */}
<button className="profile-dropdown-item" onClick={toggleDarkMode}> <button className="profile-dropdown-item" onClick={toggleDarkMode}>
<svg width="16" height="16" viewBox="0 0 16 16" fill="currentColor"> <svg width="16" height="16" viewBox="0 0 16 16" fill="currentColor">
{isDarkMode ? ( {isDarkMode ? (
@ -579,6 +587,303 @@ export function CustomToolbar() {
<div className="profile-dropdown-divider" /> <div className="profile-dropdown-divider" />
{/* AI Models Section */}
<button
className="profile-dropdown-item"
onClick={() => setExpandedSection(expandedSection === 'ai' ? 'none' : 'ai')}
style={{ justifyContent: 'space-between' }}
>
<span style={{ display: 'flex', alignItems: 'center', gap: '8px' }}>
<span style={{ fontSize: '14px' }}>🤖</span>
<span>AI Models</span>
</span>
<svg
width="12"
height="12"
viewBox="0 0 16 16"
fill="currentColor"
style={{ transform: expandedSection === 'ai' ? 'rotate(180deg)' : 'rotate(0deg)', transition: 'transform 0.2s' }}
>
<path d="M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z"/>
</svg>
</button>
{expandedSection === 'ai' && (
<div style={{ padding: '8px 12px', backgroundColor: 'var(--color-muted-2, #f5f5f5)' }}>
<p style={{ fontSize: '10px', color: 'var(--color-text-2, #666)', marginBottom: '8px' }}>
Local models are free. Cloud models require API keys.
</p>
{AI_TOOLS.map((tool) => (
<div
key={tool.id}
style={{
display: 'flex',
alignItems: 'center',
justifyContent: 'space-between',
padding: '6px 0',
borderBottom: '1px solid var(--color-muted-1, #eee)',
}}
>
<span style={{ display: 'flex', alignItems: 'center', gap: '6px', fontSize: '12px' }}>
<span>{tool.icon}</span>
<span>{tool.name}</span>
</span>
<span
style={{
fontSize: '9px',
padding: '2px 6px',
borderRadius: '10px',
backgroundColor: tool.type === 'local' ? '#d1fae5' : tool.type === 'gpu' ? '#e0e7ff' : '#fef3c7',
color: tool.type === 'local' ? '#065f46' : tool.type === 'gpu' ? '#3730a3' : '#92400e',
fontWeight: 500,
}}
>
{tool.model}
</span>
</div>
))}
<button
onClick={() => {
addDialog({
id: "api-keys",
component: ({ onClose: dialogClose }: { onClose: () => void }) => (
<SettingsDialog
onClose={() => {
dialogClose()
removeDialog("api-keys")
checkApiKeys()
}}
/>
),
})
}}
style={{
width: '100%',
marginTop: '8px',
padding: '6px 10px',
fontSize: '11px',
fontWeight: 500,
backgroundColor: 'var(--color-primary, #3b82f6)',
color: 'white',
border: 'none',
borderRadius: '4px',
cursor: 'pointer',
}}
>
{hasApiKey ? 'Manage API Keys' : 'Add API Keys'}
</button>
</div>
)}
{/* Integrations Section */}
<button
className="profile-dropdown-item"
onClick={() => setExpandedSection(expandedSection === 'integrations' ? 'none' : 'integrations')}
style={{ justifyContent: 'space-between' }}
>
<span style={{ display: 'flex', alignItems: 'center', gap: '8px' }}>
<span style={{ fontSize: '14px' }}>🔗</span>
<span>Integrations</span>
</span>
<svg
width="12"
height="12"
viewBox="0 0 16 16"
fill="currentColor"
style={{ transform: expandedSection === 'integrations' ? 'rotate(180deg)' : 'rotate(0deg)', transition: 'transform 0.2s' }}
>
<path d="M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z"/>
</svg>
</button>
{expandedSection === 'integrations' && (
<div style={{ padding: '8px 12px', backgroundColor: 'var(--color-muted-2, #f5f5f5)' }}>
{/* Obsidian Vault */}
<div style={{ marginBottom: '12px' }}>
<div style={{ display: 'flex', alignItems: 'center', justifyContent: 'space-between', marginBottom: '4px' }}>
<span style={{ display: 'flex', alignItems: 'center', gap: '6px', fontSize: '12px', fontWeight: 500 }}>
<span>📁</span> Obsidian Vault
</span>
<span
style={{
fontSize: '9px',
padding: '2px 6px',
borderRadius: '10px',
backgroundColor: session.obsidianVaultName ? '#d1fae5' : '#fef3c7',
color: session.obsidianVaultName ? '#065f46' : '#92400e',
fontWeight: 500,
}}
>
{session.obsidianVaultName ? 'Connected' : 'Not Set'}
</span>
</div>
{session.obsidianVaultName && (
<p style={{ fontSize: '10px', color: '#059669', marginBottom: '4px' }}>{session.obsidianVaultName}</p>
)}
<button
onClick={() => {
window.dispatchEvent(new CustomEvent('open-obsidian-browser'))
setShowProfilePopup(false)
}}
style={{
width: '100%',
padding: '5px 8px',
fontSize: '10px',
backgroundColor: 'white',
border: '1px solid #ddd',
borderRadius: '4px',
cursor: 'pointer',
}}
>
{session.obsidianVaultName ? 'Change Vault' : 'Connect Vault'}
</button>
</div>
{/* Fathom Meetings */}
<div style={{ paddingTop: '8px', borderTop: '1px solid var(--color-muted-1, #ddd)' }}>
<div style={{ display: 'flex', alignItems: 'center', justifyContent: 'space-between', marginBottom: '4px' }}>
<span style={{ display: 'flex', alignItems: 'center', gap: '6px', fontSize: '12px', fontWeight: 500 }}>
<span>🎥</span> Fathom Meetings
</span>
<span
style={{
fontSize: '9px',
padding: '2px 6px',
borderRadius: '10px',
backgroundColor: hasFathomApiKey ? '#d1fae5' : '#fef3c7',
color: hasFathomApiKey ? '#065f46' : '#92400e',
fontWeight: 500,
}}
>
{hasFathomApiKey ? 'Connected' : 'Not Set'}
</span>
</div>
{showFathomInput ? (
<div>
<input
type="password"
value={fathomKeyInput}
onChange={(e) => setFathomKeyInput(e.target.value)}
placeholder="Enter Fathom API key..."
style={{
width: '100%',
padding: '6px 8px',
fontSize: '11px',
border: '1px solid #ddd',
borderRadius: '4px',
marginBottom: '6px',
}}
onKeyDown={(e) => {
if (e.key === 'Enter' && fathomKeyInput.trim()) {
saveFathomApiKey(fathomKeyInput.trim(), session.username)
setHasFathomApiKey(true)
setShowFathomInput(false)
setFathomKeyInput('')
} else if (e.key === 'Escape') {
setShowFathomInput(false)
setFathomKeyInput('')
}
}}
autoFocus
/>
<div style={{ display: 'flex', gap: '4px' }}>
<button
onClick={() => {
if (fathomKeyInput.trim()) {
saveFathomApiKey(fathomKeyInput.trim(), session.username)
setHasFathomApiKey(true)
setShowFathomInput(false)
setFathomKeyInput('')
}
}}
style={{
flex: 1,
padding: '5px',
fontSize: '10px',
backgroundColor: 'var(--color-primary, #3b82f6)',
color: 'white',
border: 'none',
borderRadius: '4px',
cursor: 'pointer',
}}
>
Save
</button>
<button
onClick={() => {
setShowFathomInput(false)
setFathomKeyInput('')
}}
style={{
flex: 1,
padding: '5px',
fontSize: '10px',
backgroundColor: 'white',
border: '1px solid #ddd',
borderRadius: '4px',
cursor: 'pointer',
}}
>
Cancel
</button>
</div>
<a
href="https://app.usefathom.com/settings/integrations"
target="_blank"
rel="noopener noreferrer"
style={{ display: 'block', fontSize: '9px', color: '#3b82f6', marginTop: '6px', textDecoration: 'none' }}
>
Get API key from Fathom
</a>
</div>
) : (
<div style={{ display: 'flex', gap: '4px' }}>
<button
onClick={() => {
setShowFathomInput(true)
const currentKey = getFathomApiKey(session.username)
if (currentKey) setFathomKeyInput(currentKey)
}}
style={{
flex: 1,
padding: '5px 8px',
fontSize: '10px',
backgroundColor: 'white',
border: '1px solid #ddd',
borderRadius: '4px',
cursor: 'pointer',
}}
>
{hasFathomApiKey ? 'Change Key' : 'Add API Key'}
</button>
{hasFathomApiKey && (
<button
onClick={() => {
removeFathomApiKey(session.username)
setHasFathomApiKey(false)
}}
style={{
padding: '5px 8px',
fontSize: '10px',
backgroundColor: '#fee2e2',
color: '#dc2626',
border: 'none',
borderRadius: '4px',
cursor: 'pointer',
}}
>
Disconnect
</button>
)}
</div>
)}
</div>
</div>
)}
<div className="profile-dropdown-divider" />
{!session.backupCreated && ( {!session.backupCreated && (
<div className="profile-dropdown-warning"> <div className="profile-dropdown-warning">
Back up your encryption keys to prevent data loss Back up your encryption keys to prevent data loss
@ -597,15 +902,6 @@ export function CustomToolbar() {
</div> </div>
)} )}
</div> </div>
{/* Settings Modal */}
{showSettingsModal && (
<UserSettingsModal
onClose={() => setShowSettingsModal(false)}
isDarkMode={isDarkMode}
onToggleDarkMode={toggleDarkMode}
/>
)}
<DefaultToolbar> <DefaultToolbar>
<DefaultToolbarContent /> <DefaultToolbarContent />
{tools["VideoChat"] && ( {tools["VideoChat"] && (

File diff suppressed because it is too large Load Diff

View File

@ -4,6 +4,68 @@ import { useDialogs } from "tldraw"
import { SettingsDialog } from "./SettingsDialog" import { SettingsDialog } from "./SettingsDialog"
import { getFathomApiKey, saveFathomApiKey, removeFathomApiKey, isFathomApiKeyConfigured } from "../lib/fathomApiKey" import { getFathomApiKey, saveFathomApiKey, removeFathomApiKey, isFathomApiKeyConfigured } from "../lib/fathomApiKey"
// AI tool model configurations shown in the settings "AI" tab.
// Each entry describes one canvas AI tool: a stable `id`, display
// `name`/`icon`/`description`, and the model(s) it runs on.
// `models.primary` is the preferred backend; `models.fallback`
// (optional) is listed as the alternative in the UI.
// `type` is one of 'local', 'gpu', or 'cloud' — the settings view
// uses it to pick badge colors (local = green, gpu = indigo,
// cloud = amber).
const AI_TOOLS = [
  {
    id: 'chat',
    name: 'Chat Assistant',
    icon: '💬',
    description: 'Conversational AI for questions and discussions',
    models: {
      primary: { name: 'Ollama (Local)', model: 'llama3.1:8b', type: 'local' },
      fallback: { name: 'OpenAI', model: 'gpt-4o', type: 'cloud' },
    }
  },
  {
    id: 'make-real',
    name: 'Make Real',
    icon: '🔧',
    description: 'Convert wireframes to working prototypes',
    models: {
      primary: { name: 'Anthropic', model: 'claude-sonnet-4-5', type: 'cloud' },
      fallback: { name: 'OpenAI', model: 'gpt-4o', type: 'cloud' },
    }
  },
  {
    id: 'image-gen',
    name: 'Image Generation',
    icon: '🎨',
    description: 'Generate images from text prompts',
    models: {
      // GPU-backed tools have no fallback entry
      primary: { name: 'RunPod', model: 'Stable Diffusion XL', type: 'gpu' },
    }
  },
  {
    id: 'video-gen',
    name: 'Video Generation',
    icon: '🎬',
    description: 'Generate videos from images',
    models: {
      primary: { name: 'RunPod', model: 'Wan2.1 I2V', type: 'gpu' },
    }
  },
  {
    id: 'transcription',
    name: 'Transcription',
    icon: '🎤',
    description: 'Transcribe audio to text',
    models: {
      primary: { name: 'Browser', model: 'Web Speech API', type: 'local' },
      fallback: { name: 'Whisper', model: 'whisper-large-v3', type: 'local' },
    }
  },
  {
    id: 'mycelial',
    name: 'Mycelial Intelligence',
    icon: '🍄',
    description: 'Analyze connections between concepts',
    models: {
      primary: { name: 'Ollama (Local)', model: 'llama3.1:70b', type: 'local' },
      fallback: { name: 'Anthropic', model: 'claude-sonnet-4-5', type: 'cloud' },
    }
  },
]
interface UserSettingsModalProps { interface UserSettingsModalProps {
onClose: () => void onClose: () => void
isDarkMode: boolean isDarkMode: boolean
@ -154,11 +216,72 @@ export function UserSettingsModal({ onClose, isDarkMode, onToggleDarkMode }: Use
{activeTab === 'ai' && ( {activeTab === 'ai' && (
<div className="settings-section"> <div className="settings-section">
{/* AI Tools Overview */}
<div style={{ marginBottom: '16px' }}>
<h3 style={{ fontSize: '14px', fontWeight: '600', marginBottom: '12px', color: '#374151' }}>
AI Tools & Models
</h3>
<p style={{ fontSize: '12px', color: '#6b7280', marginBottom: '16px', lineHeight: '1.4' }}>
Each tool uses optimized AI models. Local models run on your private server for free, cloud models require API keys.
</p>
<div style={{ display: 'flex', flexDirection: 'column', gap: '8px' }}>
{AI_TOOLS.map((tool) => (
<div
key={tool.id}
style={{
padding: '12px',
backgroundColor: '#f9fafb',
borderRadius: '8px',
border: '1px solid #e5e7eb',
}}
>
<div style={{ display: 'flex', alignItems: 'center', gap: '8px', marginBottom: '6px' }}>
<span style={{ fontSize: '16px' }}>{tool.icon}</span>
<span style={{ fontSize: '13px', fontWeight: '600', color: '#1f2937' }}>{tool.name}</span>
</div>
<p style={{ fontSize: '11px', color: '#6b7280', marginBottom: '8px' }}>{tool.description}</p>
<div style={{ display: 'flex', flexWrap: 'wrap', gap: '6px' }}>
<span
style={{
fontSize: '10px',
padding: '3px 8px',
borderRadius: '12px',
backgroundColor: tool.models.primary.type === 'local' ? '#d1fae5' : tool.models.primary.type === 'gpu' ? '#e0e7ff' : '#fef3c7',
color: tool.models.primary.type === 'local' ? '#065f46' : tool.models.primary.type === 'gpu' ? '#3730a3' : '#92400e',
fontWeight: '500',
}}
>
{tool.models.primary.name}: {tool.models.primary.model}
</span>
{tool.models.fallback && (
<span
style={{
fontSize: '10px',
padding: '3px 8px',
borderRadius: '12px',
backgroundColor: '#f3f4f6',
color: '#6b7280',
fontWeight: '500',
}}
>
Fallback: {tool.models.fallback.model}
</span>
)}
</div>
</div>
))}
</div>
</div>
<div className="settings-divider" />
{/* API Keys Configuration */}
<div className="settings-item"> <div className="settings-item">
<div className="settings-item-info"> <div className="settings-item-info">
<span className="settings-item-label">AI API Keys</span> <span className="settings-item-label">AI API Keys</span>
<span className="settings-item-description"> <span className="settings-item-description">
{hasApiKey ? 'Your AI models are configured and ready' : 'Configure API keys to use AI features'} {hasApiKey ? 'Your cloud AI models are configured and ready' : 'Configure API keys to use cloud AI features'}
</span> </span>
</div> </div>
<div className="settings-item-status"> <div className="settings-item-status">
@ -170,121 +293,231 @@ export function UserSettingsModal({ onClose, isDarkMode, onToggleDarkMode }: Use
<button className="settings-action-btn" onClick={openApiKeysDialog}> <button className="settings-action-btn" onClick={openApiKeysDialog}>
{hasApiKey ? 'Manage API Keys' : 'Add API Keys'} {hasApiKey ? 'Manage API Keys' : 'Add API Keys'}
</button> </button>
{/* Model type legend */}
<div style={{ marginTop: '16px', padding: '12px', backgroundColor: '#f8fafc', borderRadius: '6px', border: '1px solid #e2e8f0' }}>
<div style={{ fontSize: '11px', color: '#64748b', display: 'flex', flexWrap: 'wrap', gap: '12px' }}>
<span style={{ display: 'flex', alignItems: 'center', gap: '4px' }}>
<span style={{ width: '8px', height: '8px', borderRadius: '50%', backgroundColor: '#10b981' }}></span>
Local (Free)
</span>
<span style={{ display: 'flex', alignItems: 'center', gap: '4px' }}>
<span style={{ width: '8px', height: '8px', borderRadius: '50%', backgroundColor: '#6366f1' }}></span>
GPU (RunPod)
</span>
<span style={{ display: 'flex', alignItems: 'center', gap: '4px' }}>
<span style={{ width: '8px', height: '8px', borderRadius: '50%', backgroundColor: '#f59e0b' }}></span>
Cloud (API Key)
</span>
</div>
</div>
</div> </div>
)} )}
{activeTab === 'integrations' && ( {activeTab === 'integrations' && (
<div className="settings-section"> <div className="settings-section">
{/* Obsidian Vault */} {/* Knowledge Management Section */}
<div className="settings-item"> <h3 style={{ fontSize: '14px', fontWeight: '600', marginBottom: '12px', color: '#374151' }}>
<div className="settings-item-info"> Knowledge Management
<span className="settings-item-label">Obsidian Vault</span> </h3>
<span className="settings-item-description">
{session.obsidianVaultName {/* Obsidian Vault - Local Files */}
? `Connected: ${session.obsidianVaultName}` <div
: 'Connect your Obsidian vault to import notes'} style={{
</span> padding: '12px',
</div> backgroundColor: '#f9fafb',
<div className="settings-item-status"> borderRadius: '8px',
<span className={`status-badge ${session.obsidianVaultName ? 'success' : 'warning'}`}> border: '1px solid #e5e7eb',
marginBottom: '12px',
}}
>
<div style={{ display: 'flex', alignItems: 'center', gap: '8px', marginBottom: '8px' }}>
<span style={{ fontSize: '20px' }}>📁</span>
<div style={{ flex: 1 }}>
<span style={{ fontSize: '13px', fontWeight: '600', color: '#1f2937' }}>Obsidian Vault (Local)</span>
<p style={{ fontSize: '11px', color: '#6b7280', marginTop: '2px' }}>
Import notes directly from your local Obsidian vault
</p>
</div>
<span className={`status-badge ${session.obsidianVaultName ? 'success' : 'warning'}`} style={{ fontSize: '10px' }}>
{session.obsidianVaultName ? 'Connected' : 'Not Set'} {session.obsidianVaultName ? 'Connected' : 'Not Set'}
</span> </span>
</div> </div>
{session.obsidianVaultName && (
<p style={{ fontSize: '11px', color: '#059669', marginBottom: '8px' }}>
Current vault: {session.obsidianVaultName}
</p>
)}
<button className="settings-action-btn" onClick={handleSetVault} style={{ width: '100%' }}>
{session.obsidianVaultName ? 'Change Vault' : 'Connect Vault'}
</button>
</div>
{/* Obsidian Quartz - Published Notes */}
<div
style={{
padding: '12px',
backgroundColor: '#f9fafb',
borderRadius: '8px',
border: '1px solid #e5e7eb',
marginBottom: '12px',
}}
>
<div style={{ display: 'flex', alignItems: 'center', gap: '8px', marginBottom: '8px' }}>
<span style={{ fontSize: '20px' }}>🌐</span>
<div style={{ flex: 1 }}>
<span style={{ fontSize: '13px', fontWeight: '600', color: '#1f2937' }}>Obsidian Quartz (Web)</span>
<p style={{ fontSize: '11px', color: '#6b7280', marginTop: '2px' }}>
Import notes from your published Quartz site via GitHub
</p>
</div>
<span className="status-badge success" style={{ fontSize: '10px' }}>
Available
</span>
</div>
<p style={{ fontSize: '11px', color: '#6b7280', marginBottom: '8px', lineHeight: '1.4' }}>
Quartz is a static site generator for Obsidian. If you publish your notes with Quartz, you can browse and import them here.
</p>
<a
href="https://quartz.jzhao.xyz/"
target="_blank"
rel="noopener noreferrer"
style={{
fontSize: '11px',
color: '#3b82f6',
textDecoration: 'none',
}}
>
Learn more about Quartz
</a>
</div> </div>
<button className="settings-action-btn" onClick={handleSetVault}>
{session.obsidianVaultName ? 'Change Vault' : 'Connect Vault'}
</button>
<div className="settings-divider" /> <div className="settings-divider" />
{/* Fathom API */} {/* Meeting & Communication Section */}
<div className="settings-item"> <h3 style={{ fontSize: '14px', fontWeight: '600', marginBottom: '12px', marginTop: '8px', color: '#374151' }}>
<div className="settings-item-info"> Meeting & Communication
<span className="settings-item-label">Fathom Meetings</span> </h3>
<span className="settings-item-description">
{hasFathomApiKey {/* Fathom Meetings */}
? 'Your Fathom account is connected' <div
: 'Connect Fathom to import meeting recordings'} style={{
</span> padding: '12px',
</div> backgroundColor: '#f9fafb',
<div className="settings-item-status"> borderRadius: '8px',
<span className={`status-badge ${hasFathomApiKey ? 'success' : 'warning'}`}> border: '1px solid #e5e7eb',
}}
>
<div style={{ display: 'flex', alignItems: 'center', gap: '8px', marginBottom: '8px' }}>
<span style={{ fontSize: '20px' }}>🎥</span>
<div style={{ flex: 1 }}>
<span style={{ fontSize: '13px', fontWeight: '600', color: '#1f2937' }}>Fathom Meetings</span>
<p style={{ fontSize: '11px', color: '#6b7280', marginTop: '2px' }}>
Import meeting transcripts and AI summaries
</p>
</div>
<span className={`status-badge ${hasFathomApiKey ? 'success' : 'warning'}`} style={{ fontSize: '10px' }}>
{hasFathomApiKey ? 'Connected' : 'Not Set'} {hasFathomApiKey ? 'Connected' : 'Not Set'}
</span> </span>
</div> </div>
</div>
{showFathomApiKeyInput ? ( {showFathomApiKeyInput ? (
<div className="settings-input-group"> <div style={{ marginTop: '8px' }}>
<input <input
type="password" type="password"
value={fathomApiKeyInput} value={fathomApiKeyInput}
onChange={(e) => setFathomApiKeyInput(e.target.value)} onChange={(e) => setFathomApiKeyInput(e.target.value)}
placeholder="Enter Fathom API key..." placeholder="Enter Fathom API key..."
className="settings-input" className="settings-input"
onKeyDown={(e) => { style={{ width: '100%', marginBottom: '8px' }}
if (e.key === 'Enter' && fathomApiKeyInput.trim()) { onKeyDown={(e) => {
saveFathomApiKey(fathomApiKeyInput.trim(), session.username) if (e.key === 'Enter' && fathomApiKeyInput.trim()) {
setHasFathomApiKey(true)
setShowFathomApiKeyInput(false)
setFathomApiKeyInput('')
} else if (e.key === 'Escape') {
setShowFathomApiKeyInput(false)
setFathomApiKeyInput('')
}
}}
autoFocus
/>
<div className="settings-input-actions">
<button
className="settings-btn-sm primary"
onClick={() => {
if (fathomApiKeyInput.trim()) {
saveFathomApiKey(fathomApiKeyInput.trim(), session.username) saveFathomApiKey(fathomApiKeyInput.trim(), session.username)
setHasFathomApiKey(true) setHasFathomApiKey(true)
setShowFathomApiKeyInput(false) setShowFathomApiKeyInput(false)
setFathomApiKeyInput('') setFathomApiKeyInput('')
} else if (e.key === 'Escape') {
setShowFathomApiKeyInput(false)
setFathomApiKeyInput('')
} }
}} }}
> autoFocus
Save />
</button> <div style={{ display: 'flex', gap: '8px' }}>
<button <button
className="settings-btn-sm" className="settings-btn-sm primary"
onClick={() => { style={{ flex: 1 }}
setShowFathomApiKeyInput(false) onClick={() => {
setFathomApiKeyInput('') if (fathomApiKeyInput.trim()) {
saveFathomApiKey(fathomApiKeyInput.trim(), session.username)
setHasFathomApiKey(true)
setShowFathomApiKeyInput(false)
setFathomApiKeyInput('')
}
}}
>
Save
</button>
<button
className="settings-btn-sm"
style={{ flex: 1 }}
onClick={() => {
setShowFathomApiKeyInput(false)
setFathomApiKeyInput('')
}}
>
Cancel
</button>
</div>
<a
href="https://app.usefathom.com/settings/integrations"
target="_blank"
rel="noopener noreferrer"
style={{
display: 'block',
fontSize: '11px',
color: '#3b82f6',
textDecoration: 'none',
marginTop: '8px',
}} }}
> >
Cancel Get your API key from Fathom Settings
</button> </a>
</div> </div>
</div> ) : (
) : ( <div style={{ display: 'flex', gap: '8px', marginTop: '8px' }}>
<div className="settings-button-group">
<button
className="settings-action-btn"
onClick={() => {
setShowFathomApiKeyInput(true)
const currentKey = getFathomApiKey(session.username)
if (currentKey) setFathomApiKeyInput(currentKey)
}}
>
{hasFathomApiKey ? 'Change API Key' : 'Add API Key'}
</button>
{hasFathomApiKey && (
<button <button
className="settings-action-btn secondary" className="settings-action-btn"
style={{ flex: 1 }}
onClick={() => { onClick={() => {
removeFathomApiKey(session.username) setShowFathomApiKeyInput(true)
setHasFathomApiKey(false) const currentKey = getFathomApiKey(session.username)
if (currentKey) setFathomApiKeyInput(currentKey)
}} }}
> >
Disconnect {hasFathomApiKey ? 'Change API Key' : 'Add API Key'}
</button> </button>
)} {hasFathomApiKey && (
</div> <button
)} className="settings-action-btn secondary"
onClick={() => {
removeFathomApiKey(session.username)
setHasFathomApiKey(false)
}}
>
Disconnect
</button>
)}
</div>
)}
</div>
{/* Future Integrations Placeholder */}
<div style={{ marginTop: '16px', padding: '12px', backgroundColor: '#f8fafc', borderRadius: '6px', border: '1px dashed #cbd5e1' }}>
<p style={{ fontSize: '12px', color: '#64748b', textAlign: 'center' }}>
More integrations coming soon: Google Calendar, Notion, and more
</p>
</div>
</div> </div>
)} )}
</div> </div>

View File

@ -0,0 +1,370 @@
import { Editor, Vec, createShapeId, AssetRecordType, getHashForString } from "tldraw"
import { WORKER_URL } from "../constants/workerUrl"
// Matches every http(s) URL inside a blob of pasted text
const URL_REGEX = /https?:\/\/[^\s<>"{}|\\^`\[\]]+/gi

// Recognized image file extensions (checked against the URL path)
const IMAGE_EXTENSIONS = /\.(jpg|jpeg|png|gif|webp|svg|bmp|ico)$/i

/**
 * Check if a URL points to an image, judged purely by the file
 * extension of its path component (query strings are ignored).
 */
function isImageUrl(url: string): boolean {
  let pathname: string
  try {
    pathname = new URL(url).pathname
  } catch {
    // Not a parseable URL at all
    return false
  }
  return IMAGE_EXTENSIONS.test(pathname)
}

/**
 * Extract every distinct URL found in a string of text,
 * preserving first-seen order.
 */
function extractUrls(text: string): string[] {
  const seen = new Set<string>()
  for (const match of text.match(URL_REGEX) ?? []) {
    seen.add(match)
  }
  return Array.from(seen)
}
/**
 * Unfurl a bookmark URL: ask the worker's /unfurl endpoint for the
 * page's title, description, preview image, and favicon.
 *
 * Never throws — any network or server failure resolves to
 * empty-string metadata so callers can proceed with a bare bookmark.
 */
async function unfurlUrl(url: string): Promise<{
  title: string
  description: string
  image: string
  favicon: string
}> {
  const emptyMetadata = { title: '', description: '', image: '', favicon: '' }
  try {
    const endpoint = `${WORKER_URL}/unfurl?url=${encodeURIComponent(url)}`
    const response = await fetch(endpoint)
    if (!response.ok) {
      return emptyMetadata
    }
    return await response.json()
  } catch {
    return emptyMetadata
  }
}
/**
 * Create a bookmark shape on the canvas for a URL.
 *
 * The backing bookmark asset is keyed by a hash of the URL, so
 * repeated pastes of the same link share one asset; metadata is
 * unfurled only the first time the URL is seen.
 */
async function createBookmarkForUrl(
  editor: Editor,
  url: string,
  position: Vec
): Promise<void> {
  const assetId = AssetRecordType.createId(getHashForString(url))

  // Only unfurl and register the asset on first sight of this URL
  if (!editor.getAsset(assetId)) {
    const meta = await unfurlUrl(url)
    editor.createAssets([{
      id: assetId,
      typeName: 'asset',
      type: 'bookmark',
      meta: {},
      props: {
        src: url,
        title: meta.title || url,
        description: meta.description || '',
        image: meta.image || '',
        favicon: meta.favicon || '',
      },
    }])
  }

  editor.createShape({
    id: createShapeId(),
    type: 'bookmark',
    x: position.x,
    y: position.y,
    props: { assetId, url },
  })
}
/**
 * Create an image shape on the canvas for an image URL.
 *
 * The backing asset is keyed by a hash of the URL so repeated pastes
 * share one asset. On first sight the image is probed for its natural
 * dimensions (falling back to 300x200 when it cannot be loaded).
 *
 * Fixes over the naive version: the MIME type is derived from the
 * URL's file extension instead of always claiming 'image/jpeg', and
 * the extension/name checks use the URL path so query strings and
 * fragments (e.g. "photo.gif?w=100") don't defeat them.
 */
async function createImageForUrl(
  editor: Editor,
  url: string,
  position: Vec
): Promise<void> {
  const assetId = AssetRecordType.createId(getHashForString(url))

  // Inspect only the path component; fall back to the raw string
  // for non-standard URLs.
  let pathname = url
  try {
    pathname = new URL(url).pathname
  } catch {
    // keep raw url as the best-effort path
  }
  const extension = pathname.split('.').pop()?.toLowerCase() ?? ''
  const mimeByExtension: Record<string, string> = {
    jpg: 'image/jpeg',
    jpeg: 'image/jpeg',
    png: 'image/png',
    gif: 'image/gif',
    webp: 'image/webp',
    svg: 'image/svg+xml',
    bmp: 'image/bmp',
    ico: 'image/x-icon',
  }

  const existingAsset = editor.getAsset(assetId)
  if (!existingAsset) {
    // Probe the image for its natural dimensions
    let w = 300
    let h = 200
    try {
      const img = new Image()
      img.crossOrigin = 'anonymous'
      await new Promise<void>((resolve, reject) => {
        img.onload = () => {
          w = img.naturalWidth
          h = img.naturalHeight
          resolve()
        }
        img.onerror = () => reject(new Error(`Failed to load image: ${url}`))
        img.src = url
      })
    } catch {
      // Use default dimensions (CORS, 404, etc.)
    }

    editor.createAssets([{
      id: assetId,
      typeName: 'asset',
      type: 'image',
      meta: {},
      props: {
        src: url,
        w,
        h,
        // Derive from the extension; JPEG remains the fallback for
        // unknown extensions to match previous behavior.
        mimeType: mimeByExtension[extension] ?? 'image/jpeg',
        name: pathname.split('/').pop() || 'image',
        isAnimated: extension === 'gif',
      },
    }])
  }

  const asset = editor.getAsset(assetId)
  const props = asset?.props as { w?: number; h?: number } | undefined
  editor.createShape({
    id: createShapeId(),
    type: 'image',
    x: position.x,
    y: position.y,
    props: {
      assetId,
      w: props?.w || 300,
      h: props?.h || 200,
    },
  })
}
/**
 * Create an image shape from a pasted File.
 *
 * The file is embedded inline as a data URL (no upload); the asset id
 * is derived from a hash of the first 100 data-URL characters plus the
 * file name, so identical pastes of the same file reuse the same id.
 * Dimensions come from probing the image, falling back to 300x200 when
 * the image cannot be decoded.
 */
async function createImageFromFile(
  editor: Editor,
  file: File,
  position: Vec
): Promise<void> {
  // Read file as data URL (base64-embedded content)
  const dataUrl = await new Promise<string>((resolve, reject) => {
    const reader = new FileReader()
    reader.onload = () => resolve(reader.result as string)
    reader.onerror = reject
    reader.readAsDataURL(file)
  })
  // Hash only a prefix of the (potentially huge) data URL, salted
  // with the file name, to keep id derivation cheap
  const assetId = AssetRecordType.createId(getHashForString(dataUrl.slice(0, 100) + file.name))
  // Get image dimensions by decoding the data URL
  let w = 300
  let h = 200
  try {
    const img = new Image()
    await new Promise<void>((resolve, reject) => {
      img.onload = () => {
        w = img.naturalWidth
        h = img.naturalHeight
        resolve()
      }
      img.onerror = reject
      img.src = dataUrl
    })
  } catch {
    // Use default dimensions
  }
  editor.createAssets([{
    id: assetId,
    typeName: 'asset',
    type: 'image',
    meta: {},
    props: {
      src: dataUrl,
      w,
      h,
      // NOTE(review): assumes JPEG when the browser reports no MIME
      // type for the file — confirm this default is desired
      mimeType: file.type || 'image/jpeg',
      name: file.name,
      isAnimated: file.type === 'image/gif',
    },
  }])
  editor.createShape({
    id: createShapeId(),
    type: 'image',
    x: position.x,
    y: position.y,
    props: {
      assetId,
      w,
      h,
    },
  })
}
/**
 * Layout configuration used when spreading multiple pasted items
 * out into a grid.
 */
const GRID_CONFIG = {
  spacing: 20, // Space between items
  itemWidth: 320, // Default width per item
  itemHeight: 240, // Default height per item
  maxColumns: 5, // Maximum items per row
}

/**
 * Compute the top-left position for each of `count` items, laid out
 * in a grid that fills rows left-to-right starting at `startPosition`.
 */
function calculateGridPositions(
  startPosition: Vec,
  count: number
): Vec[] {
  const columns = Math.min(count, GRID_CONFIG.maxColumns)
  const cellWidth = GRID_CONFIG.itemWidth + GRID_CONFIG.spacing
  const cellHeight = GRID_CONFIG.itemHeight + GRID_CONFIG.spacing
  return Array.from({ length: count }, (_, index) => {
    const col = index % columns
    const row = Math.floor(index / columns)
    return new Vec(
      startPosition.x + col * cellWidth,
      startPosition.y + row * cellHeight
    )
  })
}
/**
 * Install a document-level paste handler that supports pasting
 * multiple items at once (several image files and/or several URLs).
 *
 * When the clipboard yields more than one item, default handling is
 * suppressed and the items are placed in a grid around the viewport
 * center: image files first, then URLs (image URLs become image
 * shapes, everything else becomes a bookmark). Single items are left
 * for tldraw's own paste handling.
 *
 * @param editor - the tldraw editor to create shapes in
 * @returns a cleanup function that removes the listener
 */
export function setupMultiPasteHandler(editor: Editor): () => void {
  const handlePaste = async (e: ClipboardEvent) => {
    // Don't intercept if user is typing in an input
    const activeElement = document.activeElement
    if (
      activeElement instanceof HTMLInputElement ||
      activeElement instanceof HTMLTextAreaElement ||
      (activeElement instanceof HTMLElement && activeElement.isContentEditable)
    ) {
      return
    }
    const clipboardData = e.clipboardData
    if (!clipboardData) return
    // Get paste position (center of viewport)
    const viewportBounds = editor.getViewportPageBounds()
    const center = viewportBounds.center
    // Collect all items to paste
    const imageFiles: File[] = []
    const urls: string[] = []
    // Check for files; only image files are handled here
    if (clipboardData.files.length > 0) {
      for (let i = 0; i < clipboardData.files.length; i++) {
        const file = clipboardData.files[i]
        if (file.type.startsWith('image/')) {
          imageFiles.push(file)
        }
      }
    }
    // Check for text content that might contain URLs
    const textData = clipboardData.getData('text/plain')
    if (textData) {
      const extractedUrls = extractUrls(textData)
      urls.push(...extractedUrls)
    }
    // Check for URL data type
    const urlData = clipboardData.getData('text/uri-list')
    if (urlData) {
      // URI list can contain multiple URLs separated by newlines;
      // lines starting with '#' are comments per RFC 2483
      const uriUrls = urlData.split('\n').filter(line => line.trim() && !line.startsWith('#'))
      for (const url of uriUrls) {
        if (!urls.includes(url.trim())) {
          urls.push(url.trim())
        }
      }
    }
    // If we have multiple items, handle them ourselves
    const totalItems = imageFiles.length + urls.length
    if (totalItems > 1) {
      // Prevent default tldraw handling
      e.preventDefault()
      e.stopPropagation()
      console.log(`📋 Multi-paste: ${imageFiles.length} images, ${urls.length} URLs`)
      // Calculate grid positions
      const positions = calculateGridPositions(center, totalItems)
      let positionIndex = 0
      // Batch all shape creation in a single history entry
      editor.mark('multi-paste')
      // Process image files first (they occupy the leading grid cells)
      for (const file of imageFiles) {
        const position = positions[positionIndex++]
        try {
          await createImageFromFile(editor, file, position)
        } catch (err) {
          // Best-effort: one bad item must not abort the rest
          console.error('Failed to create image from file:', err)
        }
      }
      // Process URLs
      for (const url of urls) {
        const position = positions[positionIndex++]
        try {
          if (isImageUrl(url)) {
            await createImageForUrl(editor, url, position)
          } else {
            await createBookmarkForUrl(editor, url, position)
          }
        } catch (err) {
          console.error('Failed to create shape for URL:', err)
        }
      }
      return
    }
    // For single items, let tldraw handle it normally
    // (don't prevent default)
  }
  // Capture phase so we run before tldraw's own paste listener
  document.addEventListener('paste', handlePaste, { capture: true })
  // Return cleanup function
  return () => {
    document.removeEventListener('paste', handlePaste, { capture: true })
  }
}

View File

@ -0,0 +1,788 @@
/**
* Selection Transforms Utility
* Provides batch operations on selected shapes for the Mycelial Intelligence
*
* Capabilities:
* - Alignment (horizontal, vertical, distribute)
* - Size normalization
* - Grid/row arrangement
* - Semantic clustering (AI-powered grouping)
* - Content aggregation and transformation
*/
import { Editor, TLShape, TLShapeId, Box, createShapeId } from 'tldraw'
import { extractShapeText } from '@/lib/semanticSearch'
/**
 * Information about a selected shape for transformations.
 * One entry per selected shape, produced by getSelectionInfo().
 */
export interface SelectionInfo {
  // tldraw id of the shape
  id: TLShapeId
  // The full shape record as currently stored by the editor
  shape: TLShape
  // Page-space bounds (falls back to a default box when unavailable)
  bounds: Box
  // Plain text extracted from the shape ('' when it has none)
  text: string
  // tldraw shape type, e.g. 'geo', 'note', 'arrow'
  type: string
}
/**
 * Get detailed info about currently selected shapes.
 *
 * Shapes whose page bounds cannot be computed fall back to a 100x100
 * box at the shape's own (x, y), so every selected shape is always
 * represented in the result. (The previous trailing
 * `.filter(info => info.bounds !== null)` was dead code for the same
 * reason — the fallback guarantees non-null bounds.)
 */
export function getSelectionInfo(editor: Editor): SelectionInfo[] {
  return editor.getSelectedShapes().map(shape => ({
    id: shape.id,
    shape,
    // Fallback guarantees a usable Box for every entry
    bounds: editor.getShapePageBounds(shape.id) ?? new Box(shape.x, shape.y, 100, 100),
    text: extractShapeText(shape),
    type: shape.type,
  }))
}
/**
 * Summarize the current selection for AI context: how many shapes,
 * a per-type tally, their combined text, short previews, and the
 * selection's page bounds.
 */
export function getSelectionSummary(editor: Editor): {
  count: number
  types: Record<string, number>
  totalText: string
  textPreviews: string[]
  bounds: Box | null
  hasContent: boolean
} {
  const infos = getSelectionInfo(editor)

  if (infos.length === 0) {
    return {
      count: 0,
      types: {},
      totalText: '',
      textPreviews: [],
      bounds: null,
      hasContent: false,
    }
  }

  // Tally how many shapes of each type are selected
  const types: Record<string, number> = {}
  infos.forEach(info => {
    types[info.type] = (types[info.type] ?? 0) + 1
  })

  // Gather the non-empty text of every selected shape
  const texts = infos.map(info => info.text).filter(text => text.length > 0)
  const totalText = texts.join('\n\n')
  // Previews are capped at 200 characters with an ellipsis
  const textPreviews = texts.map(text =>
    text.length > 200 ? `${text.slice(0, 200)}...` : text
  )

  return {
    count: infos.length,
    types,
    totalText,
    textPreviews,
    bounds: editor.getSelectionPageBounds(),
    hasContent: totalText.length > 0,
  }
}
// =============================================================================
// ALIGNMENT OPERATIONS
// =============================================================================
export type AlignmentType = 'left' | 'center' | 'right' | 'top' | 'middle' | 'bottom'
/**
 * Align selected shapes along an edge or centerline of the
 * selection's combined bounding box.
 *
 * No-op when fewer than two shapes are selected or when selection
 * bounds are unavailable. Shapes already in place are not updated.
 */
export function alignSelection(editor: Editor, alignment: AlignmentType): void {
  const infos = getSelectionInfo(editor)
  if (infos.length < 2) return

  const bounds = editor.getSelectionPageBounds()
  if (!bounds) return

  // Carry the shape type with each pending update so we never need a
  // second getShape() lookup (or a non-null assertion) when applying.
  const updates: { id: TLShapeId; type: string; x: number; y: number }[] = []

  for (const info of infos) {
    let newX = info.bounds.x
    let newY = info.bounds.y

    switch (alignment) {
      case 'left':
        newX = bounds.x
        break
      case 'center':
        newX = bounds.x + (bounds.w - info.bounds.w) / 2
        break
      case 'right':
        newX = bounds.x + bounds.w - info.bounds.w
        break
      case 'top':
        newY = bounds.y
        break
      case 'middle':
        newY = bounds.y + (bounds.h - info.bounds.h) / 2
        break
      case 'bottom':
        newY = bounds.y + bounds.h - info.bounds.h
        break
    }

    // Skip shapes that would not move
    if (newX !== info.bounds.x || newY !== info.bounds.y) {
      updates.push({ id: info.id, type: info.shape.type, x: newX, y: newY })
    }
  }

  // Apply all moves
  for (const update of updates) {
    editor.updateShape({
      id: update.id,
      type: update.type,
      x: update.x,
      y: update.y,
    })
  }
}
/**
* Distribute shapes evenly (horizontal or vertical)
*/
export function distributeSelection(
editor: Editor,
direction: 'horizontal' | 'vertical',
gap?: number
): void {
const infos = getSelectionInfo(editor)
if (infos.length < 3) return
// Sort by position
const sorted = [...infos].sort((a, b) =>
direction === 'horizontal'
? a.bounds.x - b.bounds.x
: a.bounds.y - b.bounds.y
)
const first = sorted[0]
const last = sorted[sorted.length - 1]
if (direction === 'horizontal') {
const totalWidth = sorted.reduce((sum, info) => sum + info.bounds.w, 0)
const availableSpace = (last.bounds.x + last.bounds.w) - first.bounds.x - totalWidth
const spacing = gap ?? availableSpace / (sorted.length - 1)
let currentX = first.bounds.x
for (const info of sorted) {
editor.updateShape({
id: info.id,
type: info.shape.type,
x: currentX,
})
currentX += info.bounds.w + spacing
}
} else {
const totalHeight = sorted.reduce((sum, info) => sum + info.bounds.h, 0)
const availableSpace = (last.bounds.y + last.bounds.h) - first.bounds.y - totalHeight
const spacing = gap ?? availableSpace / (sorted.length - 1)
let currentY = first.bounds.y
for (const info of sorted) {
editor.updateShape({
id: info.id,
type: info.shape.type,
y: currentY,
})
currentY += info.bounds.h + spacing
}
}
}
// =============================================================================
// SIZE NORMALIZATION
// =============================================================================
export type SizeMode = 'width' | 'height' | 'both' | 'smallest' | 'largest' | 'average'
/**
 * Normalize sizes of selected shapes.
 *
 * Target dimensions are resolved in this order: an explicit
 * `targetSize` wins (missing axes default to the first shape's size);
 * otherwise 'smallest'/'largest' take the min/max across the
 * selection, and any other mode — including 'width', 'height', and
 * 'both' — uses the per-axis average (they fall into the switch's
 * default case). The `mode` then decides which axes are written.
 *
 * NOTE(review): assumes every selected shape has numeric w/h props —
 * shapes without them (e.g. arrows, lines) may reject the update;
 * confirm against the shape schemas.
 */
export function normalizeSelectionSize(
  editor: Editor,
  mode: SizeMode,
  targetSize?: { w?: number; h?: number }
): void {
  const infos = getSelectionInfo(editor)
  if (infos.length < 2) return
  let targetW: number
  let targetH: number
  if (targetSize) {
    // Explicit target; unspecified axes default to the first shape
    targetW = targetSize.w ?? infos[0].bounds.w
    targetH = targetSize.h ?? infos[0].bounds.h
  } else {
    const widths = infos.map(i => i.bounds.w)
    const heights = infos.map(i => i.bounds.h)
    switch (mode) {
      case 'smallest':
        targetW = Math.min(...widths)
        targetH = Math.min(...heights)
        break
      case 'largest':
        targetW = Math.max(...widths)
        targetH = Math.max(...heights)
        break
      case 'average':
      default:
        // 'width', 'height', and 'both' also land here: average sizes
        targetW = widths.reduce((a, b) => a + b, 0) / widths.length
        targetH = heights.reduce((a, b) => a + b, 0) / heights.length
        break
    }
  }
  for (const info of infos) {
    const props: Record<string, number> = {}
    // Determine which dimensions to update based on mode
    const updateWidth = mode === 'width' || mode === 'both' || mode === 'smallest' || mode === 'largest' || mode === 'average'
    const updateHeight = mode === 'height' || mode === 'both' || mode === 'smallest' || mode === 'largest' || mode === 'average'
    if (updateWidth) props.w = targetW
    if (updateHeight) props.h = targetH
    if (Object.keys(props).length > 0) {
      editor.updateShape({
        id: info.id,
        type: info.shape.type,
        props,
      })
    }
  }
}
// =============================================================================
// ARRANGEMENT OPERATIONS
// =============================================================================
export type ArrangementType = 'row' | 'column' | 'grid' | 'circle' | 'stack'
/**
 * Arrange selected shapes in a pattern around the selection's center
 * (or `options.centerAt` when given).
 *
 * Patterns: 'row' and 'column' keep the shapes' current relative
 * order; 'grid' centers each shape in a uniform cell; 'circle'
 * spreads shapes evenly on a ring; 'stack' offsets them like a deck
 * of cards. No-op for fewer than two shapes or missing bounds.
 *
 * @param editor - tldraw editor holding the selection
 * @param arrangement - which layout pattern to apply
 * @param options.gap - spacing between items (default 20)
 * @param options.columns - grid columns (default: ceil(sqrt(n)))
 * @param options.centerAt - page point to center the layout on
 */
export function arrangeSelection(
  editor: Editor,
  arrangement: ArrangementType,
  options: {
    gap?: number
    columns?: number
    centerAt?: { x: number; y: number }
  } = {}
): void {
  const infos = getSelectionInfo(editor)
  if (infos.length < 2) return
  const gap = options.gap ?? 20
  const bounds = editor.getSelectionPageBounds()
  if (!bounds) return
  const centerX = options.centerAt?.x ?? (bounds.x + bounds.w / 2)
  const centerY = options.centerAt?.y ?? (bounds.y + bounds.h / 2)
  switch (arrangement) {
    case 'row': {
      // Sort by current x position to maintain relative order
      const sorted = [...infos].sort((a, b) => a.bounds.x - b.bounds.x)
      const totalWidth = sorted.reduce((sum, info) => sum + info.bounds.w, 0) + gap * (sorted.length - 1)
      let currentX = centerX - totalWidth / 2
      // Find the average y position so the row sits where shapes were
      const avgY = sorted.reduce((sum, info) => sum + info.bounds.y + info.bounds.h / 2, 0) / sorted.length
      for (const info of sorted) {
        editor.updateShape({
          id: info.id,
          type: info.shape.type,
          x: currentX,
          y: avgY - info.bounds.h / 2,
        })
        currentX += info.bounds.w + gap
      }
      break
    }
    case 'column': {
      // Sort by current y position to maintain relative order
      const sorted = [...infos].sort((a, b) => a.bounds.y - b.bounds.y)
      const totalHeight = sorted.reduce((sum, info) => sum + info.bounds.h, 0) + gap * (sorted.length - 1)
      let currentY = centerY - totalHeight / 2
      // Find the average x position
      const avgX = sorted.reduce((sum, info) => sum + info.bounds.x + info.bounds.w / 2, 0) / sorted.length
      for (const info of sorted) {
        editor.updateShape({
          id: info.id,
          type: info.shape.type,
          x: avgX - info.bounds.w / 2,
          y: currentY,
        })
        currentY += info.bounds.h + gap
      }
      break
    }
    case 'grid': {
      const columns = options.columns ?? Math.ceil(Math.sqrt(infos.length))
      const rows = Math.ceil(infos.length / columns)
      // Calculate max dimensions for uniform spacing
      const maxW = Math.max(...infos.map(i => i.bounds.w))
      const maxH = Math.max(...infos.map(i => i.bounds.h))
      const gridW = columns * maxW + (columns - 1) * gap
      const gridH = rows * maxH + (rows - 1) * gap
      const startX = centerX - gridW / 2
      const startY = centerY - gridH / 2
      infos.forEach((info, i) => {
        const col = i % columns
        const row = Math.floor(i / columns)
        // Center each shape in its grid cell
        const cellX = startX + col * (maxW + gap)
        const cellY = startY + row * (maxH + gap)
        editor.updateShape({
          id: info.id,
          type: info.shape.type,
          x: cellX + (maxW - info.bounds.w) / 2,
          y: cellY + (maxH - info.bounds.h) / 2,
        })
      })
      break
    }
    case 'circle': {
      // Radius grows with the number of shapes, never below 200
      const radius = Math.max(200, infos.length * 50)
      infos.forEach((info, i) => {
        const angle = (i / infos.length) * 2 * Math.PI - Math.PI / 2 // Start from top
        const x = centerX + radius * Math.cos(angle) - info.bounds.w / 2
        const y = centerY + radius * Math.sin(angle) - info.bounds.h / 2
        editor.updateShape({
          id: info.id,
          type: info.shape.type,
          x,
          y,
        })
      })
      break
    }
    case 'stack': {
      // Stack shapes with slight offset (like a deck of cards)
      const offsetX = 20
      const offsetY = 20
      infos.forEach((info, i) => {
        editor.updateShape({
          id: info.id,
          type: info.shape.type,
          x: centerX - info.bounds.w / 2 + i * offsetX,
          y: centerY - info.bounds.h / 2 + i * offsetY,
        })
      })
      break
    }
  }
}
// =============================================================================
// CONTENT OPERATIONS
// =============================================================================
/**
 * Merge text content from the selected shapes into a single string,
 * optionally materializing the result as a new Markdown shape.
 *
 * @param editor - The tldraw editor instance.
 * @param options.format - How texts are joined: 'list' (bulleted),
 *   'numbered', 'paragraphs' (blank-line separated), or 'combined'
 *   (space-joined; also the fallback when unspecified).
 * @param options.createNew - When true, create a Markdown shape holding the
 *   merged content (placed 50px right of the selection unless overridden).
 * @param options.position - Explicit placement for the new shape.
 * @returns The merged text, or '' when no selected shape has text.
 */
export function mergeSelectionContent(
  editor: Editor,
  options: {
    format?: 'list' | 'paragraphs' | 'numbered' | 'combined'
    createNew?: boolean
    position?: { x: number; y: number }
  } = {}
): string {
  const texts = getSelectionInfo(editor)
    .map(info => info.text)
    .filter(text => text.length > 0)
  if (texts.length === 0) return ''

  let merged: string
  if (options.format === 'list') {
    merged = texts.map(t => `- ${t}`).join('\n')
  } else if (options.format === 'numbered') {
    merged = texts.map((t, i) => `${i + 1}. ${t}`).join('\n')
  } else if (options.format === 'paragraphs') {
    merged = texts.join('\n\n')
  } else {
    // 'combined' and any unrecognized format fall back to space-joining
    merged = texts.join(' ')
  }

  // Optionally spawn a Markdown shape carrying the merged text
  if (options.createNew) {
    const selectionBounds = editor.getSelectionPageBounds()
    const target = options.position ?? {
      x: selectionBounds ? selectionBounds.x + selectionBounds.w + 50 : 0,
      y: selectionBounds ? selectionBounds.y : 0,
    }
    editor.createShape({
      id: createShapeId(),
      type: 'Markdown',
      x: target.x,
      y: target.y,
      props: {
        w: 400,
        h: 300,
        content: merged,
      },
    })
  }
  return merged
}
/**
 * Build a markdown summary of the current selection suitable for injection
 * into an AI prompt as context.
 *
 * @param editor - The tldraw editor instance.
 * @returns A markdown block describing selection counts/types plus any text
 *   previews, or '' when nothing is selected.
 */
export function getSelectionAsContext(editor: Editor): string {
  const summary = getSelectionSummary(editor)
  if (summary.count === 0) {
    return ''
  }

  // e.g. "3 Markdowns, 1 ChatBox" — pluralize each type name by its count
  const typeDescription = Object.entries(summary.types)
    .map(([type, count]) => (count > 1 ? `${count} ${type}s` : `${count} ${type}`))
    .join(', ')

  const header = `## Currently Selected (${summary.count} shapes: ${typeDescription})\n\n`
  if (!summary.hasContent) {
    return header
  }
  const bulletList = summary.textPreviews.map(preview => `- ${preview}\n`).join('')
  return header + `### Content from selected shapes:\n` + bulletList
}
// =============================================================================
// SEMANTIC CLUSTERING (requires AI integration)
// =============================================================================
/** A group of shapes produced by content clustering. */
export interface ClusterGroup {
  /** Human-readable name for the cluster (e.g. "Markdown group"). */
  label: string
  /** Selection info for every shape assigned to this cluster. */
  shapes: SelectionInfo[]
  /** Optional page position where the cluster could be placed. */
  suggestedPosition?: { x: number; y: number }
}
/**
 * Group the selected shapes by semantic similarity.
 * Returns groups with suggested labels and positions.
 *
 * This is a placeholder: it currently buckets by shape type. A real
 * implementation would cluster on content embeddings from semanticSearch.
 *
 * @param editor - The tldraw editor instance.
 * @param _numClusters - Reserved for a future embedding-based implementation.
 * @returns One ClusterGroup per bucket; fewer than 3 selected shapes yields
 *   a single 'All' group.
 */
export function clusterByContent(
  editor: Editor,
  _numClusters?: number
): ClusterGroup[] {
  const infos = getSelectionInfo(editor)
  if (infos.length < 3) {
    return [{ label: 'All', shapes: infos }]
  }

  // Heuristic: bucket shapes by their shape type
  const buckets = new Map<string, SelectionInfo[]>()
  for (const info of infos) {
    const bucket = buckets.get(info.type)
    if (bucket) {
      bucket.push(info)
    } else {
      buckets.set(info.type, [info])
    }
  }

  // One cluster per type bucket, spread 500px apart horizontally
  return Array.from(buckets.entries()).map(([type, shapes], index) => ({
    label: `${type} group`,
    shapes,
    suggestedPosition: {
      x: index * 500,
      y: 0,
    },
  }))
}
/**
 * Lay out clusters of shapes on the canvas, starting at the page origin.
 * Within each cluster shapes are stacked vertically and centered on the
 * widest member; clusters advance rightwards by default, or downwards when
 * options.arrangement is 'column'.
 *
 * @param editor - The tldraw editor instance.
 * @param clusters - Cluster groups (e.g. from clusterByContent).
 * @param options.gap - Gap between shapes inside a cluster (default 20).
 * @param options.clusterGap - Gap between clusters (default 100).
 * @param options.arrangement - 'column' stacks clusters vertically;
 *   anything else lays them out left-to-right.
 */
export function arrangeIntoClusters(
  editor: Editor,
  clusters: ClusterGroup[],
  options: {
    gap?: number
    clusterGap?: number
    arrangement?: 'row' | 'column' | 'grid'
  } = {}
): void {
  const gap = options.gap ?? 20
  const clusterGap = options.clusterGap ?? 100
  const stackClustersVertically = options.arrangement === 'column'

  let cursorX = 0
  let cursorY = 0
  for (const cluster of clusters) {
    if (!cluster.shapes.length) continue

    // Measure the cluster: widest shape and total stacked height
    let widest = 0
    let stackedHeight = -gap
    for (const s of cluster.shapes) {
      widest = Math.max(widest, s.bounds.w)
      stackedHeight += s.bounds.h + gap
    }

    // Place shapes top-to-bottom, horizontally centered in the cluster
    let shapeY = cursorY
    for (const info of cluster.shapes) {
      editor.updateShape({
        id: info.id,
        type: info.shape.type,
        x: cursorX + (widest - info.bounds.w) / 2,
        y: shapeY,
      })
      shapeY += info.bounds.h + gap
    }

    // Advance the cursor to the next cluster slot
    if (stackClustersVertically) {
      cursorY += stackedHeight + clusterGap
    } else {
      cursorX += widest + clusterGap
    }
  }
}
// =============================================================================
// HIGH-LEVEL TRANSFORMATION COMMANDS
// =============================================================================
/**
 * Identifiers for every selection transformation that
 * executeTransformCommand can dispatch, grouped by category:
 * alignment, distribution, arrangement, size normalization, and content.
 */
export type TransformCommand =
  | 'align-left' | 'align-center' | 'align-right'
  | 'align-top' | 'align-middle' | 'align-bottom'
  | 'distribute-horizontal' | 'distribute-vertical'
  | 'arrange-row' | 'arrange-column' | 'arrange-grid' | 'arrange-circle'
  | 'size-match-width' | 'size-match-height' | 'size-match-both'
  | 'size-smallest' | 'size-largest'
  | 'merge-content' | 'cluster-semantic'
/**
 * Execute a transformation command on the current selection.
 *
 * @param editor - The tldraw editor instance.
 * @param command - Which transformation to run (see TransformCommand).
 * @param options - Optional per-command settings, e.g. `gap` for
 *   distribution/arrangement or `columns` for grids.
 * @returns false when nothing is selected or the command is unknown,
 *   true otherwise.
 */
export function executeTransformCommand(
  editor: Editor,
  command: TransformCommand,
  options?: Record<string, unknown>
): boolean {
  const infos = getSelectionInfo(editor)
  if (infos.length === 0) {
    console.warn('No shapes selected for transformation')
    return false
  }
  switch (command) {
    // Alignment
    case 'align-left':
      alignSelection(editor, 'left')
      break
    case 'align-center':
      alignSelection(editor, 'center')
      break
    case 'align-right':
      alignSelection(editor, 'right')
      break
    case 'align-top':
      alignSelection(editor, 'top')
      break
    case 'align-middle':
      alignSelection(editor, 'middle')
      break
    case 'align-bottom':
      alignSelection(editor, 'bottom')
      break
    // Distribution
    case 'distribute-horizontal':
      distributeSelection(editor, 'horizontal', options?.gap as number)
      break
    case 'distribute-vertical':
      distributeSelection(editor, 'vertical', options?.gap as number)
      break
    // Arrangement
    case 'arrange-row':
      arrangeSelection(editor, 'row', options as { gap?: number })
      break
    case 'arrange-column':
      arrangeSelection(editor, 'column', options as { gap?: number })
      break
    case 'arrange-grid':
      arrangeSelection(editor, 'grid', options as { gap?: number; columns?: number })
      break
    case 'arrange-circle':
      arrangeSelection(editor, 'circle', options as { centerAt?: { x: number; y: number } })
      break
    // Size normalization
    case 'size-match-width':
      normalizeSelectionSize(editor, 'width')
      break
    case 'size-match-height':
      normalizeSelectionSize(editor, 'height')
      break
    case 'size-match-both':
      normalizeSelectionSize(editor, 'both')
      break
    case 'size-smallest':
      normalizeSelectionSize(editor, 'smallest')
      break
    case 'size-largest':
      normalizeSelectionSize(editor, 'largest')
      break
    // Content operations
    case 'merge-content':
      mergeSelectionContent(editor, { createNew: true, format: 'paragraphs' })
      break
    case 'cluster-semantic': {
      // Braced so the lexical `const` is scoped to this case only
      // (fixes the no-case-declarations error: an unbraced declaration
      // leaks into the whole switch scope and fails under ESLint/TS).
      const clusters = clusterByContent(editor)
      arrangeIntoClusters(editor, clusters)
      break
    }
    default:
      console.warn(`Unknown transform command: ${command}`)
      return false
  }
  return true
}
/**
 * Parse a natural-language instruction into a transform command.
 *
 * @param intent - Free-form user text, e.g. "line these up in a grid".
 * @returns The matched command (or null when nothing matched) plus any
 *   options extracted from the text (currently just `columns` for grids).
 */
export function parseTransformIntent(intent: string): {
  command: TransformCommand | null
  options: Record<string, unknown>
} {
  const intentLower = intent.toLowerCase()
  const options: Record<string, unknown> = {}

  // Alignment patterns
  if (intentLower.match(/align.*(left|start)/)) {
    return { command: 'align-left', options }
  }
  if (intentLower.match(/align.*(right|end)/)) {
    return { command: 'align-right', options }
  }
  // Vertical centering is tested before horizontal centering: the broad
  // "center.*align" alternative below would otherwise swallow phrases
  // like "vertically center align".
  if (intentLower.match(/align.*(center|middle).*vertical|vertically.*center|middle.*align/)) {
    return { command: 'align-middle', options }
  }
  if (intentLower.match(/align.*(center|middle).*horizontal|center.*align|horizontally.*center/)) {
    return { command: 'align-center', options }
  }
  if (intentLower.match(/align.*top/)) {
    return { command: 'align-top', options }
  }
  if (intentLower.match(/align.*bottom/)) {
    return { command: 'align-bottom', options }
  }

  // Distribution patterns
  if (intentLower.match(/distribute.*horizontal|spread.*out.*horizontal|space.*horizontal/)) {
    return { command: 'distribute-horizontal', options }
  }
  if (intentLower.match(/distribute.*vertical|spread.*out.*vertical|space.*vertical/)) {
    return { command: 'distribute-vertical', options }
  }

  // Arrangement patterns
  // Grid is tested before row/column so "arrange in a 3 column grid"
  // resolves to a grid (the column pattern would otherwise match first).
  if (intentLower.match(/arrange.*grid|put.*grid|tile|organize.*grid/)) {
    // Extract column count if specified, e.g. "3 columns"
    const colMatch = intentLower.match(/(\d+)\s*col/)
    if (colMatch) {
      options.columns = parseInt(colMatch[1], 10)
    }
    return { command: 'arrange-grid', options }
  }
  if (intentLower.match(/arrange.*row|put.*row|line.*up.*horizontal|horizontal.*row/)) {
    return { command: 'arrange-row', options }
  }
  if (intentLower.match(/arrange.*column|put.*column|line.*up.*vertical|vertical.*column|stack/)) {
    return { command: 'arrange-column', options }
  }
  if (intentLower.match(/arrange.*circle|circular|radial|around/)) {
    return { command: 'arrange-circle', options }
  }

  // Size patterns
  if (intentLower.match(/same.*width|match.*width|equal.*width/)) {
    return { command: 'size-match-width', options }
  }
  if (intentLower.match(/same.*height|match.*height|equal.*height/)) {
    return { command: 'size-match-height', options }
  }
  if (intentLower.match(/same.*size|match.*size|equal.*size|uniform|consistent.*size/)) {
    return { command: 'size-match-both', options }
  }
  if (intentLower.match(/smallest|shrink.*to.*smallest|make.*small/)) {
    return { command: 'size-smallest', options }
  }
  if (intentLower.match(/largest|expand.*to.*largest|make.*large|make.*big/)) {
    return { command: 'size-largest', options }
  }

  // Content patterns
  if (intentLower.match(/merge|combine|consolidate|aggregate/)) {
    return { command: 'merge-content', options }
  }
  if (intentLower.match(/cluster|group.*by.*content|semantic.*group|organize.*by.*topic/)) {
    return { command: 'cluster-semantic', options }
  }

  return { command: null, options }
}

315
src/utils/toolSpawner.ts Normal file
View File

@ -0,0 +1,315 @@
/**
* Tool Spawner Utility
* Handles spawning tool shapes on the canvas from the Mycelial Intelligence
*/
import { Editor, TLShapeId, createShapeId } from 'tldraw'
import { ToolSchema, TOOL_SCHEMAS } from '@/lib/toolSchema'
import { findNonOverlappingPosition } from './shapeCollisionUtils'
/**
 * Default dimensions for each tool type.
 * Keys are tool/shape type ids; values are the default width/height (in
 * canvas page units) used when spawning that tool. Call sites fall back
 * to { w: 300, h: 400 } for ids missing from this map.
 */
const TOOL_DIMENSIONS: Record<string, { w: number; h: number }> = {
  Prompt: { w: 300, h: 500 },
  ImageGen: { w: 400, h: 450 },
  VideoGen: { w: 400, h: 350 },
  ChatBox: { w: 400, h: 500 },
  Markdown: { w: 400, h: 400 },
  ObsNote: { w: 280, h: 200 },
  Transcription: { w: 320, h: 400 },
  Embed: { w: 600, h: 400 },
  Holon: { w: 600, h: 500 },
  Multmux: { w: 600, h: 400 },
  Slide: { w: 800, h: 600 },
}
/**
 * Arrangement patterns for spawning multiple tools
 */
export type ArrangementPattern = 'horizontal' | 'vertical' | 'grid' | 'radial' | 'cascade'

/** Options controlling where and how spawned tool shapes are placed. */
interface SpawnOptions {
  /** Where to center the spawned tools (defaults to viewport center) */
  centerPosition?: { x: number; y: number }
  /** How to arrange multiple tools */
  arrangement?: ArrangementPattern
  /** Spacing between tools */
  spacing?: number
  /** Whether to animate the spawn */
  // NOTE(review): `animate` is never read by spawnTool/spawnTools in this
  // file — confirm whether it is consumed elsewhere or is dead.
  animate?: boolean
  /** Whether to select the spawned shapes */
  selectAfterSpawn?: boolean
  /** Whether to zoom to show all spawned shapes */
  zoomToFit?: boolean
}

/** Defaults merged under caller-supplied options by spawnTools(). */
const DEFAULT_OPTIONS: SpawnOptions = {
  arrangement: 'horizontal',
  spacing: 30,
  animate: true,
  selectAfterSpawn: true,
  zoomToFit: false,
}
/**
 * Compute an { x, y, w, h } slot for every tool, laid out according to
 * `arrangement` and centered on (centerX, centerY).
 *
 * @param tools - Tools to place, in spawn order.
 * @param centerX - Horizontal center of the overall layout (page coords).
 * @param centerY - Vertical center of the overall layout (page coords).
 * @param arrangement - Layout pattern to apply.
 * @param spacing - Gap in page units between adjacent tools.
 * @returns One position entry per tool, index-aligned with `tools`.
 */
function calculatePositions(
  tools: ToolSchema[],
  centerX: number,
  centerY: number,
  arrangement: ArrangementPattern,
  spacing: number
): Array<{ x: number; y: number; w: number; h: number }> {
  // Look up a tool's default footprint, with a generic fallback.
  const dimsOf = (tool: ToolSchema) => TOOL_DIMENSIONS[tool.id] || { w: 300, h: 400 }
  const positions: Array<{ x: number; y: number; w: number; h: number }> = []

  switch (arrangement) {
    case 'horizontal': {
      // Single centered row. Total width is computed once and a running
      // cursor advances per tool (the previous version recomputed prefix
      // sums inside the loop, which was O(n^2)).
      const totalWidth = tools.reduce(
        (sum, t, idx) => sum + dimsOf(t).w + (idx < tools.length - 1 ? spacing : 0),
        0
      )
      let cursorX = centerX - totalWidth / 2
      for (const tool of tools) {
        const { w, h } = dimsOf(tool)
        positions.push({ x: cursorX, y: centerY - h / 2, w, h })
        cursorX += w + spacing
      }
      break
    }
    case 'vertical': {
      // Single centered column with a running vertical cursor.
      const totalHeight = tools.reduce(
        (sum, t, idx) => sum + dimsOf(t).h + (idx < tools.length - 1 ? spacing : 0),
        0
      )
      let cursorY = centerY - totalHeight / 2
      for (const tool of tools) {
        const { w, h } = dimsOf(tool)
        positions.push({ x: centerX - w / 2, y: cursorY, w, h })
        cursorY += h + spacing
      }
      break
    }
    case 'grid': {
      // Fixed-pitch grid, max 3 columns; cell pitch assumes the largest
      // default tool size (400x500) plus spacing.
      const cols = Math.min(3, tools.length)
      // Fix: row count uses ceil (floor under-counted a partial last row),
      // and vertical centering uses (rows - 1) / 2 to mirror the
      // (cols - 1) / 2 column formula.
      const rows = Math.ceil(tools.length / cols)
      const cellW = 400 + spacing
      const cellH = 500 + spacing
      tools.forEach((tool, i) => {
        const { w, h } = dimsOf(tool)
        positions.push({
          x: centerX + ((i % cols) - (cols - 1) / 2) * cellW - w / 2,
          y: centerY + (Math.floor(i / cols) - (rows - 1) / 2) * cellH - h / 2,
          w,
          h,
        })
      })
      break
    }
    case 'radial': {
      // Evenly spaced around a circle, starting from the top (12 o'clock).
      const radius = Math.max(300, tools.length * 80)
      tools.forEach((tool, i) => {
        const { w, h } = dimsOf(tool)
        const angle = (i / tools.length) * 2 * Math.PI - Math.PI / 2
        positions.push({
          x: centerX + radius * Math.cos(angle) - w / 2,
          y: centerY + radius * Math.sin(angle) - h / 2,
          w,
          h,
        })
      })
      break
    }
    case 'cascade': {
      // Cascade diagonally down-right, like stacked windows.
      tools.forEach((tool, i) => {
        const { w, h } = dimsOf(tool)
        positions.push({
          x: centerX + i * (w / 2 + spacing) - w / 2,
          y: centerY + i * (80 + spacing) - h / 2,
          w,
          h,
        })
      })
      break
    }
    default:
      // Unknown pattern: pile everything at the center.
      for (const tool of tools) {
        const { w, h } = dimsOf(tool)
        positions.push({ x: centerX - w / 2, y: centerY - h / 2, w, h })
      }
  }
  return positions
}
/**
 * Spawn a single tool shape on the canvas near the given position, nudged
 * to avoid overlapping existing shapes.
 *
 * @param editor - The tldraw editor instance.
 * @param toolId - The tool/shape type to create (must exist in TOOL_SCHEMAS).
 * @param position - Desired top-left page position before overlap avoidance.
 * @param options.selectAfterSpawn - When true, select the new shape.
 * @returns The id of the created shape, or null for an unknown tool id.
 */
export function spawnTool(
  editor: Editor,
  toolId: string,
  position: { x: number; y: number },
  options: Partial<SpawnOptions> = {}
): TLShapeId | null {
  if (!TOOL_SCHEMAS[toolId]) {
    console.warn(`Unknown tool: ${toolId}`)
    return null
  }
  const { w, h } = TOOL_DIMENSIONS[toolId] || { w: 300, h: 400 }

  // Nudge away from any shapes already occupying this spot
  const spot = findNonOverlappingPosition(editor, position.x, position.y, w, h)

  const newId = createShapeId()
  editor.createShape({
    id: newId,
    type: toolId,
    x: spot.x,
    y: spot.y,
    props: { w, h },
  })

  if (options.selectAfterSpawn) {
    editor.setSelectedShapes([newId])
  }
  return newId
}
/**
 * Spawn multiple tool shapes on the canvas with smart positioning.
 * Layout slots come from calculatePositions; each shape is then nudged to
 * avoid overlaps before creation. Optionally selects and zooms to the result.
 *
 * @param editor - The tldraw editor instance.
 * @param tools - Tool schemas to spawn, in order.
 * @param options - Spawn options, merged over DEFAULT_OPTIONS.
 * @returns Ids of the created shapes (empty when `tools` is empty).
 */
export function spawnTools(
  editor: Editor,
  tools: ToolSchema[],
  options: Partial<SpawnOptions> = {}
): TLShapeId[] {
  if (!tools.length) return []
  const opts = { ...DEFAULT_OPTIONS, ...options }

  // Resolve the layout anchor: explicit option, else viewport center
  let anchorX: number
  let anchorY: number
  if (opts.centerPosition) {
    anchorX = opts.centerPosition.x
    anchorY = opts.centerPosition.y
  } else {
    const viewport = editor.getViewportPageBounds()
    anchorX = viewport.x + viewport.w / 2
    anchorY = viewport.y + viewport.h / 2
  }

  const layout = calculatePositions(tools, anchorX, anchorY, opts.arrangement!, opts.spacing!)

  // Create each shape, steering around shapes spawned earlier in this call
  const spawnedIds: TLShapeId[] = []
  tools.forEach((tool, i) => {
    const slot = layout[i]
    const resolved = findNonOverlappingPosition(
      editor,
      slot.x,
      slot.y,
      slot.w,
      slot.h,
      spawnedIds.map(id => id as string)
    )
    const newId = createShapeId()
    editor.createShape({
      id: newId,
      type: tool.id,
      x: resolved.x,
      y: resolved.y,
      props: { w: slot.w, h: slot.h },
    })
    spawnedIds.push(newId)
  })

  if (opts.selectAfterSpawn && spawnedIds.length > 0) {
    editor.setSelectedShapes(spawnedIds)
  }

  // Optionally frame the freshly spawned shapes in the viewport
  if (opts.zoomToFit && spawnedIds.length > 0) {
    const bounds = editor.getSelectionPageBounds()
    if (bounds) {
      const viewport = editor.getViewportPageBounds()
      editor.zoomToBounds(bounds, {
        // Cap zoom so the selection fills at most 80% of the viewport
        targetZoom: Math.min(
          (viewport.width * 0.8) / bounds.width,
          (viewport.height * 0.8) / bounds.height,
          1
        ),
        inset: 50,
        animation: { duration: 400, easing: (t) => t * (2 - t) },
      })
    }
  }
  return spawnedIds
}
/**
* Spawn tools below the Mycelial Intelligence bar
*/
export function spawnToolsBelowMI(
editor: Editor,
tools: ToolSchema[],
options: Partial<SpawnOptions> = {}
): TLShapeId[] {
// The MI bar is at the top center of the viewport
// Spawn tools slightly below and centered
const viewportBounds = editor.getViewportPageBounds()
// Calculate position: center horizontally, offset down from top
const centerX = viewportBounds.x + viewportBounds.w / 2
const topY = viewportBounds.y + 100 // Below MI bar
return spawnTools(editor, tools, {
...options,
centerPosition: { x: centerX, y: topY + 200 },
arrangement: options.arrangement || (tools.length <= 2 ? 'horizontal' : 'grid'),
})
}
/**
 * Convenience lookup for a tool schema by its id.
 *
 * @param toolId - The tool id to resolve.
 * @returns The matching schema, or undefined when no such tool exists.
 */
export function getToolById(toolId: string): ToolSchema | undefined {
  const schema = TOOL_SCHEMAS[toolId]
  return schema
}

View File

@ -544,6 +544,28 @@ export class AutomergeDurableObject {
hash = ((hash << 5) - hash) + Math.floor(record.x + record.y) hash = ((hash << 5) - hash) + Math.floor(record.x + record.y)
hash = hash & hash hash = hash & hash
} }
// CRITICAL: Include text content in hash for Markdown and similar shapes
// This ensures text changes are detected for R2 persistence
if (record.props?.text !== undefined && typeof record.props.text === 'string') {
hash = ((hash << 5) - hash) + record.props.text.length
hash = hash & hash
// Include first 50 chars for better collision resistance
const textSample = record.props.text.substring(0, 50)
for (let j = 0; j < textSample.length; j++) {
hash = ((hash << 5) - hash) + textSample.charCodeAt(j)
hash = hash & hash
}
}
// Also include content for ObsNote shapes
if (record.props?.content !== undefined && typeof record.props.content === 'string') {
hash = ((hash << 5) - hash) + record.props.content.length
hash = hash & hash
const contentSample = record.props.content.substring(0, 50)
for (let j = 0; j < contentSample.length; j++) {
hash = ((hash << 5) - hash) + contentSample.charCodeAt(j)
hash = hash & hash
}
}
} }
} }
@ -713,6 +735,91 @@ export class AutomergeDurableObject {
return this.roomPromise return this.roomPromise
} }
/**
 * Assign sequential indices to shapes to preserve layer order during format conversion.
 * Uses tldraw's fractional indexing format: a1, a2, a3, etc.
 * Shapes are sorted by their original array index to maintain the order they were stored in.
 *
 * @param store - Mutable record map (id -> record); shape records' `index`
 *   fields are rewritten in place.
 * @param shapesNeedingIndex - Shape ids paired with their position in the
 *   original Automerge array (lower originalIndex = further back in z-order).
 */
private assignSequentialIndices(store: any, shapesNeedingIndex: { id: string, originalIndex: number }[]): void {
  if (shapesNeedingIndex.length === 0) return
  // Sort shapes by their original array index to preserve layer order
  shapesNeedingIndex.sort((a, b) => a.originalIndex - b.originalIndex)
  // Check if shapes already have valid indices we should preserve
  // Valid index: starts with 'a' followed by digits, optionally followed by alphanumeric jitter
  const isValidIndex = (idx: any): boolean => {
    if (!idx || typeof idx !== 'string' || idx.length === 0) return false
    // Valid fractional index format: a1, a2, a1V, a10, a1Lz, etc.
    if (/^a\d/.test(idx)) return true
    // Also allow 'Z' prefix for very high indices
    if (/^Z[a-z]/i.test(idx)) return true
    return false
  }
  // Count how many shapes have valid indices
  let validIndexCount = 0
  let invalidIndexCount = 0
  // NOTE(review): existingIndices is collected but never read afterwards —
  // either dead code or intended for a future interleaving strategy.
  const existingIndices: string[] = []
  for (const { id } of shapesNeedingIndex) {
    const shape = store[id]
    if (shape && isValidIndex(shape.index)) {
      validIndexCount++
      existingIndices.push(shape.index)
    } else {
      invalidIndexCount++
    }
  }
  console.log(`📊 Index assignment check: ${validIndexCount} valid, ${invalidIndexCount} invalid out of ${shapesNeedingIndex.length} shapes`)
  // If all shapes have valid indices, preserve them
  if (invalidIndexCount === 0) {
    console.log(`✅ All shapes have valid indices, preserving existing layer order`)
    return
  }
  // If some have valid indices and some don't, we need to be careful
  // Assign new indices only to shapes that need them, fitting them into the existing sequence
  if (validIndexCount > 0 && invalidIndexCount > 0) {
    console.log(`⚠️ Mixed valid/invalid indices detected. Assigning new indices to ${invalidIndexCount} shapes while preserving ${validIndexCount} valid indices.`)
    // For simplicity, if we have a mix, reassign all shapes to ensure proper ordering
    // This is safer than trying to interleave new indices between existing ones
    // NOTE(review): despite the first log message above, the loop below
    // reassigns EVERY shape's index — previously-valid indices are not
    // actually preserved in the mixed case.
    console.log(`🔄 Reassigning all shape indices to ensure consistent layer order`)
  }
  // Assign sequential indices: a1, a2, a3, etc.
  // Using simple integer increments provides clear layer ordering
  let indexCounter = 1
  const assignedIndices: string[] = []
  for (const { id, originalIndex } of shapesNeedingIndex) {
    const shape = store[id]
    if (!shape) continue
    const newIndex = `a${indexCounter}`
    const oldIndex = shape.index
    if (oldIndex !== newIndex) {
      shape.index = newIndex
      assignedIndices.push(`${id}: ${oldIndex || 'undefined'} -> ${newIndex}`)
    }
    // Counter advances even when the shape already held this exact index,
    // so the sequence stays gap-free and ordered.
    indexCounter++
  }
  if (assignedIndices.length > 0) {
    console.log(`🔢 Assigned sequential indices to ${assignedIndices.length} shapes:`)
    // Log first 10 assignments for debugging
    assignedIndices.slice(0, 10).forEach(msg => console.log(`  ${msg}`))
    if (assignedIndices.length > 10) {
      console.log(`  ... and ${assignedIndices.length - 10} more`)
    }
  }
}
private convertAutomergeToStore(automergeDoc: any[]): any { private convertAutomergeToStore(automergeDoc: any[]): any {
const store: any = {} const store: any = {}
const conversionStats = { const conversionStats = {
@ -723,51 +830,65 @@ export class AutomergeDurableObject {
errorDetails: [] as string[], errorDetails: [] as string[],
customRecords: [] as string[] // Track custom record IDs (obsidian_vault, etc.) customRecords: [] as string[] // Track custom record IDs (obsidian_vault, etc.)
} }
// Track shapes that need index assignment for layer order preservation
const shapesNeedingIndex: { id: string, originalIndex: number }[] = []
// Convert each Automerge record to store format // Convert each Automerge record to store format
automergeDoc.forEach((record: any, index: number) => { automergeDoc.forEach((record: any, arrayIndex: number) => {
try { try {
// Validate record structure // Validate record structure
if (!record) { if (!record) {
conversionStats.skipped++ conversionStats.skipped++
conversionStats.errorDetails.push(`Record at index ${index} is null or undefined`) conversionStats.errorDetails.push(`Record at index ${arrayIndex} is null or undefined`)
return return
} }
if (!record.state) { if (!record.state) {
conversionStats.skipped++ conversionStats.skipped++
conversionStats.errorDetails.push(`Record at index ${index} missing state property`) conversionStats.errorDetails.push(`Record at index ${arrayIndex} missing state property`)
return return
} }
if (!record.state.id) { if (!record.state.id) {
conversionStats.skipped++ conversionStats.skipped++
conversionStats.errorDetails.push(`Record at index ${index} missing state.id`) conversionStats.errorDetails.push(`Record at index ${arrayIndex} missing state.id`)
return return
} }
// Validate ID is a string // Validate ID is a string
if (typeof record.state.id !== 'string') { if (typeof record.state.id !== 'string') {
conversionStats.skipped++ conversionStats.skipped++
conversionStats.errorDetails.push(`Record at index ${index} has invalid state.id type: ${typeof record.state.id}`) conversionStats.errorDetails.push(`Record at index ${arrayIndex} has invalid state.id type: ${typeof record.state.id}`)
return return
} }
// Track custom records (obsidian_vault, etc.) // Track custom records (obsidian_vault, etc.)
if (record.state.id.startsWith('obsidian_vault:')) { if (record.state.id.startsWith('obsidian_vault:')) {
conversionStats.customRecords.push(record.state.id) conversionStats.customRecords.push(record.state.id)
} }
// Extract the state and use it as the store record // Extract the state and use it as the store record
store[record.state.id] = record.state store[record.state.id] = record.state
// Track shapes that need index assignment (preserve array order for layer order)
if (record.state.typeName === 'shape') {
shapesNeedingIndex.push({ id: record.state.id, originalIndex: arrayIndex })
}
conversionStats.converted++ conversionStats.converted++
} catch (error) { } catch (error) {
conversionStats.errors++ conversionStats.errors++
const errorMsg = `Error converting record at index ${index}: ${error instanceof Error ? error.message : String(error)}` const errorMsg = `Error converting record at index ${arrayIndex}: ${error instanceof Error ? error.message : String(error)}`
conversionStats.errorDetails.push(errorMsg) conversionStats.errorDetails.push(errorMsg)
console.error(`❌ Conversion error:`, errorMsg) console.error(`❌ Conversion error:`, errorMsg)
} }
}) })
// CRITICAL: Assign sequential indices to shapes to preserve layer order
// Shapes earlier in the array should have lower indices (rendered first/behind)
// Use fractional indexing format: a1, a2, a3, etc.
this.assignSequentialIndices(store, shapesNeedingIndex)
console.log(`📊 Automerge to Store conversion statistics:`, { console.log(`📊 Automerge to Store conversion statistics:`, {
total: conversionStats.total, total: conversionStats.total,
@ -1073,7 +1194,7 @@ export class AutomergeDurableObject {
store: {}, store: {},
schema: oldDoc.schema || this.createEmptyDocument().schema schema: oldDoc.schema || this.createEmptyDocument().schema
} }
const migrationStats = { const migrationStats = {
total: 0, total: 0,
converted: 0, converted: 0,
@ -1083,60 +1204,69 @@ export class AutomergeDurableObject {
recordTypes: {} as Record<string, number>, recordTypes: {} as Record<string, number>,
customRecords: [] as string[] // Track custom record IDs (obsidian_vault, etc.) customRecords: [] as string[] // Track custom record IDs (obsidian_vault, etc.)
} }
// Track shapes for layer order preservation
const shapesNeedingIndex: { id: string, originalIndex: number }[] = []
// Convert documents array to store object // Convert documents array to store object
if (oldDoc.documents && Array.isArray(oldDoc.documents)) { if (oldDoc.documents && Array.isArray(oldDoc.documents)) {
migrationStats.total = oldDoc.documents.length migrationStats.total = oldDoc.documents.length
oldDoc.documents.forEach((doc: any, index: number) => { oldDoc.documents.forEach((doc: any, arrayIndex: number) => {
try { try {
// Validate document structure // Validate document structure
if (!doc) { if (!doc) {
migrationStats.skipped++ migrationStats.skipped++
migrationStats.errorDetails.push(`Document at index ${index} is null or undefined`) migrationStats.errorDetails.push(`Document at index ${arrayIndex} is null or undefined`)
return return
} }
if (!doc.state) { if (!doc.state) {
migrationStats.skipped++ migrationStats.skipped++
migrationStats.errorDetails.push(`Document at index ${index} missing state property`) migrationStats.errorDetails.push(`Document at index ${arrayIndex} missing state property`)
return return
} }
if (!doc.state.id) { if (!doc.state.id) {
migrationStats.skipped++ migrationStats.skipped++
migrationStats.errorDetails.push(`Document at index ${index} missing state.id`) migrationStats.errorDetails.push(`Document at index ${arrayIndex} missing state.id`)
return return
} }
if (!doc.state.typeName) { if (!doc.state.typeName) {
migrationStats.skipped++ migrationStats.skipped++
migrationStats.errorDetails.push(`Document at index ${index} missing state.typeName (id: ${doc.state.id})`) migrationStats.errorDetails.push(`Document at index ${arrayIndex} missing state.typeName (id: ${doc.state.id})`)
return return
} }
// Validate ID is a string // Validate ID is a string
if (typeof doc.state.id !== 'string') { if (typeof doc.state.id !== 'string') {
migrationStats.skipped++ migrationStats.skipped++
migrationStats.errorDetails.push(`Document at index ${index} has invalid state.id type: ${typeof doc.state.id}`) migrationStats.errorDetails.push(`Document at index ${arrayIndex} has invalid state.id type: ${typeof doc.state.id}`)
return return
} }
// Track record types // Track record types
const typeName = doc.state.typeName const typeName = doc.state.typeName
migrationStats.recordTypes[typeName] = (migrationStats.recordTypes[typeName] || 0) + 1 migrationStats.recordTypes[typeName] = (migrationStats.recordTypes[typeName] || 0) + 1
// Track custom records (obsidian_vault, etc.) // Track custom records (obsidian_vault, etc.)
if (doc.state.id.startsWith('obsidian_vault:')) { if (doc.state.id.startsWith('obsidian_vault:')) {
migrationStats.customRecords.push(doc.state.id) migrationStats.customRecords.push(doc.state.id)
} }
// Extract the state and use it as the store record // Extract the state and use it as the store record
(newDoc.store as any)[doc.state.id] = doc.state (newDoc.store as any)[doc.state.id] = doc.state
// Track shapes for layer order preservation
if (doc.state.typeName === 'shape') {
shapesNeedingIndex.push({ id: doc.state.id, originalIndex: arrayIndex })
}
migrationStats.converted++ migrationStats.converted++
} catch (error) { } catch (error) {
migrationStats.errors++ migrationStats.errors++
const errorMsg = `Error migrating document at index ${index}: ${error instanceof Error ? error.message : String(error)}` const errorMsg = `Error migrating document at index ${arrayIndex}: ${error instanceof Error ? error.message : String(error)}`
migrationStats.errorDetails.push(errorMsg) migrationStats.errorDetails.push(errorMsg)
console.error(`❌ Migration error:`, errorMsg) console.error(`❌ Migration error:`, errorMsg)
} }
@ -1144,7 +1274,10 @@ export class AutomergeDurableObject {
} else { } else {
console.warn(`⚠️ migrateDocumentsToStore: oldDoc.documents is not an array or doesn't exist`) console.warn(`⚠️ migrateDocumentsToStore: oldDoc.documents is not an array or doesn't exist`)
} }
// CRITICAL: Assign sequential indices to shapes to preserve layer order
this.assignSequentialIndices(newDoc.store, shapesNeedingIndex)
// Count shapes after migration // Count shapes after migration
const shapeCount = Object.values(newDoc.store).filter((r: any) => r?.typeName === 'shape').length const shapeCount = Object.values(newDoc.store).filter((r: any) => r?.typeName === 'shape').length
@ -1253,24 +1386,13 @@ export class AutomergeDurableObject {
record.meta = {} record.meta = {}
needsUpdate = true needsUpdate = true
} }
// CRITICAL: IndexKey must follow tldraw's fractional indexing format // NOTE: Index assignment is now handled by assignSequentialIndices() during format conversion
// Valid format: starts with 'a' followed by digits, optionally followed by alphanumeric jitter // We only need to ensure index exists, not validate the format here
// Examples: "a1", "a2", "a10", "a1V", "a24sT", "a1V4rr" (fractional between a1 and a2) // This preserves layer order that was established during conversion
// Invalid: "c1", "b1" (old non-fractional format - single letter + single digit) if (!record.index || typeof record.index !== 'string') {
// tldraw uses fractional-indexing-jittered library: https://observablehq.com/@dgreensp/implementing-fractional-indexing // Only assign a default if truly missing - the conversion functions should have handled this
const isValidIndex = (idx: any): boolean => { console.log(`⚠️ Server: Shape ${record.id} missing index after conversion, assigning fallback`)
if (!idx || typeof idx !== 'string' || idx.length === 0) return false record.index = 'a1'
// Old format "b1", "c1" etc are invalid (single letter + single digit)
if (/^[b-z]\d$/i.test(idx)) return false
// Valid: starts with 'a' followed by at least one digit
if (/^a\d/.test(idx)) return true
// Also allow 'Z' prefix for very high indices
if (/^Z[a-z]/i.test(idx)) return true
return false
}
if (!isValidIndex(record.index)) {
console.log(`🔧 Server: Fixing invalid index "${record.index}" to "a1" for shape ${record.id}`)
record.index = 'a1' // Required index property for all shapes - must be valid IndexKey format
needsUpdate = true needsUpdate = true
} }

672
worker/cryptidAuth.ts Normal file
View File

@ -0,0 +1,672 @@
import { Environment, User, DeviceKey, VerificationToken } from './types';
// Generate a cryptographically secure random token: 32 random bytes
// rendered as 64 lowercase hex characters (256 bits of entropy).
function generateToken(): string {
  const bytes = crypto.getRandomValues(new Uint8Array(32));
  let hex = '';
  for (const b of bytes) {
    hex += b.toString(16).padStart(2, '0');
  }
  return hex;
}
// Generate an RFC 4122 version-4 UUID via the Web Crypto API
// (available in Cloudflare Workers and modern Node).
function generateUUID(): string {
  return crypto.randomUUID();
}
// Send a transactional HTML email through the SendGrid v3 API.
// Returns true on a 2xx response; logs and returns false on a non-2xx
// response or any network error, so callers can degrade gracefully
// instead of failing the whole request.
async function sendEmail(
  env: Environment,
  to: string,
  subject: string,
  htmlContent: string
): Promise<boolean> {
  const payload = {
    personalizations: [{ to: [{ email: to }] }],
    from: { email: env.CRYPTID_EMAIL_FROM || 'noreply@jeffemmett.com', name: 'CryptID' },
    subject,
    content: [{ type: 'text/html', value: htmlContent }],
  };
  try {
    const res = await fetch('https://api.sendgrid.com/v3/mail/send', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${env.SENDGRID_API_KEY}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(payload),
    });
    if (res.ok) {
      return true;
    }
    console.error('SendGrid error:', await res.text());
    return false;
  } catch (error) {
    console.error('Email send error:', error);
    return false;
  }
}
// Best-effort purge of verification tokens that have expired or were
// already consumed. Errors are logged and swallowed so cleanup never
// blocks the request that triggered it.
async function cleanupExpiredTokens(db: D1Database): Promise<void> {
  const sql =
    "DELETE FROM verification_tokens WHERE expires_at < datetime('now') OR used = 1";
  try {
    await db.prepare(sql).run();
  } catch (error) {
    console.error('Token cleanup error:', error);
  }
}
/**
 * Link an email address to an existing CryptID account (Device A flow).
 * POST /auth/link-email
 * Body: { email, cryptidUsername, publicKey, deviceName? }
 *
 * Creates the user row and device-key row as needed, then (unless the
 * email is already verified) issues a 24-hour verification token and
 * emails a confirmation link.
 */
export async function handleLinkEmail(
  request: Request,
  env: Environment
): Promise<Response> {
  // Small local helper so every branch returns a consistent JSON response.
  const json = (data: unknown, status = 200) =>
    new Response(JSON.stringify(data), {
      status,
      headers: { 'Content-Type': 'application/json' },
    });
  try {
    const { email, cryptidUsername, publicKey, deviceName } = await request.json() as {
      email: string;
      cryptidUsername: string;
      publicKey: string;
      deviceName?: string;
    };

    if (!email || !cryptidUsername || !publicKey) {
      return json({ error: 'Missing required fields' }, 400);
    }

    // Shape check only — real ownership proof is the emailed link.
    if (!/^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(email)) {
      return json({ error: 'Invalid email format' }, 400);
    }

    const db = env.CRYPTID_DB;
    if (!db) {
      return json({ error: 'Database not configured' }, 503);
    }

    // An email may only ever belong to one CryptID account.
    const existingUser = await db.prepare(
      'SELECT * FROM users WHERE email = ?'
    ).bind(email).first<User>();
    if (existingUser && existingUser.cryptid_username !== cryptidUsername) {
      return json({
        error: 'Email already linked to a different CryptID account'
      }, 409);
    }

    const existingKey = await db.prepare(
      'SELECT * FROM device_keys WHERE public_key = ?'
    ).bind(publicKey).first<DeviceKey>();

    // Key already on file for a verified account: nothing left to do.
    if (existingKey && existingUser && existingUser.email_verified) {
      return json({
        success: true,
        message: 'Email already verified',
        emailVerified: true
      });
    }

    const userId = existingUser?.id || generateUUID();
    const userAgent = request.headers.get('User-Agent') || null;

    if (!existingUser) {
      await db.prepare(
        'INSERT INTO users (id, email, cryptid_username, email_verified) VALUES (?, ?, ?, 0)'
      ).bind(userId, email, cryptidUsername).run();
    }

    if (!existingKey) {
      await db.prepare(
        'INSERT INTO device_keys (id, user_id, public_key, device_name, user_agent) VALUES (?, ?, ?, ?, ?)'
      ).bind(generateUUID(), userId, publicKey, deviceName || 'Primary Device', userAgent).run();
    }

    // Already verified (and the key was just added above): report success.
    if (existingUser?.email_verified) {
      return json({
        success: true,
        message: 'Email already verified',
        emailVerified: true
      });
    }

    // Email not verified yet: mint a fresh 24-hour token and mail the link.
    await cleanupExpiredTokens(db);
    const token = generateToken();
    const expiresAt = new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString();
    await db.prepare(
      'INSERT INTO verification_tokens (id, email, token, token_type, expires_at) VALUES (?, ?, ?, ?, ?)'
    ).bind(generateUUID(), email, token, 'email_verify', expiresAt).run();

    const verifyUrl = `${env.APP_URL || 'https://jeffemmett.com'}/verify-email?token=${token}`;
    const emailSent = await sendEmail(
      env,
      email,
      'Verify your CryptID email',
      `
      <h2>Verify your CryptID email</h2>
      <p>Click the link below to verify your email address for CryptID: <strong>${cryptidUsername}</strong></p>
      <p><a href="${verifyUrl}" style="display: inline-block; padding: 12px 24px; background: #4f46e5; color: white; text-decoration: none; border-radius: 6px;">Verify Email</a></p>
      <p>Or copy this link: ${verifyUrl}</p>
      <p>This link expires in 24 hours.</p>
      <p style="color: #666; font-size: 12px;">If you didn't request this, you can safely ignore this email.</p>
      `
    );

    return json({
      success: true,
      message: emailSent ? 'Verification email sent' : 'Account created but email failed to send',
      emailVerified: false,
      emailSent
    });
  } catch (error) {
    console.error('Link email error:', error);
    return json({ error: 'Internal server error' }, 500);
  }
}
/**
 * Confirm an email address from a verification link.
 * GET /auth/verify-email/:token
 *
 * Marks the user's email verified and burns the token so it cannot be
 * replayed. Returns JSON; the frontend handles the redirect.
 */
export async function handleVerifyEmail(
  token: string,
  env: Environment
): Promise<Response> {
  const json = (data: unknown, status = 200) =>
    new Response(JSON.stringify(data), {
      status,
      headers: { 'Content-Type': 'application/json' },
    });
  try {
    const db = env.CRYPTID_DB;
    if (!db) {
      return json({ error: 'Database not configured' }, 503);
    }

    // Token must exist, be of the right type, be unused, and not be expired.
    const tokenRecord = await db.prepare(
      "SELECT * FROM verification_tokens WHERE token = ? AND token_type = 'email_verify' AND used = 0 AND expires_at > datetime('now')"
    ).bind(token).first<VerificationToken>();
    if (!tokenRecord) {
      return json({ error: 'Invalid or expired token' }, 400);
    }

    // Flip the verified flag, then consume the token (single-use).
    await db.prepare(
      "UPDATE users SET email_verified = 1, updated_at = datetime('now') WHERE email = ?"
    ).bind(tokenRecord.email).run();
    await db.prepare(
      'UPDATE verification_tokens SET used = 1 WHERE id = ?'
    ).bind(tokenRecord.id).run();

    return json({
      success: true,
      message: 'Email verified successfully',
      email: tokenRecord.email
    });
  } catch (error) {
    console.error('Verify email error:', error);
    return json({ error: 'Internal server error' }, 500);
  }
}
/**
 * Start linking a new device (Device B submits the account email).
 * POST /auth/request-device-link
 * Body: { email, publicKey, deviceName? }
 *
 * Emails the (verified) account owner a short-lived approval link that,
 * when clicked, registers the new device's public key.
 */
export async function handleRequestDeviceLink(
  request: Request,
  env: Environment
): Promise<Response> {
  const json = (data: unknown, status = 200) =>
    new Response(JSON.stringify(data), {
      status,
      headers: { 'Content-Type': 'application/json' },
    });
  try {
    const { email, publicKey, deviceName } = await request.json() as {
      email: string;
      publicKey: string;
      deviceName?: string;
    };

    if (!email || !publicKey) {
      return json({ error: 'Missing required fields' }, 400);
    }

    const db = env.CRYPTID_DB;
    if (!db) {
      return json({ error: 'Database not configured' }, 503);
    }

    // Only a verified email can sponsor new devices.
    const user = await db.prepare(
      'SELECT * FROM users WHERE email = ? AND email_verified = 1'
    ).bind(email).first<User>();
    if (!user) {
      return json({
        error: 'No verified CryptID account found for this email'
      }, 404);
    }

    // Idempotent: a key that's already linked needs no approval round-trip.
    const existingKey = await db.prepare(
      'SELECT * FROM device_keys WHERE public_key = ?'
    ).bind(publicKey).first<DeviceKey>();
    if (existingKey) {
      return json({
        success: true,
        message: 'Device already linked',
        cryptidUsername: user.cryptid_username,
        alreadyLinked: true
      });
    }

    const userAgent = request.headers.get('User-Agent') || null;
    await cleanupExpiredTokens(db);

    // 1-hour expiry: device linking is the security-sensitive flow.
    const token = generateToken();
    const expiresAt = new Date(Date.now() + 60 * 60 * 1000).toISOString();
    await db.prepare(
      'INSERT INTO verification_tokens (id, email, token, token_type, public_key, device_name, user_agent, expires_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)'
    ).bind(generateUUID(), email, token, 'device_link', publicKey, deviceName || 'New Device', userAgent, expiresAt).run();

    const linkUrl = `${env.APP_URL || 'https://jeffemmett.com'}/link-device?token=${token}`;
    const emailSent = await sendEmail(
      env,
      email,
      'Link new device to your CryptID',
      `
      <h2>New Device Link Request</h2>
      <p>Someone is trying to link a new device to your CryptID: <strong>${user.cryptid_username}</strong></p>
      <p><strong>Device:</strong> ${deviceName || 'New Device'}</p>
      <p>If this was you, click the button below to approve:</p>
      <p><a href="${linkUrl}" style="display: inline-block; padding: 12px 24px; background: #4f46e5; color: white; text-decoration: none; border-radius: 6px;">Approve Device</a></p>
      <p>Or copy this link: ${linkUrl}</p>
      <p>This link expires in 1 hour.</p>
      <p style="color: #c00; font-size: 12px;"><strong>If you didn't request this, do not click the link.</strong> Someone may be trying to access your account.</p>
      `
    );

    return json({
      success: true,
      message: emailSent ? 'Verification email sent to your address' : 'Failed to send email',
      emailSent,
      cryptidUsername: user.cryptid_username
    });
  } catch (error) {
    console.error('Request device link error:', error);
    return json({ error: 'Internal server error' }, 500);
  }
}
/**
 * Complete a device link (approval link clicked from the email).
 * GET /auth/link-device/:token
 *
 * Registers the public key stored on the token under the owner's account,
 * then consumes the token.
 */
export async function handleLinkDevice(
  token: string,
  env: Environment
): Promise<Response> {
  const json = (data: unknown, status = 200) =>
    new Response(JSON.stringify(data), {
      status,
      headers: { 'Content-Type': 'application/json' },
    });
  try {
    const db = env.CRYPTID_DB;
    if (!db) {
      return json({ error: 'Database not configured' }, 503);
    }

    // Token must be a live, unused device_link token.
    const tokenRecord = await db.prepare(
      "SELECT * FROM verification_tokens WHERE token = ? AND token_type = 'device_link' AND used = 0 AND expires_at > datetime('now')"
    ).bind(token).first<VerificationToken>();
    if (!tokenRecord) {
      return json({ error: 'Invalid or expired token' }, 400);
    }

    const user = await db.prepare(
      'SELECT * FROM users WHERE email = ?'
    ).bind(tokenRecord.email).first<User>();
    if (!user) {
      return json({ error: 'User not found' }, 404);
    }

    // Add the new device key (details were captured at request time),
    // then burn the token so it is single-use.
    await db.prepare(
      'INSERT INTO device_keys (id, user_id, public_key, device_name, user_agent) VALUES (?, ?, ?, ?, ?)'
    ).bind(
      generateUUID(),
      user.id,
      tokenRecord.public_key,
      tokenRecord.device_name,
      tokenRecord.user_agent
    ).run();
    await db.prepare(
      'UPDATE verification_tokens SET used = 1 WHERE id = ?'
    ).bind(tokenRecord.id).run();

    return json({
      success: true,
      message: 'Device linked successfully',
      cryptidUsername: user.cryptid_username,
      email: user.email
    });
  } catch (error) {
    console.error('Link device error:', error);
    return json({ error: 'Internal server error' }, 500);
  }
}
/**
 * Check whether a device public key is linked to an account.
 * POST /auth/lookup
 * Body: { publicKey }
 *
 * On a hit, also bumps the key's last_used timestamp.
 */
export async function handleLookup(
  request: Request,
  env: Environment
): Promise<Response> {
  const json = (data: unknown, status = 200) =>
    new Response(JSON.stringify(data), {
      status,
      headers: { 'Content-Type': 'application/json' },
    });
  try {
    const { publicKey } = await request.json() as { publicKey: string };
    if (!publicKey) {
      return json({ error: 'Missing publicKey' }, 400);
    }

    const db = env.CRYPTID_DB;
    if (!db) {
      return json({ error: 'Database not configured' }, 503);
    }

    // Join the key to its owning user in one query.
    const result = await db.prepare(`
      SELECT u.cryptid_username, u.email, u.email_verified, dk.device_name
      FROM device_keys dk
      JOIN users u ON dk.user_id = u.id
      WHERE dk.public_key = ?
    `).bind(publicKey).first<{
      cryptid_username: string;
      email: string;
      email_verified: number;
      device_name: string;
    }>();
    if (!result) {
      return json({ found: false });
    }

    // Record activity for this device.
    await db.prepare(
      "UPDATE device_keys SET last_used = datetime('now') WHERE public_key = ?"
    ).bind(publicKey).run();

    return json({
      found: true,
      cryptidUsername: result.cryptid_username,
      email: result.email,
      emailVerified: result.email_verified === 1,
      deviceName: result.device_name
    });
  } catch (error) {
    console.error('Lookup error:', error);
    return json({ error: 'Internal server error' }, 500);
  }
}
/**
 * List every device linked to the caller's account.
 * POST /auth/devices
 * Body: { publicKey } — the caller authenticates with its own device key.
 *
 * Raw public keys are not echoed back; instead each row is flagged with
 * isCurrentDevice so the UI can highlight the caller.
 */
export async function handleGetDevices(
  request: Request,
  env: Environment
): Promise<Response> {
  const json = (data: unknown, status = 200) =>
    new Response(JSON.stringify(data), {
      status,
      headers: { 'Content-Type': 'application/json' },
    });
  try {
    const { publicKey } = await request.json() as { publicKey: string };
    if (!publicKey) {
      return json({ error: 'Missing publicKey' }, 400);
    }

    const db = env.CRYPTID_DB;
    if (!db) {
      return json({ error: 'Database not configured' }, 503);
    }

    // Resolve which account owns the caller's key.
    const deviceKey = await db.prepare(`
      SELECT user_id FROM device_keys WHERE public_key = ?
    `).bind(publicKey).first<{ user_id: string }>();
    if (!deviceKey) {
      return json({ error: 'Device not found' }, 404);
    }

    const devices = await db.prepare(`
      SELECT id, device_name, user_agent, created_at, last_used, public_key
      FROM device_keys
      WHERE user_id = ?
      ORDER BY created_at DESC
    `).bind(deviceKey.user_id).all<DeviceKey>();

    const deviceList = (devices.results ?? []).map((d: DeviceKey) => ({
      id: d.id,
      deviceName: d.device_name,
      userAgent: d.user_agent,
      createdAt: d.created_at,
      lastUsed: d.last_used,
      isCurrentDevice: d.public_key === publicKey
    }));
    return json({ devices: deviceList });
  } catch (error) {
    console.error('Get devices error:', error);
    return json({ error: 'Internal server error' }, 500);
  }
}
/**
 * Revoke (delete) a linked device key.
 * DELETE /auth/devices/:deviceId
 * Body: { publicKey } — authenticates the caller via its own device key.
 *
 * A device cannot revoke itself, and a generic 404 is returned when the
 * target belongs to a different account (avoids leaking key existence).
 */
export async function handleRevokeDevice(
  deviceId: string,
  request: Request,
  env: Environment
): Promise<Response> {
  const json = (data: unknown, status = 200) =>
    new Response(JSON.stringify(data), {
      status,
      headers: { 'Content-Type': 'application/json' },
    });
  try {
    const { publicKey } = await request.json() as { publicKey: string };
    if (!publicKey) {
      return json({ error: 'Missing publicKey' }, 400);
    }

    const db = env.CRYPTID_DB;
    if (!db) {
      return json({ error: 'Database not configured' }, 503);
    }

    // Authenticate: the caller's key must belong to some account.
    const currentDevice = await db.prepare(`
      SELECT user_id FROM device_keys WHERE public_key = ?
    `).bind(publicKey).first<{ user_id: string }>();
    if (!currentDevice) {
      return json({ error: 'Unauthorized' }, 401);
    }

    // Authorize: the target must exist under the same account.
    const targetDevice = await db.prepare(`
      SELECT user_id, public_key FROM device_keys WHERE id = ?
    `).bind(deviceId).first<{ user_id: string; public_key: string }>();
    if (!targetDevice || targetDevice.user_id !== currentDevice.user_id) {
      return json({ error: 'Device not found' }, 404);
    }

    // Refuse self-revocation so an account can't lock itself out mid-session.
    if (targetDevice.public_key === publicKey) {
      return json({ error: 'Cannot revoke current device' }, 400);
    }

    await db.prepare('DELETE FROM device_keys WHERE id = ?').bind(deviceId).run();

    return json({ success: true, message: 'Device revoked' });
  } catch (error) {
    console.error('Revoke device error:', error);
    return json({ error: 'Internal server error' }, 500);
  }
}

View File

@ -8,4 +8,44 @@ export interface Environment {
AUTOMERGE_DURABLE_OBJECT: DurableObjectNamespace AUTOMERGE_DURABLE_OBJECT: DurableObjectNamespace
DAILY_API_KEY: string; DAILY_API_KEY: string;
DAILY_DOMAIN: string; DAILY_DOMAIN: string;
// CryptID auth bindings
CRYPTID_DB?: D1Database;
SENDGRID_API_KEY?: string;
CRYPTID_EMAIL_FROM?: string;
APP_URL?: string;
}
// CryptID types for auth
// Row shape of the `users` table.
export interface User {
  id: string;
  cryptid_username: string;
  email: string | null;
  // SQLite/D1 boolean: stored and returned as 0 or 1, not true/false —
  // the handlers write `email_verified = 1` and compare `=== 1`.
  email_verified: number;
  created_at: string;
  updated_at: string;
}
// Row shape of the `device_keys` table: one row per device public key
// linked to a user account.
export interface DeviceKey {
  id: string;
  user_id: string;
  // The device's public key; used as the lookup credential by the auth handlers.
  public_key: string;
  device_name: string | null;
  user_agent: string | null;
  created_at: string;
  // Updated on successful lookup; null until the key is first used.
  last_used: string | null;
}
export interface VerificationToken {
id: string;
user_id: string;
token: string;
type: 'email_verification' | 'device_link';
expires_at: string;
created_at: string;
metadata: string | null;
// Metadata fields that get parsed from JSON
email?: string;
public_key?: string;
device_name?: string;
user_agent?: string;
} }