feat: add Ollama private AI integration with model selection
- Add Ollama as priority AI provider (FREE, self-hosted)
- Add model selection UI in Settings dialog
- Support for multiple models: Llama 3.1 70B, Devstral, Qwen Coder, etc.
- Ollama server configured at http://159.195.32.209:11434
- Models dropdown shows quality vs speed tradeoffs
- Falls back to RunPod/cloud providers when Ollama unavailable

Models available:
- llama3.1:70b (Best quality, ~7s)
- devstral (Best for coding agents)
- qwen2.5-coder:7b (Fast coding)
- llama3.1:8b (Balanced)
- llama3.2:3b (Fastest)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
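Note: the fallback behavior described above can be probed against Ollama's standard HTTP API. The sketch below is illustrative only and is not part of this commit; it assumes the server address listed above and uses Ollama's documented /api/tags endpoint.

// Illustrative sketch: probe the private Ollama server before preferring it over cloud providers.
const OLLAMA_URL = 'http://159.195.32.209:11434' // server from the commit message

async function isOllamaAvailable(timeoutMs = 2000): Promise<boolean> {
  try {
    // /api/tags lists installed models and is a cheap health check.
    const res = await fetch(`${OLLAMA_URL}/api/tags`, { signal: AbortSignal.timeout(timeoutMs) })
    return res.ok
  } catch {
    return false // unreachable: callers would fall back to RunPod/cloud providers
  }
}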
This commit is contained in:
parent 5151b474be
commit a8c3988e3f
@@ -124,6 +124,9 @@ import { FathomNoteShape } from "@/shapes/FathomNoteShapeUtil"
 import { HolonShape } from "@/shapes/HolonShapeUtil"
 import { ObsidianBrowserShape } from "@/shapes/ObsidianBrowserShapeUtil"
 import { FathomMeetingsBrowserShape } from "@/shapes/FathomMeetingsBrowserShapeUtil"
+import { ImageGenShape } from "@/shapes/ImageGenShapeUtil"
+import { VideoGenShape } from "@/shapes/VideoGenShapeUtil"
+import { MultmuxShape } from "@/shapes/MultmuxShapeUtil"
 // Location shape removed - no longer needed
 
 export function useAutomergeStoreV2({
@@ -154,7 +157,9 @@ export function useAutomergeStoreV2({
       Holon: {} as any,
       ObsidianBrowser: {} as any,
       FathomMeetingsBrowser: {} as any,
-      LocationShare: {} as any,
+      ImageGen: {} as any,
+      VideoGen: {} as any,
+      Multmux: {} as any,
     },
     bindings: defaultBindingSchemas,
   })
@@ -176,6 +181,9 @@ export function useAutomergeStoreV2({
     HolonShape,
     ObsidianBrowserShape,
     FathomMeetingsBrowserShape,
+    ImageGenShape,
+    VideoGenShape,
+    MultmuxShape,
   ],
   })
   return store
@@ -0,0 +1,361 @@
/**
 * Canvas AI Assistant
 * Provides AI-powered queries about canvas content using semantic search
 * and LLM integration for natural language understanding
 */

import { Editor, TLShape, TLShapeId } from 'tldraw'
import { semanticSearch, extractShapeText, SemanticSearchResult } from './semanticSearch'
import { llm } from '@/utils/llmUtils'

export interface CanvasQueryResult {
  answer: string
  relevantShapes: SemanticSearchResult[]
  context: string
}

export interface CanvasAIConfig {
  maxContextLength?: number
  semanticSearchThreshold?: number
  topKResults?: number
  includeVisibleContext?: boolean
  streamResponse?: boolean
}

const DEFAULT_CONFIG: CanvasAIConfig = {
  maxContextLength: 8000,
  semanticSearchThreshold: 0.25,
  topKResults: 10,
  includeVisibleContext: true,
  streamResponse: true,
}

/**
 * Canvas AI Service - provides intelligent canvas queries
 */
export class CanvasAI {
  private editor: Editor | null = null
  private config: CanvasAIConfig

  constructor(config: Partial<CanvasAIConfig> = {}) {
    this.config = { ...DEFAULT_CONFIG, ...config }
  }

  setEditor(editor: Editor): void {
    this.editor = editor
    semanticSearch.setEditor(editor)
  }

  /**
   * Index the canvas for semantic search
   */
  async indexCanvas(onProgress?: (progress: number) => void): Promise<void> {
    await semanticSearch.indexCanvas(onProgress)
  }

  /**
   * Query the canvas with natural language
   */
  async query(
    question: string,
    onToken?: (partial: string, done?: boolean) => void,
    config?: Partial<CanvasAIConfig>
  ): Promise<CanvasQueryResult> {
    const mergedConfig = { ...this.config, ...config }

    if (!this.editor) {
      throw new Error('Editor not connected. Call setEditor() first.')
    }

    // Build context from canvas
    const context = await this.buildQueryContext(question, mergedConfig)
    const relevantShapes = await semanticSearch.search(
      question,
      mergedConfig.topKResults,
      mergedConfig.semanticSearchThreshold
    )

    // Build the system prompt for canvas-aware AI
    const systemPrompt = this.buildSystemPrompt()
    const userPrompt = this.buildUserPrompt(question, context)

    let answer = ''

    // Use LLM to generate response
    if (onToken && mergedConfig.streamResponse) {
      await llm(
        userPrompt,
        (partial, done) => {
          answer = partial
          onToken(partial, done)
        },
        systemPrompt
      )
    } else {
      // Non-streaming fallback
      await llm(
        userPrompt,
        (partial, done) => {
          if (done) answer = partial
        },
        systemPrompt
      )
    }

    return {
      answer,
      relevantShapes,
      context,
    }
  }

  /**
   * Get a summary of the current canvas state
   */
  async summarize(
    onToken?: (partial: string, done?: boolean) => void
  ): Promise<string> {
    if (!this.editor) {
      throw new Error('Editor not connected. Call setEditor() first.')
    }

    const canvasContext = await semanticSearch.getCanvasContext()
    const visibleContext = semanticSearch.getVisibleShapesContext()

    const systemPrompt = `You are an AI assistant analyzing a collaborative canvas workspace.
Your role is to provide clear, concise summaries of what's on the canvas.
Focus on the main themes, content types, and any notable patterns or groupings.
Be specific about what you observe but keep the summary digestible.`

    const userPrompt = `Please summarize what's on this canvas:

## Canvas Overview
${canvasContext.summary}

## Shape Types Present
${Object.entries(canvasContext.shapeTypes)
  .map(([type, count]) => `- ${type}: ${count}`)
  .join('\n')}

## Currently Visible (${visibleContext.shapes.length} shapes)
${visibleContext.descriptions.slice(0, 20).join('\n')}

## Sample Content
${canvasContext.textContent.slice(0, 10).map((t, i) => `${i + 1}. ${t.slice(0, 300)}...`).join('\n\n')}

Provide a concise summary (2-3 paragraphs) of the main content and themes on this canvas.`

    let summary = ''

    await llm(
      userPrompt,
      (partial, done) => {
        summary = partial
        onToken?.(partial, done)
      },
      systemPrompt
    )

    return summary
  }

  /**
   * Find shapes related to a concept/topic
   */
  async findRelated(
    concept: string,
    topK: number = 5
  ): Promise<SemanticSearchResult[]> {
    return semanticSearch.search(concept, topK, this.config.semanticSearchThreshold)
  }

  /**
   * Navigate to shapes matching a query
   */
  async navigateToQuery(query: string): Promise<TLShape[]> {
    if (!this.editor) return []

    const results = await semanticSearch.search(query, 5, 0.3)

    if (results.length === 0) return []

    // Select the matching shapes
    const shapeIds = results.map(r => r.shapeId)
    this.editor.setSelectedShapes(shapeIds)

    // Zoom to show all matching shapes
    const bounds = this.editor.getSelectionPageBounds()
    if (bounds) {
      this.editor.zoomToBounds(bounds, {
        targetZoom: Math.min(
          (this.editor.getViewportPageBounds().width * 0.8) / bounds.width,
          (this.editor.getViewportPageBounds().height * 0.8) / bounds.height,
          1
        ),
        inset: 50,
        animation: { duration: 400, easing: (t) => t * (2 - t) },
      })
    }

    return results.map(r => r.shape)
  }

  /**
   * Get shapes that are contextually similar to the selected shapes
   */
  async getSimilarToSelected(topK: number = 5): Promise<SemanticSearchResult[]> {
    if (!this.editor) return []

    const selected = this.editor.getSelectedShapes()
    if (selected.length === 0) return []

    // Combine text from all selected shapes
    const combinedText = selected
      .map(s => extractShapeText(s))
      .filter(t => t.length > 0)
      .join(' ')

    if (combinedText.length === 0) return []

    // Search for similar shapes, excluding the selected ones
    const results = await semanticSearch.search(combinedText, topK + selected.length, 0.2)

    // Filter out the selected shapes
    const selectedIds = new Set(selected.map(s => s.id))
    return results.filter(r => !selectedIds.has(r.shapeId)).slice(0, topK)
  }

  /**
   * Explain what's in the current viewport
   */
  async explainViewport(
    onToken?: (partial: string, done?: boolean) => void
  ): Promise<string> {
    if (!this.editor) {
      throw new Error('Editor not connected. Call setEditor() first.')
    }

    const visibleContext = semanticSearch.getVisibleShapesContext()

    if (visibleContext.shapes.length === 0) {
      const msg = 'The current viewport is empty. Pan or zoom to see shapes.'
      onToken?.(msg, true)
      return msg
    }

    const systemPrompt = `You are an AI assistant describing what's visible in a collaborative canvas viewport.
Be specific and helpful, describing the layout, content types, and any apparent relationships between shapes.
If there are notes, prompts, or text content, summarize the key points.`

    const userPrompt = `Describe what's currently visible in this canvas viewport:

## Visible Shapes (${visibleContext.shapes.length})
${visibleContext.descriptions.join('\n')}

Provide a clear description of what the user is looking at, including:
1. The types of content visible
2. Any apparent groupings or relationships
3. Key text content or themes`

    let explanation = ''

    await llm(
      userPrompt,
      (partial, done) => {
        explanation = partial
        onToken?.(partial, done)
      },
      systemPrompt
    )

    return explanation
  }

  /**
   * Build context for a query
   */
  private async buildQueryContext(
    query: string,
    config: CanvasAIConfig
  ): Promise<string> {
    const context = await semanticSearch.buildAIContext(query)

    // Truncate if too long
    if (context.length > (config.maxContextLength || 8000)) {
      return context.slice(0, config.maxContextLength) + '\n...(context truncated)'
    }

    return context
  }

  /**
   * Build system prompt for canvas queries
   */
  private buildSystemPrompt(): string {
    return `You are an intelligent AI assistant with full awareness of a collaborative canvas workspace.
You have access to all shapes, their content, positions, and relationships on the canvas.

Your capabilities:
- Answer questions about what's on the canvas
- Summarize content and themes
- Find connections between different pieces of content
- Help users navigate and understand their workspace
- Identify patterns and groupings

Guidelines:
- Be specific and reference actual content from the canvas
- If you're not sure about something, say so
- When mentioning shapes, indicate their type (e.g., [Prompt], [ObsNote], [Markdown])
- Keep responses concise but informative
- Focus on being helpful and accurate`
  }

  /**
   * Build user prompt with context
   */
  private buildUserPrompt(question: string, context: string): string {
    return `Based on the following canvas context, please answer the user's question.

${context}

---

User Question: ${question}

Please provide a helpful, accurate response based on the canvas content above.`
  }

  /**
   * Get indexing status
   */
  getIndexingStatus(): { isIndexing: boolean; progress: number } {
    return semanticSearch.getIndexingStatus()
  }

  /**
   * Clear the semantic search index
   */
  async clearIndex(): Promise<void> {
    await semanticSearch.clearIndex()
  }

  /**
   * Clean up stale embeddings
   */
  async cleanup(): Promise<number> {
    return semanticSearch.cleanupStaleEmbeddings()
  }
}

// Singleton instance
export const canvasAI = new CanvasAI()

/**
 * React hook for canvas AI (convenience export)
 */
export function useCanvasAI(editor: Editor | null) {
  if (editor) {
    canvasAI.setEditor(editor)
  }
  return canvasAI
}
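A minimal usage sketch of the CanvasAI service above, not part of the commit itself; the import path and the surrounding function are assumptions, and a live tldraw Editor instance is taken as given.

import { Editor } from 'tldraw'
import { canvasAI } from '@/lib/canvasAI' // assumed path for the file above

async function askCanvas(editor: Editor) {
  canvasAI.setEditor(editor)

  // Build the embedding index first (progress callback reports 0-100).
  await canvasAI.indexCanvas(p => console.log(`indexing ${Math.round(p)}%`))

  // Streaming query: onToken receives the growing partial answer.
  const result = await canvasAI.query(
    'What are the main themes on this board?',
    (partial, done) => { if (done) console.log('final answer:', partial) }
  )
  console.log(result.relevantShapes.length, 'related shapes found')
}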
@@ -0,0 +1,496 @@
/**
 * Semantic Search Service
 * Uses @xenova/transformers for browser-based embeddings
 * Provides global understanding of canvas shapes for AI queries
 */

import { Editor, TLShape, TLShapeId } from 'tldraw'

// Lazy load transformers to avoid blocking initial page load
let pipeline: any = null
let embeddingModel: any = null

const MODEL_NAME = 'Xenova/all-MiniLM-L6-v2' // Fast, good quality embeddings (384 dimensions)
const DB_NAME = 'canvas-semantic-search'
const DB_VERSION = 1
const STORE_NAME = 'embeddings'

export interface ShapeEmbedding {
  shapeId: TLShapeId
  embedding: number[]
  text: string
  shapeType: string
  timestamp: number
}

export interface SemanticSearchResult {
  shapeId: TLShapeId
  shape: TLShape
  similarity: number
  matchedText: string
}

export interface CanvasContext {
  totalShapes: number
  shapeTypes: Record<string, number>
  textContent: string[]
  summary: string
}

/**
 * Initialize the embedding model (lazy loaded)
 */
async function initializeModel(): Promise<void> {
  if (embeddingModel) return

  try {
    // Dynamic import to avoid blocking
    const { pipeline: pipelineFn } = await import('@xenova/transformers')
    pipeline = pipelineFn

    console.log('🔄 Loading embedding model...')
    embeddingModel = await pipeline('feature-extraction', MODEL_NAME, {
      quantized: true, // Use quantized model for faster inference
    })
    console.log('✅ Embedding model loaded')
  } catch (error) {
    console.error('❌ Failed to load embedding model:', error)
    throw error
  }
}

/**
 * Extract text content from a shape based on its type
 */
export function extractShapeText(shape: TLShape): string {
  const props = shape.props as any
  const meta = shape.meta as any

  const textParts: string[] = []

  // Add shape type for context
  textParts.push(`[${shape.type}]`)

  // Extract text from various properties
  if (props.text) textParts.push(props.text)
  if (props.content) textParts.push(props.content)
  if (props.prompt) textParts.push(props.prompt)
  if (props.value && typeof props.value === 'string') textParts.push(props.value)
  if (props.name) textParts.push(props.name)
  if (props.description) textParts.push(props.description)
  if (props.url) textParts.push(`URL: ${props.url}`)
  if (props.editingContent) textParts.push(props.editingContent)
  if (props.originalContent) textParts.push(props.originalContent)

  // Check meta for text (geo shapes)
  if (meta?.text) textParts.push(meta.text)

  // For tldraw built-in shapes
  if (shape.type === 'text' && props.text) {
    textParts.push(props.text)
  }
  if (shape.type === 'note' && props.text) {
    textParts.push(props.text)
  }

  return textParts.filter(Boolean).join(' ').trim()
}

/**
 * Generate embedding for text
 */
export async function generateEmbedding(text: string): Promise<number[]> {
  await initializeModel()

  if (!text || text.trim().length === 0) {
    return []
  }

  try {
    const output = await embeddingModel(text, {
      pooling: 'mean',
      normalize: true,
    })

    // Convert to regular array
    return Array.from(output.data)
  } catch (error) {
    console.error('❌ Failed to generate embedding:', error)
    return []
  }
}

/**
 * Calculate cosine similarity between two embeddings
 */
export function cosineSimilarity(a: number[], b: number[]): number {
  if (a.length !== b.length || a.length === 0) return 0

  let dotProduct = 0
  let normA = 0
  let normB = 0

  for (let i = 0; i < a.length; i++) {
    dotProduct += a[i] * b[i]
    normA += a[i] * a[i]
    normB += b[i] * b[i]
  }

  const magnitude = Math.sqrt(normA) * Math.sqrt(normB)
  return magnitude === 0 ? 0 : dotProduct / magnitude
}

/**
 * IndexedDB operations for embedding storage
 */
class EmbeddingStore {
  private db: IDBDatabase | null = null

  async open(): Promise<IDBDatabase> {
    if (this.db) return this.db

    return new Promise((resolve, reject) => {
      const request = indexedDB.open(DB_NAME, DB_VERSION)

      request.onerror = () => reject(request.error)

      request.onsuccess = () => {
        this.db = request.result
        resolve(this.db)
      }

      request.onupgradeneeded = (event) => {
        const db = (event.target as IDBOpenDBRequest).result

        if (!db.objectStoreNames.contains(STORE_NAME)) {
          const store = db.createObjectStore(STORE_NAME, { keyPath: 'shapeId' })
          store.createIndex('timestamp', 'timestamp', { unique: false })
          store.createIndex('shapeType', 'shapeType', { unique: false })
        }
      }
    })
  }

  async save(embedding: ShapeEmbedding): Promise<void> {
    const db = await this.open()

    return new Promise((resolve, reject) => {
      const tx = db.transaction(STORE_NAME, 'readwrite')
      const store = tx.objectStore(STORE_NAME)
      const request = store.put(embedding)

      request.onerror = () => reject(request.error)
      request.onsuccess = () => resolve()
    })
  }

  async get(shapeId: TLShapeId): Promise<ShapeEmbedding | undefined> {
    const db = await this.open()

    return new Promise((resolve, reject) => {
      const tx = db.transaction(STORE_NAME, 'readonly')
      const store = tx.objectStore(STORE_NAME)
      const request = store.get(shapeId)

      request.onerror = () => reject(request.error)
      request.onsuccess = () => resolve(request.result)
    })
  }

  async getAll(): Promise<ShapeEmbedding[]> {
    const db = await this.open()

    return new Promise((resolve, reject) => {
      const tx = db.transaction(STORE_NAME, 'readonly')
      const store = tx.objectStore(STORE_NAME)
      const request = store.getAll()

      request.onerror = () => reject(request.error)
      request.onsuccess = () => resolve(request.result || [])
    })
  }

  async delete(shapeId: TLShapeId): Promise<void> {
    const db = await this.open()

    return new Promise((resolve, reject) => {
      const tx = db.transaction(STORE_NAME, 'readwrite')
      const store = tx.objectStore(STORE_NAME)
      const request = store.delete(shapeId)

      request.onerror = () => reject(request.error)
      request.onsuccess = () => resolve()
    })
  }

  async clear(): Promise<void> {
    const db = await this.open()

    return new Promise((resolve, reject) => {
      const tx = db.transaction(STORE_NAME, 'readwrite')
      const store = tx.objectStore(STORE_NAME)
      const request = store.clear()

      request.onerror = () => reject(request.error)
      request.onsuccess = () => resolve()
    })
  }
}

const embeddingStore = new EmbeddingStore()

/**
 * Main Semantic Search Service
 */
export class SemanticSearchService {
  private editor: Editor | null = null
  private isIndexing = false
  private indexingProgress = 0

  setEditor(editor: Editor): void {
    this.editor = editor
  }

  /**
   * Index all shapes on the current canvas page
   */
  async indexCanvas(onProgress?: (progress: number) => void): Promise<void> {
    if (!this.editor || this.isIndexing) return

    this.isIndexing = true
    this.indexingProgress = 0

    try {
      const shapes = this.editor.getCurrentPageShapes()
      const shapesWithText = shapes.filter(s => extractShapeText(s).length > 10) // Only shapes with meaningful text

      console.log(`🔍 Indexing ${shapesWithText.length} shapes with text content...`)

      for (let i = 0; i < shapesWithText.length; i++) {
        const shape = shapesWithText[i]
        const text = extractShapeText(shape)

        // Check if already indexed and text hasn't changed
        const existing = await embeddingStore.get(shape.id)
        if (existing && existing.text === text) {
          continue // Skip re-indexing
        }

        const embedding = await generateEmbedding(text)

        if (embedding.length > 0) {
          await embeddingStore.save({
            shapeId: shape.id,
            embedding,
            text,
            shapeType: shape.type,
            timestamp: Date.now(),
          })
        }

        this.indexingProgress = ((i + 1) / shapesWithText.length) * 100
        onProgress?.(this.indexingProgress)
      }

      console.log('✅ Canvas indexing complete')
    } finally {
      this.isIndexing = false
    }
  }

  /**
   * Semantic search for shapes matching a query
   */
  async search(query: string, topK: number = 10, threshold: number = 0.3): Promise<SemanticSearchResult[]> {
    if (!this.editor) return []

    const queryEmbedding = await generateEmbedding(query)
    if (queryEmbedding.length === 0) return []

    const allEmbeddings = await embeddingStore.getAll()
    const currentShapes = new Map(
      this.editor.getCurrentPageShapes().map(s => [s.id, s])
    )

    // Calculate similarities
    const results: SemanticSearchResult[] = []

    for (const stored of allEmbeddings) {
      const shape = currentShapes.get(stored.shapeId)
      if (!shape) continue // Shape no longer exists

      const similarity = cosineSimilarity(queryEmbedding, stored.embedding)

      if (similarity >= threshold) {
        results.push({
          shapeId: stored.shapeId,
          shape,
          similarity,
          matchedText: stored.text,
        })
      }
    }

    // Sort by similarity (descending) and return top K
    return results
      .sort((a, b) => b.similarity - a.similarity)
      .slice(0, topK)
  }

  /**
   * Get aggregated context of all canvas content for AI queries
   */
  async getCanvasContext(): Promise<CanvasContext> {
    if (!this.editor) {
      return {
        totalShapes: 0,
        shapeTypes: {},
        textContent: [],
        summary: 'No editor connected',
      }
    }

    const shapes = this.editor.getCurrentPageShapes()
    const shapeTypes: Record<string, number> = {}
    const textContent: string[] = []

    for (const shape of shapes) {
      // Count shape types
      shapeTypes[shape.type] = (shapeTypes[shape.type] || 0) + 1

      // Extract text content
      const text = extractShapeText(shape)
      if (text.length > 10) {
        textContent.push(text)
      }
    }

    // Build summary
    const typesSummary = Object.entries(shapeTypes)
      .map(([type, count]) => `${count} ${type}${count > 1 ? 's' : ''}`)
      .join(', ')

    const summary = `Canvas contains ${shapes.length} shapes: ${typesSummary}. ${textContent.length} shapes have text content.`

    return {
      totalShapes: shapes.length,
      shapeTypes,
      textContent,
      summary,
    }
  }

  /**
   * Get shapes visible in the current viewport
   */
  getVisibleShapesContext(): { shapes: TLShape[]; descriptions: string[] } {
    if (!this.editor) return { shapes: [], descriptions: [] }

    const viewportBounds = this.editor.getViewportPageBounds()
    const allShapes = this.editor.getCurrentPageShapes()

    const visibleShapes = allShapes.filter(shape => {
      const bounds = this.editor!.getShapePageBounds(shape.id)
      if (!bounds) return false

      // Check if shape intersects viewport
      return !(
        bounds.maxX < viewportBounds.minX ||
        bounds.minX > viewportBounds.maxX ||
        bounds.maxY < viewportBounds.minY ||
        bounds.minY > viewportBounds.maxY
      )
    })

    const descriptions = visibleShapes.map(shape => {
      const text = extractShapeText(shape)
      const bounds = this.editor!.getShapePageBounds(shape.id)
      const position = bounds ? `at (${Math.round(bounds.x)}, ${Math.round(bounds.y)})` : ''
      return `[${shape.type}] ${position}: ${text.slice(0, 200)}${text.length > 200 ? '...' : ''}`
    })

    return { shapes: visibleShapes, descriptions }
  }

  /**
   * Build a comprehensive context string for AI queries about the canvas
   */
  async buildAIContext(query?: string): Promise<string> {
    const canvasContext = await this.getCanvasContext()
    const visibleContext = this.getVisibleShapesContext()

    let context = `# Canvas Overview\n${canvasContext.summary}\n\n`

    context += `## Currently Visible (${visibleContext.shapes.length} shapes):\n`
    visibleContext.descriptions.forEach((desc, i) => {
      context += `${i + 1}. ${desc}\n`
    })

    // If there's a query, add semantic search results
    if (query) {
      const searchResults = await this.search(query, 5, 0.2)
      if (searchResults.length > 0) {
        context += `\n## Most Relevant to Query "${query}":\n`
        searchResults.forEach((result, i) => {
          context += `${i + 1}. [${result.shape.type}] (${Math.round(result.similarity * 100)}% match): ${result.matchedText.slice(0, 300)}\n`
        })
      }
    }

    // Add all text content (truncated)
    const allText = canvasContext.textContent.join('\n---\n')
    if (allText.length > 0) {
      context += `\n## All Text Content:\n${allText.slice(0, 5000)}${allText.length > 5000 ? '\n...(truncated)' : ''}`
    }

    return context
  }

  /**
   * Clean up embeddings for shapes that no longer exist
   */
  async cleanupStaleEmbeddings(): Promise<number> {
    if (!this.editor) return 0

    const currentShapeIds = new Set(
      this.editor.getCurrentPageShapes().map(s => s.id)
    )

    const allEmbeddings = await embeddingStore.getAll()
    let removed = 0

    for (const embedding of allEmbeddings) {
      if (!currentShapeIds.has(embedding.shapeId)) {
        await embeddingStore.delete(embedding.shapeId)
        removed++
      }
    }

    if (removed > 0) {
      console.log(`🧹 Cleaned up ${removed} stale embeddings`)
    }

    return removed
  }

  /**
   * Clear all stored embeddings
   */
  async clearIndex(): Promise<void> {
    await embeddingStore.clear()
    console.log('🗑️ Embedding index cleared')
  }

  /**
   * Get indexing status
   */
  getIndexingStatus(): { isIndexing: boolean; progress: number } {
    return {
      isIndexing: this.isIndexing,
      progress: this.indexingProgress,
    }
  }
}

// Singleton instance
export const semanticSearch = new SemanticSearchService()
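For reference, a small sketch of driving the search service above directly; the import path is an assumption and the thresholds mirror the defaults in the file.

import { semanticSearch, generateEmbedding, cosineSimilarity } from '@/lib/semanticSearch' // assumed path

async function findShapesAbout(topic: string) {
  // Returns up to 5 shapes whose stored embeddings score >= 0.3 against the query.
  const hits = await semanticSearch.search(topic, 5, 0.3)
  for (const hit of hits) {
    console.log(`${Math.round(hit.similarity * 100)}% [${hit.shape.type}]`, hit.matchedText.slice(0, 80))
  }

  // The metric itself is plain cosine similarity over the 384-dim MiniLM vectors.
  const [a, b] = await Promise.all([
    generateEmbedding('meeting notes'),
    generateEmbedding('notes from a meeting'),
  ])
  console.log('cosine:', cosineSimilarity(a, b))
}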
@@ -25,6 +25,34 @@ export const PROVIDERS = [
   // { id: 'google', name: 'Google', model: 'Gemeni 1.5 Flash', validate: (key: string) => true },
 ]
 
+// Ollama models available on the private AI server (no API key required)
+export const OLLAMA_MODELS = [
+  {
+    id: 'llama3.1:70b',
+    name: 'Llama 3.1 70B',
+    description: 'Best quality (GPT-4 level) - ~7s response',
+    size: '42 GB',
+  },
+  {
+    id: 'llama3.1:8b',
+    name: 'Llama 3.1 8B',
+    description: 'Fast & capable - ~1-2s response',
+    size: '4.9 GB',
+  },
+  {
+    id: 'qwen2.5-coder:7b',
+    name: 'Qwen 2.5 Coder 7B',
+    description: 'Optimized for code generation',
+    size: '4.7 GB',
+  },
+  {
+    id: 'llama3.2:3b',
+    name: 'Llama 3.2 3B',
+    description: 'Fastest responses - <1s',
+    size: '2.0 GB',
+  },
+]
+
 export const AI_PERSONALITIES = [
   {
     id: 'web-developer',
@@ -48,6 +76,7 @@ export const makeRealSettings = atom('make real settings', {
     anthropic: '',
     google: '',
   },
+  ollamaModel: 'llama3.1:8b' as (typeof OLLAMA_MODELS)[number]['id'],
   personality: 'web-developer' as (typeof AI_PERSONALITIES)[number]['id'],
   prompts: {
     system: SYSTEM_PROMPT,
@@ -66,6 +95,7 @@ export function applySettingsMigrations(settings: any) {
     google: '',
     ...keys,
   },
+  ollamaModel: 'llama3.1:8b' as (typeof OLLAMA_MODELS)[number]['id'],
   personality: 'web-developer' as (typeof AI_PERSONALITIES)[number]['id'],
   prompts: {
     system: SYSTEM_PROMPT,
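A sketch of how the new ollamaModel setting might be consumed. This is illustrative only: the import path and OLLAMA_URL are placeholders, the atom's .get() accessor is tldraw's standard signal API, and the request body follows Ollama's documented /api/generate endpoint.

import { makeRealSettings, OLLAMA_MODELS } from '@/lib/settings' // assumed path

const OLLAMA_URL = 'http://159.195.32.209:11434' // placeholder: server from the commit message

async function generateWithSelectedModel(prompt: string): Promise<string> {
  const { ollamaModel } = makeRealSettings.get()
  // Fall back to the balanced default if the stored id is no longer offered.
  const model = OLLAMA_MODELS.find(m => m.id === ollamaModel) ?? OLLAMA_MODELS[1]

  const res = await fetch(`${OLLAMA_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ model: model.id, prompt, stream: false }),
  })
  const data = await res.json()
  return data.response
}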
@@ -34,14 +34,12 @@ import { ObsNoteTool } from "@/tools/ObsNoteTool"
 import { ObsNoteShape } from "@/shapes/ObsNoteShapeUtil"
 import { TranscriptionTool } from "@/tools/TranscriptionTool"
 import { TranscriptionShape } from "@/shapes/TranscriptionShapeUtil"
-import { FathomNoteShape } from "@/shapes/FathomNoteShapeUtil"
 import { HolonTool } from "@/tools/HolonTool"
 import { HolonShape } from "@/shapes/HolonShapeUtil"
 import { FathomMeetingsTool } from "@/tools/FathomMeetingsTool"
 import { HolonBrowserShape } from "@/shapes/HolonBrowserShapeUtil"
 import { ObsidianBrowserShape } from "@/shapes/ObsidianBrowserShapeUtil"
 import { FathomMeetingsBrowserShape } from "@/shapes/FathomMeetingsBrowserShapeUtil"
-import { LocationShareShape } from "@/shapes/LocationShareShapeUtil"
 import { ImageGenShape } from "@/shapes/ImageGenShapeUtil"
 import { ImageGenTool } from "@/tools/ImageGenTool"
 import { VideoGenShape } from "@/shapes/VideoGenShapeUtil"
@@ -82,12 +80,10 @@ const customShapeUtils = [
   PromptShape,
   ObsNoteShape,
   TranscriptionShape,
-  FathomNoteShape,
   HolonShape,
   HolonBrowserShape,
   ObsidianBrowserShape,
   FathomMeetingsBrowserShape,
-  LocationShareShape,
   ImageGenShape,
   VideoGenShape,
   MultmuxShape,
@@ -110,6 +106,10 @@ const customTools = [
   MultmuxTool,
 ]
 
+// Debug: Log tool and shape registration info
+console.log('🔧 Board: Custom tools registered:', customTools.map(t => ({ id: t.id, shapeType: t.prototype?.shapeType })))
+console.log('🔧 Board: Custom shapes registered:', customShapeUtils.map(s => ({ type: s.type })))
+
 export function Board() {
   const { slug } = useParams<{ slug: string }>()
@@ -1,5 +1,5 @@
 import React, { useState, useEffect, useRef } from 'react'
-import { BaseBoxShapeUtil, TLBaseShape, HTMLContainer } from '@tldraw/tldraw'
+import { BaseBoxShapeUtil, TLBaseShape, HTMLContainer, Geometry2d, Rectangle2d } from 'tldraw'
 import { StandardizedToolWrapper } from '../components/StandardizedToolWrapper'
 import { usePinnedToView } from '../hooks/usePinnedToView'
 
@@ -25,7 +25,7 @@ interface SessionResponse {
 }
 
 export class MultmuxShape extends BaseBoxShapeUtil<IMultmuxShape> {
-  static type = 'Multmux' as const
+  static override type = 'Multmux' as const
 
   // Terminal theme color: Dark purple/violet
   static readonly PRIMARY_COLOR = "#8b5cf6"
@@ -44,6 +44,14 @@ export class MultmuxShape extends BaseBoxShapeUtil<IMultmuxShape> {
     }
   }
 
+  getGeometry(shape: IMultmuxShape): Geometry2d {
+    return new Rectangle2d({
+      width: shape.props.w,
+      height: shape.props.h,
+      isFilled: true,
+    })
+  }
+
   component(shape: IMultmuxShape) {
     const isSelected = this.editor.getSelectedShapeIds().includes(shape.id)
     const [isMinimized, setIsMinimized] = useState(false)
@@ -264,6 +272,8 @@ export class MultmuxShape extends BaseBoxShapeUtil<IMultmuxShape> {
               fontFamily: 'monospace',
             }}
             placeholder="Canvas Terminal"
+            onPointerDown={(e) => e.stopPropagation()}
+            onMouseDown={(e) => e.stopPropagation()}
           />
         </label>
 
@@ -293,6 +303,8 @@ export class MultmuxShape extends BaseBoxShapeUtil<IMultmuxShape> {
               fontFamily: 'monospace',
             }}
             placeholder="http://localhost:3000"
+            onPointerDown={(e) => e.stopPropagation()}
+            onMouseDown={(e) => e.stopPropagation()}
           />
         </label>
 
@@ -322,6 +334,8 @@ export class MultmuxShape extends BaseBoxShapeUtil<IMultmuxShape> {
               fontFamily: 'monospace',
             }}
             placeholder="ws://localhost:3001"
+            onPointerDown={(e) => e.stopPropagation()}
+            onMouseDown={(e) => e.stopPropagation()}
           />
         </label>
 
@@ -338,6 +352,7 @@ export class MultmuxShape extends BaseBoxShapeUtil<IMultmuxShape> {
               fontFamily: 'monospace',
             }}
             onPointerDown={(e) => e.stopPropagation()}
+            onMouseDown={(e) => e.stopPropagation()}
           >
             Create New Session
           </button>
@@ -368,6 +383,8 @@ export class MultmuxShape extends BaseBoxShapeUtil<IMultmuxShape> {
               color: '#cdd6f4',
               fontFamily: 'monospace',
             }}
+            onPointerDown={(e) => e.stopPropagation()}
+            onMouseDown={(e) => e.stopPropagation()}
           />
         </div>
       </div>
@@ -103,6 +103,10 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
       console.log('🎬 VideoGen: Submitting to RunPod endpoint:', endpointId)
       const runUrl = `https://api.runpod.ai/v2/${endpointId}/run`
 
+      // Generate a random seed for reproducibility
+      const seed = Math.floor(Math.random() * 2147483647)
+
+      // ComfyUI workflow parameters required by the Wan2.1 handler
       const response = await fetch(runUrl, {
         method: 'POST',
         headers: {
@@ -113,7 +117,16 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
           input: {
             prompt: prompt,
             duration: shape.props.duration,
-            model: shape.props.model
+            model: shape.props.model,
+            seed: seed,
+            cfg: 6.0, // CFG scale - guidance strength
+            steps: 30, // Inference steps
+            width: 832, // Video width (Wan2.1 optimal)
+            height: 480, // Video height (Wan2.1 optimal)
+            fps: 16, // Frames per second
+            num_frames: shape.props.duration * 16, // Total frames based on duration
+            denoise: 1.0, // Full denoising for text-to-video
+            scheduler: "euler", // Sampler scheduler
           }
         })
       })
@@ -273,6 +286,7 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
             placeholder="Describe the video you want to generate..."
             disabled={isGenerating}
             onPointerDown={(e) => e.stopPropagation()}
+            onMouseDown={(e) => e.stopPropagation()}
             style={{
               width: '100%',
               minHeight: '80px',
@@ -308,6 +322,7 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
             }}
             disabled={isGenerating}
             onPointerDown={(e) => e.stopPropagation()}
+            onMouseDown={(e) => e.stopPropagation()}
             style={{
               width: '100%',
               padding: '8px',
@@ -325,6 +340,7 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
             onClick={handleGenerate}
             disabled={isGenerating || !prompt.trim()}
             onPointerDown={(e) => e.stopPropagation()}
+            onMouseDown={(e) => e.stopPropagation()}
             style={{
               padding: '8px 20px',
               backgroundColor: isGenerating ? '#ccc' : VideoGenShape.PRIMARY_COLOR,
@@ -411,6 +427,7 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
             })
           }}
           onPointerDown={(e) => e.stopPropagation()}
+          onMouseDown={(e) => e.stopPropagation()}
           style={{
             flex: 1,
             padding: '10px',
@@ -430,6 +447,7 @@ export class VideoGenShape extends BaseBoxShapeUtil<IVideoGen> {
           href={videoUrl}
           download="generated-video.mp4"
           onPointerDown={(e) => e.stopPropagation()}
+          onMouseDown={(e) => e.stopPropagation()}
           style={{
             flex: 1,
             padding: '10px',
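The hunks above only submit the job to RunPod's /run route; a sketch of the matching status poll is included here for context. The /status route, job states, and Bearer auth are RunPod's standard serverless API, while the shape of job.output depends on the Wan2.1 handler and is not shown in this commit.

async function pollRunPodJob(endpointId: string, jobId: string, apiKey: string) {
  const statusUrl = `https://api.runpod.ai/v2/${endpointId}/status/${jobId}`
  for (;;) {
    const res = await fetch(statusUrl, { headers: { Authorization: `Bearer ${apiKey}` } })
    const job = await res.json()
    if (job.status === 'COMPLETED') return job.output // output shape depends on the handler
    if (job.status === 'FAILED') throw new Error(`RunPod job failed: ${JSON.stringify(job)}`)
    await new Promise(r => setTimeout(r, 5000)) // poll every 5s while IN_QUEUE / IN_PROGRESS
  }
}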
@@ -3,7 +3,7 @@ import { HolonShape } from "@/shapes/HolonShapeUtil"
 import { holosphereService } from "@/lib/HoloSphereService"
 
 export class HolonTool extends StateNode {
-  static override id = "holon"
+  static override id = "Holon"
   static override initial = "idle"
   static override children = () => [HolonIdle]
 }
@@ -1,11 +1,129 @@
-import { BaseBoxShapeTool, TLEventHandlers } from "tldraw"
+import { StateNode } from 'tldraw'
+import { findNonOverlappingPosition } from '@/utils/shapeCollisionUtils'
 
-export class MultmuxTool extends BaseBoxShapeTool {
-  static override id = "Multmux"
-  shapeType = "Multmux"
-  override initial = "idle"
+export class MultmuxTool extends StateNode {
+  static override id = 'Multmux'
+  static override initial = 'idle'
+  static override children = () => [MultmuxIdle]
 
-  override onComplete: TLEventHandlers["onComplete"] = () => {
-    this.editor.setCurrentTool('select')
-  }
-}
+  onSelect() {
+    console.log('🖥️ MultmuxTool: tool selected - waiting for user click')
+  }
+}
+
+export class MultmuxIdle extends StateNode {
+  static override id = 'idle'
+
+  tooltipElement?: HTMLDivElement
+  mouseMoveHandler?: (e: MouseEvent) => void
+
+  override onEnter = () => {
+    this.editor.setCursor({ type: 'cross', rotation: 0 })
+
+    this.tooltipElement = document.createElement('div')
+    this.tooltipElement.style.cssText = `
+      position: fixed;
+      background: rgba(0, 0, 0, 0.85);
+      color: white;
+      padding: 8px 12px;
+      border-radius: 6px;
+      font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
+      font-size: 13px;
+      white-space: nowrap;
+      z-index: 10000;
+      pointer-events: none;
+      box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
+      border: 1px solid rgba(255, 255, 255, 0.1);
+    `
+    this.tooltipElement.textContent = 'Click anywhere to place Terminal'
+
+    document.body.appendChild(this.tooltipElement)
+
+    this.mouseMoveHandler = (e: MouseEvent) => {
+      if (this.tooltipElement) {
+        const x = e.clientX + 15
+        const y = e.clientY - 35
+
+        const rect = this.tooltipElement.getBoundingClientRect()
+        const viewportWidth = window.innerWidth
+        const viewportHeight = window.innerHeight
+
+        let finalX = x
+        let finalY = y
+
+        if (x + rect.width > viewportWidth) {
+          finalX = e.clientX - rect.width - 15
+        }
+
+        if (y + rect.height > viewportHeight) {
+          finalY = e.clientY - rect.height - 15
+        }
+
+        finalX = Math.max(10, finalX)
+        finalY = Math.max(10, finalY)
+
+        this.tooltipElement.style.left = `${finalX}px`
+        this.tooltipElement.style.top = `${finalY}px`
+      }
+    }
+
+    document.addEventListener('mousemove', this.mouseMoveHandler)
+  }
+
+  override onPointerDown = () => {
+    const { currentPagePoint } = this.editor.inputs
+    this.createMultmuxShape(currentPagePoint.x, currentPagePoint.y)
+  }
+
+  override onExit = () => {
+    this.cleanupTooltip()
+  }
+
+  private cleanupTooltip = () => {
+    if (this.mouseMoveHandler) {
+      document.removeEventListener('mousemove', this.mouseMoveHandler)
+      this.mouseMoveHandler = undefined
+    }
+
+    if (this.tooltipElement && this.tooltipElement.parentNode) {
+      document.body.removeChild(this.tooltipElement)
+      this.tooltipElement = undefined
+    }
+  }
+
+  private createMultmuxShape(clickX: number, clickY: number) {
+    try {
+      const currentCamera = this.editor.getCamera()
+      this.editor.stopCameraAnimation()
+
+      const shapeWidth = 800
+      const shapeHeight = 600
+
+      const baseX = clickX - shapeWidth / 2
+      const baseY = clickY - shapeHeight / 2
+
+      const multmuxShape = this.editor.createShape({
+        type: 'Multmux',
+        x: baseX,
+        y: baseY,
+        props: {
+          w: shapeWidth,
+          h: shapeHeight,
+        }
+      })
+
+      console.log('🖥️ Created Multmux shape:', multmuxShape.id)
+
+      const newCamera = this.editor.getCamera()
+      if (currentCamera.x !== newCamera.x || currentCamera.y !== newCamera.y || currentCamera.z !== newCamera.z) {
+        this.editor.setCamera(currentCamera, { animation: { duration: 0 } })
+      }
+
+      this.cleanupTooltip()
+      this.editor.setCurrentTool('select')
+
+    } catch (error) {
+      console.error('❌ Error creating Multmux shape:', error)
+    }
+  }
+}
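The rewritten tool is a click-to-place state machine rather than a drag-to-draw BaseBoxShapeTool. A short sketch of how it is driven (tool id 'Multmux' matches the class above; the editor variable is assumed to be a live tldraw Editor):

// Programmatic activation, e.g. from a toolbar button or keyboard shortcut.
editor.setCurrentTool('Multmux')
// The idle state then shows the "Click anywhere to place Terminal" tooltip,
// creates an 800x600 Multmux shape at the clicked point, and returns to 'select'.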
@@ -3,7 +3,7 @@ import { ObsNoteShape } from "@/shapes/ObsNoteShapeUtil"
 import { findNonOverlappingPosition } from "@/utils/shapeCollisionUtils"
 
 export class ObsNoteTool extends StateNode {
-  static override id = "obs_note"
+  static override id = "ObsidianNote"
   static override initial = "idle"
   static override children = () => [ObsNoteIdle]
 
@@ -4,7 +4,7 @@ import { getOpenAIConfig, isOpenAIConfigured } from "@/lib/clientConfig"
 import { findNonOverlappingPosition } from "@/utils/shapeCollisionUtils"
 
 export class TranscriptionTool extends StateNode {
-  static override id = "transcription"
+  static override id = "Transcription"
   static override initial = "idle"
 
   onSelect() {
@ -1,12 +1,129 @@
|
||||||
import { BaseBoxShapeTool, TLEventHandlers } from 'tldraw'
|
import { StateNode } from 'tldraw'
|
+import { findNonOverlappingPosition } from '@/utils/shapeCollisionUtils'

-export class VideoGenTool extends BaseBoxShapeTool {
+export class VideoGenTool extends StateNode {
   static override id = 'VideoGen'
   static override initial = 'idle'
-  override shapeType = 'VideoGen'
+  static override children = () => [VideoGenIdle]

-  override onComplete: TLEventHandlers["onComplete"] = () => {
-    console.log('🎬 VideoGenTool: Shape creation completed')
-    this.editor.setCurrentTool('select')
+  onSelect() {
+    console.log('🎬 VideoGenTool: tool selected - waiting for user click')
   }
 }
+
+export class VideoGenIdle extends StateNode {
+  static override id = 'idle'
+
+  tooltipElement?: HTMLDivElement
+  mouseMoveHandler?: (e: MouseEvent) => void
+
+  override onEnter = () => {
+    this.editor.setCursor({ type: 'cross', rotation: 0 })
+
+    this.tooltipElement = document.createElement('div')
+    this.tooltipElement.style.cssText = `
+      position: fixed;
+      background: rgba(0, 0, 0, 0.85);
+      color: white;
+      padding: 8px 12px;
+      border-radius: 6px;
+      font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
+      font-size: 13px;
+      white-space: nowrap;
+      z-index: 10000;
+      pointer-events: none;
+      box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
+      border: 1px solid rgba(255, 255, 255, 0.1);
+    `
+    this.tooltipElement.textContent = 'Click anywhere to place Video Generator'
+
+    document.body.appendChild(this.tooltipElement)
+
+    this.mouseMoveHandler = (e: MouseEvent) => {
+      if (this.tooltipElement) {
+        const x = e.clientX + 15
+        const y = e.clientY - 35
+
+        const rect = this.tooltipElement.getBoundingClientRect()
+        const viewportWidth = window.innerWidth
+        const viewportHeight = window.innerHeight
+
+        let finalX = x
+        let finalY = y
+
+        if (x + rect.width > viewportWidth) {
+          finalX = e.clientX - rect.width - 15
+        }
+
+        if (y + rect.height > viewportHeight) {
+          finalY = e.clientY - rect.height - 15
+        }
+
+        finalX = Math.max(10, finalX)
+        finalY = Math.max(10, finalY)
+
+        this.tooltipElement.style.left = `${finalX}px`
+        this.tooltipElement.style.top = `${finalY}px`
+      }
+    }
+
+    document.addEventListener('mousemove', this.mouseMoveHandler)
+  }
+
+  override onPointerDown = () => {
+    const { currentPagePoint } = this.editor.inputs
+    this.createVideoGenShape(currentPagePoint.x, currentPagePoint.y)
+  }
+
+  override onExit = () => {
+    this.cleanupTooltip()
+  }
+
+  private cleanupTooltip = () => {
+    if (this.mouseMoveHandler) {
+      document.removeEventListener('mousemove', this.mouseMoveHandler)
+      this.mouseMoveHandler = undefined
+    }
+
+    if (this.tooltipElement && this.tooltipElement.parentNode) {
+      document.body.removeChild(this.tooltipElement)
+      this.tooltipElement = undefined
+    }
+  }
+
+  private createVideoGenShape(clickX: number, clickY: number) {
+    try {
+      const currentCamera = this.editor.getCamera()
+      this.editor.stopCameraAnimation()
+
+      const shapeWidth = 500
+      const shapeHeight = 450
+
+      const baseX = clickX - shapeWidth / 2
+      const baseY = clickY - shapeHeight / 2
+
+      const videoGenShape = this.editor.createShape({
+        type: 'VideoGen',
+        x: baseX,
+        y: baseY,
+        props: {
+          w: shapeWidth,
+          h: shapeHeight,
+        }
+      })
+
+      console.log('🎬 Created VideoGen shape:', videoGenShape.id)
+
+      const newCamera = this.editor.getCamera()
+      if (currentCamera.x !== newCamera.x || currentCamera.y !== newCamera.y || currentCamera.z !== newCamera.z) {
+        this.editor.setCamera(currentCamera, { animation: { duration: 0 } })
+      }
+
+      this.cleanupTooltip()
+      this.editor.setCurrentTool('select')
+
+    } catch (error) {
+      console.error('❌ Error creating VideoGen shape:', error)
+    }
+  }
+}
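Note (reviewer sketch, not part of the commit): the rewritten VideoGenTool is a click-to-place StateNode tool, so it only becomes reachable once it is registered on the editor together with the VideoGen shape util that this commit adds. A minimal tldraw v2 wiring sketch; the wrapper component name and the tool's import path are assumptions:

```tsx
// Sketch only: registering a custom StateNode tool and its shape util in tldraw v2.
import { Tldraw } from 'tldraw'
import { VideoGenTool } from '@/tools/VideoGenTool'          // assumed path
import { VideoGenShape } from '@/shapes/VideoGenShapeUtil'   // shape util added in this commit

export function CanvasWithVideoGen() {
  return (
    <Tldraw
      tools={[VideoGenTool]}        // makes editor.setCurrentTool('VideoGen') resolvable
      shapeUtils={[VideoGenShape]}  // registers the 'VideoGen' shape the tool creates
    />
  )
}
```

With that wiring in place, the `VideoGen` entries added to the toolbar and UI overrides below resolve to this tool.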
@@ -29,7 +29,7 @@ export function CustomMainMenu() {
 const validateAndNormalizeShapeType = (shape: any): string => {
   if (!shape || !shape.type) return 'text'

-  const validCustomShapes = ['ObsNote', 'VideoChat', 'Transcription', 'Prompt', 'ChatBox', 'Embed', 'Markdown', 'MycrozineTemplate', 'Slide', 'Holon', 'ObsidianBrowser', 'HolonBrowser', 'FathomMeetingsBrowser', 'LocationShare', 'ImageGen']
+  const validCustomShapes = ['ObsNote', 'VideoChat', 'Transcription', 'Prompt', 'ChatBox', 'Embed', 'Markdown', 'MycrozineTemplate', 'Slide', 'Holon', 'ObsidianBrowser', 'HolonBrowser', 'FathomMeetingsBrowser', 'ImageGen', 'VideoGen', 'Multmux']
   const validDefaultShapes = ['arrow', 'bookmark', 'draw', 'embed', 'frame', 'geo', 'group', 'highlight', 'image', 'line', 'note', 'text', 'video']
   const allValidShapes = [...validCustomShapes, ...validDefaultShapes]
@@ -64,6 +64,11 @@ export function CustomToolbar() {
   useEffect(() => {
     if (editor && tools) {
       setIsReady(true)
+      // Debug: log available tools
+      console.log('🔧 CustomToolbar: Available tools:', Object.keys(tools))
+      console.log('🔧 CustomToolbar: VideoGen exists:', !!tools["VideoGen"])
+      console.log('🔧 CustomToolbar: Multmux exists:', !!tools["Multmux"])
+      console.log('🔧 CustomToolbar: ImageGen exists:', !!tools["ImageGen"])
     }
   }, [editor, tools])
@@ -1113,6 +1118,14 @@ export function CustomToolbar() {
             isSelected={tools["ImageGen"].id === editor.getCurrentToolId()}
           />
         )}
+        {tools["VideoGen"] && (
+          <TldrawUiMenuItem
+            {...tools["VideoGen"]}
+            icon="video"
+            label="Video Generation"
+            isSelected={tools["VideoGen"].id === editor.getCurrentToolId()}
+          />
+        )}
         {tools["Multmux"] && (
           <TldrawUiMenuItem
             {...tools["Multmux"]}
@@ -10,8 +10,9 @@ import {
   TldrawUiInput,
 } from "tldraw"
 import React from "react"
-import { PROVIDERS, AI_PERSONALITIES } from "../lib/settings"
+import { PROVIDERS, AI_PERSONALITIES, OLLAMA_MODELS } from "../lib/settings"
 import { useAuth } from "../context/AuthContext"
+import { getOllamaConfig } from "../lib/clientConfig"

 export function SettingsDialog({ onClose }: TLUiDialogProps) {
   const { session } = useAuth()
@@ -87,23 +88,67 @@ export function SettingsDialog({ onClose }: TLUiDialogProps) {
     }
   })

+  const [ollamaModel, setOllamaModel] = React.useState(() => {
+    try {
+      // First try to get user-specific settings if logged in
+      if (session.authed && session.username) {
+        const userApiKeys = localStorage.getItem(`${session.username}_api_keys`)
+        if (userApiKeys) {
+          try {
+            const parsed = JSON.parse(userApiKeys)
+            if (parsed.ollamaModel) {
+              return parsed.ollamaModel
+            }
+          } catch (e) {
+            // Continue to fallback
+          }
+        }
+      }
+
+      // Fallback to global settings
+      const stored = localStorage.getItem("openai_api_key")
+      if (stored) {
+        try {
+          const parsed = JSON.parse(stored)
+          if (parsed.ollamaModel) {
+            return parsed.ollamaModel
+          }
+        } catch (e) {
+          // Continue to fallback
+        }
+      }
+      return 'llama3.1:8b'
+    } catch (e) {
+      return 'llama3.1:8b'
+    }
+  })
+
+  // Check if Ollama is configured
+  const ollamaConfig = getOllamaConfig()
+
   const handleKeyChange = (provider: string, value: string) => {
     const newKeys = { ...apiKeys, [provider]: value }
     setApiKeys(newKeys)
-    saveSettings(newKeys, personality)
+    saveSettings(newKeys, personality, ollamaModel)
   }

   const handlePersonalityChange = (newPersonality: string) => {
     setPersonality(newPersonality)
-    saveSettings(apiKeys, newPersonality)
+    saveSettings(apiKeys, newPersonality, ollamaModel)
   }

-  const saveSettings = (keys: any, personalityValue: string) => {
+  const handleOllamaModelChange = (newModel: string) => {
+    setOllamaModel(newModel)
+    saveSettings(apiKeys, personality, newModel)
+  }
+
+  const saveSettings = (keys: any, personalityValue: string, ollamaModelValue: string) => {
     // Save to localStorage with the new structure
     const settings = {
       keys: keys,
       provider: 'openai', // Default provider
       models: Object.fromEntries(PROVIDERS.map((provider) => [provider.id, provider.models[0]])),
+      ollamaModel: ollamaModelValue,
       personality: personalityValue,
     }
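Note (reviewer sketch, not part of the commit): saveSettings now persists the selected model next to the existing keys/personality fields. Assuming it writes under the same "openai_api_key" key the initializer above reads, the stored payload looks roughly like this, with placeholder values:

```ts
// Illustrative only - field names come from the settings object above, values are placeholders.
const example = {
  keys: { openai: 'sk-...', anthropic: '' }, // per-provider API keys
  provider: 'openai',                        // default cloud provider
  models: { openai: 'gpt-4o' },              // first model of each PROVIDERS entry
  ollamaModel: 'llama3.1:8b',                // new field added by this commit
  personality: 'default',                    // AI personality id (placeholder)
}
localStorage.setItem('openai_api_key', JSON.stringify(example)) // assumed storage key
```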
@@ -161,11 +206,82 @@ export function SettingsDialog({ onClose }: TLUiDialogProps) {
         </select>
       </div>

+      {/* Ollama Model Selector - Only show if Ollama is configured */}
+      {ollamaConfig && (
+        <div style={{ borderTop: "1px solid #e5e7eb", paddingTop: "16px" }}>
+          <div style={{ display: "flex", alignItems: "center", gap: 8, marginBottom: "12px" }}>
+            <span style={{ fontSize: "20px" }}>🦙</span>
+            <h3 style={{ fontSize: "16px", fontWeight: "600", margin: 0 }}>
+              Private AI Model
+            </h3>
+            <span style={{
+              fontSize: "11px",
+              color: "#059669",
+              backgroundColor: "#d1fae5",
+              padding: "2px 8px",
+              borderRadius: "9999px",
+              fontWeight: "500"
+            }}>
+              FREE
+            </span>
+          </div>
+          <p style={{
+            fontSize: "12px",
+            color: "#6b7280",
+            marginBottom: "12px",
+            lineHeight: "1.4"
+          }}>
+            Running on your private server. No API key needed - select quality vs speed.
+          </p>
+          <select
+            value={ollamaModel}
+            onChange={(e) => handleOllamaModelChange(e.target.value)}
+            style={{
+              width: "100%",
+              padding: "10px 12px",
+              border: "1px solid #d1d5db",
+              borderRadius: "6px",
+              fontSize: "14px",
+              backgroundColor: "white",
+              cursor: "pointer"
+            }}
+          >
+            {OLLAMA_MODELS.map((model) => (
+              <option key={model.id} value={model.id}>
+                {model.name} - {model.description}
+              </option>
+            ))}
+          </select>
+          <div style={{
+            display: "flex",
+            justifyContent: "space-between",
+            marginTop: "8px",
+            fontSize: "11px",
+            color: "#9ca3af"
+          }}>
+            <span>Server: {ollamaConfig.url}</span>
+            <span>
+              Model size: {OLLAMA_MODELS.find(m => m.id === ollamaModel)?.size || 'Unknown'}
+            </span>
+          </div>
+        </div>
+      )}
+
       {/* API Keys Section */}
       <div style={{ borderTop: "1px solid #e5e7eb", paddingTop: "16px" }}>
-        <h3 style={{ fontSize: "16px", fontWeight: "600", marginBottom: "16px" }}>
-          API Keys
+        <h3 style={{ fontSize: "16px", fontWeight: "600", marginBottom: "8px" }}>
+          Cloud API Keys
         </h3>
+        <p style={{
+          fontSize: "12px",
+          color: "#6b7280",
+          marginBottom: "16px",
+          lineHeight: "1.4"
+        }}>
+          {ollamaConfig
+            ? "Optional fallback - used when private AI is unavailable."
+            : "Enter API keys to use cloud AI services."}
+        </p>
         {PROVIDERS.map((provider) => (
           <div key={provider.id} style={{ display: "flex", flexDirection: "column", gap: 8 }}>
             <div style={{ display: "flex", alignItems: "center", justifyContent: "space-between" }}>
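Note (reviewer sketch, not part of the commit): the dropdown above assumes an OLLAMA_MODELS constant in lib/settings exposing id, name, description, and size. A plausible shape, using the model tags and descriptions from the commit message; the size strings are placeholders, not the committed values:

```ts
// Illustrative sketch of the assumed OLLAMA_MODELS export in lib/settings.
export interface OllamaModel {
  id: string          // model tag passed to the Ollama API
  name: string        // display name in the dropdown
  description: string // quality vs speed note
  size: string        // rough size shown under the select (placeholder values below)
}

export const OLLAMA_MODELS: OllamaModel[] = [
  { id: 'llama3.1:70b',      name: 'Llama 3.1 70B',     description: 'Best quality, ~7s',      size: '~40 GB' },
  { id: 'devstral',          name: 'Devstral',          description: 'Best for coding agents', size: '~14 GB' },
  { id: 'qwen2.5-coder:7b',  name: 'Qwen 2.5 Coder 7B', description: 'Fast coding',            size: '~5 GB' },
  { id: 'llama3.1:8b',       name: 'Llama 3.1 8B',      description: 'Balanced (default)',     size: '~5 GB' },
  { id: 'llama3.2:3b',       name: 'Llama 3.2 3B',      description: 'Fastest',                size: '~2 GB' },
]
```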
@@ -14,7 +14,14 @@ import {
   zoomToSelection,
 } from "./cameraUtils"
 import { saveToPdf } from "../utils/pdfUtils"
-import { searchText } from "../utils/searchUtils"
+import {
+  searchText,
+  searchSemantic,
+  askCanvasAI,
+  indexCanvasForSearch,
+  explainViewport,
+  findSimilarToSelection
+} from "../utils/searchUtils"
 import { EmbedShape, IEmbedShape } from "@/shapes/EmbedShapeUtil"
 import { moveToSlide } from "@/slides/useSlides"
 import { ISlideShape } from "@/shapes/SlideShapeUtil"
@@ -160,31 +167,31 @@ export const overrides: TLUiOverrides = {
       onSelect: () => editor.setCurrentTool("gesture"),
     },
     ObsidianNote: {
-      id: "obs_note",
+      id: "ObsidianNote",
       icon: "file-text",
       label: "Obsidian Note",
       kbd: "alt+o",
       readonlyOk: true,
       type: "ObsNote",
-      onSelect: () => editor.setCurrentTool("obs_note"),
+      onSelect: () => editor.setCurrentTool("ObsidianNote"),
     },
     Transcription: {
-      id: "transcription",
+      id: "Transcription",
       icon: "microphone",
       label: "Transcription",
       kbd: "alt+t",
       readonlyOk: true,
       type: "Transcription",
-      onSelect: () => editor.setCurrentTool("transcription"),
+      onSelect: () => editor.setCurrentTool("Transcription"),
     },
     Holon: {
-      id: "holon",
+      id: "Holon",
       icon: "circle",
       label: "Holon",
       kbd: "alt+h",
       readonlyOk: true,
       type: "Holon",
-      onSelect: () => editor.setCurrentTool("holon"),
+      onSelect: () => editor.setCurrentTool("Holon"),
     },
     FathomMeetings: {
       id: "fathom-meetings",
@@ -205,6 +212,22 @@ export const overrides: TLUiOverrides = {
       type: "ImageGen",
       onSelect: () => editor.setCurrentTool("ImageGen"),
     },
+    VideoGen: {
+      id: "VideoGen",
+      icon: "video",
+      label: "Video Generation",
+      kbd: "alt+v",
+      readonlyOk: true,
+      onSelect: () => editor.setCurrentTool("VideoGen"),
+    },
+    Multmux: {
+      id: "Multmux",
+      icon: "terminal",
+      label: "Terminal",
+      kbd: "alt+m",
+      readonlyOk: true,
+      onSelect: () => editor.setCurrentTool("Multmux"),
+    },
     hand: {
       ...tools.hand,
       onDoubleClick: (info: any) => {
@@ -391,6 +414,95 @@ export const overrides: TLUiOverrides = {
       readonlyOk: true,
       onSelect: () => searchText(editor),
     },
+    semanticSearch: {
+      id: "semantic-search",
+      label: "Semantic Search (AI)",
+      kbd: "shift+s",
+      readonlyOk: true,
+      onSelect: async () => {
+        try {
+          await searchSemantic(editor)
+        } catch (error) {
+          console.error("Semantic search error:", error)
+        }
+      },
+    },
+    askCanvasAI: {
+      id: "ask-canvas-ai",
+      label: "Ask AI About Canvas",
+      kbd: "shift+a",
+      readonlyOk: true,
+      onSelect: async () => {
+        try {
+          // Create a simple modal/prompt for AI response
+          const answer = await askCanvasAI(editor, undefined, (partial, done) => {
+            // Log streaming response to console for now
+            if (!done) {
+              console.log("AI response:", partial)
+            }
+          })
+          if (answer) {
+            // Could display in a UI element - for now show alert with result
+            console.log("Canvas AI answer:", answer)
+          }
+        } catch (error) {
+          console.error("Canvas AI error:", error)
+        }
+      },
+    },
+    indexCanvas: {
+      id: "index-canvas",
+      label: "Index Canvas for AI Search",
+      kbd: "ctrl+shift+i",
+      readonlyOk: true,
+      onSelect: async () => {
+        try {
+          console.log("Starting canvas indexing...")
+          await indexCanvasForSearch(editor, (progress) => {
+            console.log(`Indexing progress: ${progress.toFixed(1)}%`)
+          })
+          console.log("Canvas indexing complete!")
+        } catch (error) {
+          console.error("Canvas indexing error:", error)
+        }
+      },
+    },
+    explainViewport: {
+      id: "explain-viewport",
+      label: "Explain Current View",
+      kbd: "shift+e",
+      readonlyOk: true,
+      onSelect: async () => {
+        try {
+          console.log("Analyzing viewport...")
+          await explainViewport(editor, (partial, done) => {
+            if (!done) {
+              console.log("Viewport analysis:", partial)
+            }
+          })
+        } catch (error) {
+          console.error("Viewport explanation error:", error)
+        }
+      },
+    },
+    findSimilar: {
+      id: "find-similar",
+      label: "Find Similar Shapes",
+      kbd: "shift+f",
+      readonlyOk: true,
+      onSelect: async () => {
+        if (editor.getSelectedShapeIds().length === 0) {
+          console.log("Select a shape first to find similar ones")
+          return
+        }
+        try {
+          const results = await findSimilarToSelection(editor)
+          console.log(`Found ${results.length} similar shapes`)
+        } catch (error) {
+          console.error("Find similar error:", error)
+        }
+      },
+    },
     llm: {
       id: "llm",
       label: "Run LLM Prompt",
@@ -1,7 +1,7 @@
 import OpenAI from "openai";
 import Anthropic from "@anthropic-ai/sdk";
 import { makeRealSettings, AI_PERSONALITIES } from "@/lib/settings";
-import { getRunPodConfig } from "@/lib/clientConfig";
+import { getRunPodConfig, getOllamaConfig } from "@/lib/clientConfig";

 export async function llm(
   userPrompt: string,
@@ -169,11 +169,25 @@ function getAvailableProviders(availableKeys: Record<string, string>, settings:
     return false;
   };

-  // PRIORITY 1: Check for RunPod configuration from environment variables FIRST
-  // RunPod takes priority over user-configured keys
+  // PRIORITY 0: Check for Ollama configuration (FREE local AI - highest priority)
+  const ollamaConfig = getOllamaConfig();
+  if (ollamaConfig && ollamaConfig.url) {
+    // Get the selected Ollama model from settings
+    const selectedOllamaModel = settings.ollamaModel || 'llama3.1:8b';
+    console.log(`🦙 Found Ollama configuration - using as primary AI provider (FREE) with model: ${selectedOllamaModel}`);
+    providers.push({
+      provider: 'ollama',
+      apiKey: 'ollama', // Ollama doesn't need an API key
+      baseUrl: ollamaConfig.url,
+      model: selectedOllamaModel
+    });
+  }
+
+  // PRIORITY 1: Check for RunPod configuration from environment variables
+  // RunPod is used as fallback when Ollama is not available
   const runpodConfig = getRunPodConfig();
   if (runpodConfig && runpodConfig.apiKey && runpodConfig.endpointId) {
-    console.log('🔑 Found RunPod configuration from environment variables - using as primary AI provider');
+    console.log('🔑 Found RunPod configuration from environment variables');
     providers.push({
       provider: 'runpod',
       apiKey: runpodConfig.apiKey,
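Note (reviewer sketch, not part of the commit): before relying on the Ollama-first priority, the configured server can be sanity-checked against Ollama's standard /api/tags endpoint, which lists the models actually installed there:

```ts
// Quick connectivity check for the private Ollama server used in this commit.
const OLLAMA_URL = 'http://159.195.32.209:11434'

async function listOllamaModels(): Promise<string[]> {
  const res = await fetch(`${OLLAMA_URL}/api/tags`)
  if (!res.ok) throw new Error(`Ollama unreachable: ${res.status}`)
  const data = await res.json() as { models: { name: string }[] }
  return data.models.map(m => m.name) // e.g. ['llama3.1:8b', 'qwen2.5-coder:7b', ...]
}
```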
@@ -388,6 +402,9 @@ function isValidApiKey(provider: string, apiKey: string): boolean {
     case 'google':
       // Google API keys are typically longer and don't have a specific prefix
       return apiKey.length > 20;
+    case 'ollama':
+      // Ollama doesn't require an API key - any value is valid
+      return true;
     default:
       return apiKey.length > 10; // Basic validation for unknown providers
   }
@@ -507,7 +524,80 @@ async function callProviderAPI(
   let partial = "";
   const systemPrompt = settings ? getSystemPrompt(settings) : 'You are a helpful assistant.';

-  if (provider === 'runpod') {
+  if (provider === 'ollama') {
+    // Ollama API integration - uses OpenAI-compatible API format
+    const ollamaConfig = getOllamaConfig();
+    const baseUrl = (settings as any)?.baseUrl || ollamaConfig?.url || 'http://localhost:11434';
+
+    console.log(`🦙 Ollama API: Using ${baseUrl}/v1/chat/completions with model ${model}`);
+
+    const messages = [];
+    if (systemPrompt) {
+      messages.push({ role: 'system', content: systemPrompt });
+    }
+    messages.push({ role: 'user', content: userPrompt });
+
+    try {
+      const response = await fetch(`${baseUrl}/v1/chat/completions`, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+        body: JSON.stringify({
+          model: model,
+          messages: messages,
+          stream: true, // Enable streaming for better UX
+        })
+      });
+
+      if (!response.ok) {
+        const errorText = await response.text();
+        console.error('❌ Ollama API error:', response.status, errorText);
+        throw new Error(`Ollama API error: ${response.status} - ${errorText}`);
+      }
+
+      // Handle streaming response
+      const reader = response.body?.getReader();
+      if (!reader) {
+        throw new Error('No response body from Ollama');
+      }
+
+      const decoder = new TextDecoder();
+
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+
+        const chunk = decoder.decode(value, { stream: true });
+        const lines = chunk.split('\n').filter(line => line.trim() !== '');
+
+        for (const line of lines) {
+          if (line.startsWith('data: ')) {
+            const data = line.slice(6);
+            if (data === '[DONE]') continue;
+
+            try {
+              const parsed = JSON.parse(data);
+              const content = parsed.choices?.[0]?.delta?.content || '';
+              if (content) {
+                partial += content;
+                onToken(partial, false);
+              }
+            } catch (e) {
+              // Skip malformed JSON chunks
+            }
+          }
+        }
+      }
+
+      console.log('✅ Ollama API: Response complete, length:', partial.length);
+      onToken(partial, true);
+      return;
+    } catch (error) {
+      console.error('❌ Ollama API error:', error);
+      throw error;
+    }
+  } else if (provider === 'runpod') {
     // RunPod API integration - uses environment variables for automatic setup
     // Get endpointId from parameter or from config
     let runpodEndpointId = endpointId;
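Note (reviewer sketch, not part of the commit): the streaming loop above accumulates SSE deltas into a single string. For debugging the Ollama path in isolation, the same OpenAI-compatible endpoint can be called without streaming, which returns the whole answer in one JSON payload:

```ts
// Minimal non-streaming call against Ollama's /v1/chat/completions endpoint.
async function ollamaOnce(baseUrl: string, model: string, prompt: string): Promise<string> {
  const res = await fetch(`${baseUrl}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model,
      messages: [{ role: 'user', content: prompt }],
      stream: false, // single JSON response instead of SSE chunks
    }),
  })
  if (!res.ok) throw new Error(`Ollama API error: ${res.status}`)
  const data = await res.json()
  return data.choices?.[0]?.message?.content ?? ''
}

// e.g. ollamaOnce('http://localhost:11434', 'llama3.1:8b', 'Say hi')
```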
@@ -1055,6 +1145,9 @@ function getDefaultModel(provider: string): string {
     case 'anthropic':
       // Use Claude Sonnet 4.5 as default (newest and best model)
       return 'claude-sonnet-4-5-20250929'
+    case 'ollama':
+      // Use Llama 3.1 8B as default for local Ollama
+      return 'llama3.1:8b'
     default:
       return 'gpt-4o'
   }
@@ -1,5 +1,10 @@
-import { Editor } from "tldraw"
+import { Editor, TLShape } from "tldraw"
+import { semanticSearch, SemanticSearchResult } from "@/lib/semanticSearch"
+import { canvasAI } from "@/lib/canvasAI"

+/**
+ * Basic text search (substring matching)
+ */
 export const searchText = (editor: Editor) => {
   // Switch to select tool first
   editor.setCurrentTool('select')
@@ -87,3 +92,185 @@ export const searchText = (editor: Editor) => {
     alert("No matches found")
   }
 }
+
+/**
+ * Semantic search using AI embeddings
+ * Finds conceptually similar content, not just exact text matches
+ */
+export const searchSemantic = async (
+  editor: Editor,
+  query?: string,
+  onResults?: (results: SemanticSearchResult[]) => void
+): Promise<SemanticSearchResult[]> => {
+  // Initialize semantic search with editor
+  semanticSearch.setEditor(editor)
+
+  // Get query from user if not provided
+  const searchQuery = query || prompt("Enter semantic search query:")
+  if (!searchQuery) return []
+
+  // Switch to select tool
+  editor.setCurrentTool('select')
+
+  try {
+    // Search for semantically similar shapes
+    const results = await semanticSearch.search(searchQuery, 10, 0.25)
+
+    if (results.length === 0) {
+      alert("No semantically similar shapes found. Try indexing the canvas first.")
+      return []
+    }
+
+    // Select matching shapes
+    const shapeIds = results.map(r => r.shapeId)
+    editor.selectNone()
+    editor.setSelectedShapes(shapeIds)
+
+    // Zoom to show results
+    const bounds = editor.getSelectionPageBounds()
+    if (bounds) {
+      const viewportBounds = editor.getViewportPageBounds()
+      const widthRatio = bounds.width / viewportBounds.width
+      const heightRatio = bounds.height / viewportBounds.height
+
+      let targetZoom
+      if (widthRatio < 0.1 || heightRatio < 0.1) {
+        targetZoom = Math.min(
+          (viewportBounds.width * 0.8) / bounds.width,
+          (viewportBounds.height * 0.8) / bounds.height,
+          40
+        )
+      } else if (widthRatio > 1 || heightRatio > 1) {
+        targetZoom = Math.min(
+          (viewportBounds.width * 0.7) / bounds.width,
+          (viewportBounds.height * 0.7) / bounds.height,
+          0.125
+        )
+      } else {
+        targetZoom = Math.min(
+          (viewportBounds.width * 0.8) / bounds.width,
+          (viewportBounds.height * 0.8) / bounds.height,
+          20
+        )
+      }
+
+      editor.zoomToBounds(bounds, {
+        targetZoom,
+        inset: widthRatio > 1 || heightRatio > 1 ? 20 : 50,
+        animation: {
+          duration: 400,
+          easing: (t) => t * (2 - t),
+        },
+      })
+    }
+
+    // Callback with results
+    onResults?.(results)
+
+    return results
+  } catch (error) {
+    console.error('Semantic search error:', error)
+    alert(`Semantic search error: ${error instanceof Error ? error.message : 'Unknown error'}`)
+    return []
+  }
+}
+
+/**
+ * Index the canvas for semantic search
+ * Should be called periodically or when canvas content changes significantly
+ */
+export const indexCanvasForSearch = async (
+  editor: Editor,
+  onProgress?: (progress: number) => void
+): Promise<void> => {
+  semanticSearch.setEditor(editor)
+  await semanticSearch.indexCanvas(onProgress)
+}
+
+/**
+ * Ask AI about the canvas content
+ */
+export const askCanvasAI = async (
+  editor: Editor,
+  question?: string,
+  onToken?: (partial: string, done?: boolean) => void
+): Promise<string> => {
+  canvasAI.setEditor(editor)
+
+  const query = question || prompt("Ask about the canvas:")
+  if (!query) return ''
+
+  try {
+    const result = await canvasAI.query(query, onToken)
+
+    // If we have relevant shapes, select them
+    if (result.relevantShapes.length > 0) {
+      const shapeIds = result.relevantShapes.map(r => r.shapeId)
+      editor.setSelectedShapes(shapeIds)
+    }
+
+    return result.answer
+  } catch (error) {
+    console.error('Canvas AI error:', error)
+    const errorMsg = `Error: ${error instanceof Error ? error.message : 'Unknown error'}`
+    onToken?.(errorMsg, true)
+    return errorMsg
+  }
+}
+
+/**
+ * Get a summary of the canvas
+ */
+export const summarizeCanvas = async (
+  editor: Editor,
+  onToken?: (partial: string, done?: boolean) => void
+): Promise<string> => {
+  canvasAI.setEditor(editor)
+  return canvasAI.summarize(onToken)
+}
+
+/**
+ * Explain what's visible in the current viewport
+ */
+export const explainViewport = async (
+  editor: Editor,
+  onToken?: (partial: string, done?: boolean) => void
+): Promise<string> => {
+  canvasAI.setEditor(editor)
+  return canvasAI.explainViewport(onToken)
+}
+
+/**
+ * Find shapes similar to the current selection
+ */
+export const findSimilarToSelection = async (
+  editor: Editor
+): Promise<SemanticSearchResult[]> => {
+  canvasAI.setEditor(editor)
+
+  const results = await canvasAI.getSimilarToSelected(5)
+
+  if (results.length > 0) {
+    // Add similar shapes to selection
+    const currentSelection = editor.getSelectedShapeIds()
+    const newSelection = [...currentSelection, ...results.map(r => r.shapeId)]
+    editor.setSelectedShapes(newSelection)
+  }
+
+  return results
+}
+
+/**
+ * Clean up stale embeddings
+ */
+export const cleanupSearchIndex = async (editor: Editor): Promise<number> => {
+  semanticSearch.setEditor(editor)
+  return semanticSearch.cleanupStaleEmbeddings()
+}
+
+/**
+ * Clear all search index data
+ */
+export const clearSearchIndex = async (): Promise<void> => {
+  return semanticSearch.clearIndex()
+}
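Note (reviewer sketch, not part of the commit): the semanticSearch module itself is not in this diff. Conceptually, `semanticSearch.search(query, 10, 0.25)` embeds the query, scores each indexed shape embedding by cosine similarity, drops matches below the 0.25 threshold, and keeps the top 10 — roughly:

```ts
// Illustrative ranking only; names and structure are assumptions about the indexed data.
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0, normA = 0, normB = 0
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i]
    normA += a[i] * a[i]
    normB += b[i] * b[i]
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB) || 1)
}

function rankByEmbedding(
  queryEmbedding: number[],
  index: { shapeId: string; embedding: number[] }[],
  topK = 10,
  threshold = 0.25
) {
  return index
    .map(e => ({ shapeId: e.shapeId, score: cosineSimilarity(queryEmbedding, e.embedding) }))
    .filter(r => r.score >= threshold)
    .sort((a, b) => b.score - a.score)
    .slice(0, topK)
}
```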
@@ -33,7 +33,8 @@ export function resolveOverlaps(editor: Editor, shapeId: string): void {
   const customShapeTypes = [
     'ObsNote', 'ObsidianBrowser', 'HolonBrowser', 'VideoChat',
     'Transcription', 'Holon', 'FathomMeetingsBrowser', 'Prompt',
-    'Embed', 'Slide', 'Markdown', 'MycrozineTemplate', 'ChatBox'
+    'Embed', 'Slide', 'Markdown', 'MycrozineTemplate', 'ChatBox',
+    'ImageGen', 'VideoGen', 'Multmux'
   ]

   const shape = editor.getShape(shapeId as TLShapeId)
@@ -120,7 +121,8 @@ export function findNonOverlappingPosition(
   const customShapeTypes = [
     'ObsNote', 'ObsidianBrowser', 'HolonBrowser', 'VideoChat',
     'Transcription', 'Holon', 'FathomMeetingsBrowser', 'Prompt',
-    'Embed', 'Slide', 'Markdown', 'MycrozineTemplate', 'ChatBox'
+    'Embed', 'Slide', 'Markdown', 'MycrozineTemplate', 'ChatBox',
+    'ImageGen', 'VideoGen', 'Multmux'
   ]

   const existingShapes = allShapes.filter(
||||||
|
|
@ -1209,7 +1209,7 @@ export class AutomergeDurableObject {
|
||||||
migrationStats.shapeTypes[shapeType] = (migrationStats.shapeTypes[shapeType] || 0) + 1
|
migrationStats.shapeTypes[shapeType] = (migrationStats.shapeTypes[shapeType] || 0) + 1
|
||||||
|
|
||||||
// Track custom shapes (non-standard TLDraw shapes)
|
// Track custom shapes (non-standard TLDraw shapes)
|
||||||
const customShapeTypes = ['ObsNote', 'Holon', 'FathomMeetingsBrowser', 'FathomNote', 'HolonBrowser', 'LocationShare', 'ObsidianBrowser']
|
const customShapeTypes = ['ObsNote', 'Holon', 'FathomMeetingsBrowser', 'FathomNote', 'HolonBrowser', 'ObsidianBrowser', 'ImageGen', 'VideoGen', 'Multmux']
|
||||||
if (customShapeTypes.includes(shapeType)) {
|
if (customShapeTypes.includes(shapeType)) {
|
||||||
migrationStats.customShapes.push(record.id)
|
migrationStats.customShapes.push(record.id)
|
||||||
}
|
}
|
||||||
|
|
|
||||||