+ This location is shared securely and will expire based on the sender's privacy settings. The location data
+ is stored in a decentralized filesystem and is only accessible via this unique link.
+
+ Your location data is stored securely in your private filesystem. Only people with the share link can view
+ your location, and shares automatically expire based on your settings.
+
+
+
+ )
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/constants/workerUrl.ts b/src/constants/workerUrl.ts
new file mode 100644
index 0000000..e82096b
--- /dev/null
+++ b/src/constants/workerUrl.ts
@@ -0,0 +1,36 @@
+// Environment-based worker URL configuration
+// You can easily switch between environments by changing the WORKER_ENV variable
+
+// Available environments:
+// - 'local': Use local worker running on port 5172
+// - 'dev': Use Cloudflare dev environment (jeffemmett-canvas-automerge-dev)
+// - 'production': Use production environment (jeffemmett-canvas)
+
+const WORKER_ENV = import.meta.env.VITE_WORKER_ENV || 'dev' // Default to dev for testing
+
+const WORKER_URLS = {
+ local: `http://${window.location.hostname}:5172`,
+ dev: "https://jeffemmett-canvas-automerge-dev.jeffemmett.workers.dev",
+ production: "https://jeffemmett-canvas.jeffemmett.workers.dev"
+}
+
+// Main worker URL - automatically switches based on environment
+export const WORKER_URL = WORKER_URLS[WORKER_ENV as keyof typeof WORKER_URLS] || WORKER_URLS.dev
+
+// Legacy support for existing code
+export const LOCAL_WORKER_URL = WORKER_URLS.local
+
+// Helper function to get current environment info
+export const getWorkerInfo = () => ({
+ environment: WORKER_ENV,
+ url: WORKER_URL,
+ isLocal: WORKER_ENV === 'local',
+ isDev: WORKER_ENV === 'dev',
+ isProduction: WORKER_ENV === 'production'
+})
+
+// Log current environment on import (for debugging)
+console.log(`🔧 Worker Environment: ${WORKER_ENV}`)
+console.log(`🔧 Worker URL: ${WORKER_URL}`)
+console.log(`🔧 Available environments: local, dev, production`)
+console.log(`🔧 To switch: Set VITE_WORKER_ENV environment variable or change WORKER_ENV in this file`)
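+
+// Usage sketch (illustrative only — the `/health` endpoint and the `pingWorker` helper below are
+// assumptions for demonstration, not part of this module or the worker API):
+//
+//   import { WORKER_URL, getWorkerInfo } from './constants/workerUrl'
+//
+//   async function pingWorker(): Promise<boolean> {
+//     const { environment } = getWorkerInfo()
+//     const res = await fetch(`${WORKER_URL}/health`) // hypothetical endpoint
+//     console.log(`Worker (${environment}) reachable:`, res.ok)
+//     return res.ok
+//   }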
diff --git a/src/context/AuthContext.tsx b/src/context/AuthContext.tsx
index 8a40b7b..9c08fb1 100644
--- a/src/context/AuthContext.tsx
+++ b/src/context/AuthContext.tsx
@@ -26,7 +26,7 @@ const initialSession: Session = {
obsidianVaultName: undefined
};
-const AuthContext = createContext(undefined);
+export const AuthContext = createContext(undefined);
export const AuthProvider: React.FC<{ children: ReactNode }> = ({ children }) => {
const [session, setSessionState] = useState(initialSession);
diff --git a/src/context/AutomergeHandleContext.tsx b/src/context/AutomergeHandleContext.tsx
new file mode 100644
index 0000000..f88e7eb
--- /dev/null
+++ b/src/context/AutomergeHandleContext.tsx
@@ -0,0 +1,27 @@
+import React, { createContext, useContext, ReactNode } from 'react'
+import { DocHandle } from '@automerge/automerge-repo'
+
+interface AutomergeHandleContextType {
+ handle: DocHandle | null
+}
+
+const AutomergeHandleContext = createContext<AutomergeHandleContextType>({
+ handle: null,
+})
+
+export const AutomergeHandleProvider: React.FC<{
+ handle: DocHandle | null
+ children: ReactNode
+}> = ({ handle, children }) => {
+ return (
+ <AutomergeHandleContext.Provider value={{ handle }}>
+ {children}
+ </AutomergeHandleContext.Provider>
+ )
+}
+
+export const useAutomergeHandle = (): DocHandle | null => {
+ const context = useContext(AutomergeHandleContext)
+ return context.handle
+}
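+
+// Usage sketch (illustrative — `docHandle` and `CanvasEditor` are assumed to exist elsewhere,
+// e.g. a handle obtained from repo.find(); they are not part of this file):
+//
+//   <AutomergeHandleProvider handle={docHandle}>
+//     <CanvasEditor />
+//   </AutomergeHandleProvider>
+//
+// ...and in any descendant component:
+//
+//   const handle = useAutomergeHandle()
+//   handle?.change(doc => { /* mutate the shared Automerge document */ })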
+
diff --git a/src/css/location.css b/src/css/location.css
new file mode 100644
index 0000000..f624028
--- /dev/null
+++ b/src/css/location.css
@@ -0,0 +1,417 @@
+/* Location Sharing Components Styles */
+
+/* Spinner animation */
+.spinner {
+ width: 20px;
+ height: 20px;
+ border: 2px solid currentColor;
+ border-top-color: transparent;
+ border-radius: 50%;
+ animation: spin 0.6s linear infinite;
+}
+
+@keyframes spin {
+ to {
+ transform: rotate(360deg);
+ }
+}
+
+/* Location Capture */
+.location-capture {
+ width: 100%;
+}
+
+.capture-header h2 {
+ margin-bottom: 0.5rem;
+}
+
+.capture-button {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+}
+
+/* Location Map */
+.location-map-wrapper {
+ width: 100%;
+}
+
+.location-map {
+ width: 100%;
+ min-height: 300px;
+}
+
+.map-info {
+ margin-top: 0.75rem;
+}
+
+.map-loading,
+.map-error {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ min-height: 300px;
+}
+
+/* Share Settings */
+.share-settings {
+ width: 100%;
+}
+
+.settings-header {
+ margin-bottom: 1rem;
+}
+
+.setting-group {
+ margin-bottom: 1.5rem;
+}
+
+.precision-options {
+ display: flex;
+ flex-direction: column;
+ gap: 0.5rem;
+}
+
+.precision-option {
+ display: flex;
+ align-items: flex-start;
+ gap: 0.75rem;
+ padding: 0.75rem;
+ border-radius: 0.5rem;
+ cursor: pointer;
+ transition: all 0.2s;
+}
+
+.precision-option input[type="radio"] {
+ margin-top: 0.125rem;
+ cursor: pointer;
+}
+
+.privacy-notice {
+ padding: 1rem;
+ border-radius: 0.5rem;
+ background-color: rgba(var(--muted), 0.5);
+}
+
+/* Share Location Flow */
+.share-location {
+ width: 100%;
+ max-width: 56rem;
+ margin: 0 auto;
+ padding: 1.5rem;
+}
+
+.progress-steps {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 1rem;
+ margin-bottom: 2rem;
+}
+
+.step-item {
+ display: flex;
+ align-items: center;
+ gap: 0.5rem;
+}
+
+.step-number {
+ width: 2rem;
+ height: 2rem;
+ border-radius: 50%;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-size: 0.875rem;
+ font-weight: 500;
+ transition: all 0.2s;
+}
+
+.step-connector {
+ height: 2px;
+ width: 3rem;
+ transition: all 0.2s;
+}
+
+.step-content {
+ width: 100%;
+}
+
+.settings-step {
+ display: flex;
+ flex-direction: column;
+ gap: 1.5rem;
+}
+
+.location-preview {
+ width: 100%;
+}
+
+.settings-actions {
+ display: flex;
+ gap: 0.75rem;
+}
+
+.share-step {
+ display: flex;
+ flex-direction: column;
+ gap: 1.5rem;
+}
+
+.share-success {
+ text-align: center;
+ margin-bottom: 1.5rem;
+}
+
+.share-link-box {
+ background-color: rgba(var(--muted), 0.5);
+ border: 1px solid rgba(var(--border), 1);
+ border-radius: 0.5rem;
+ padding: 1rem;
+}
+
+.share-link-box input {
+ width: 100%;
+ padding: 0.5rem 0.75rem;
+ border: 1px solid rgba(var(--border), 1);
+ border-radius: 0.5rem;
+ background-color: rgba(var(--background), 1);
+ font-size: 0.875rem;
+}
+
+.share-details {
+ background-color: rgba(var(--muted), 0.5);
+ border-radius: 0.5rem;
+ padding: 1rem;
+}
+
+.detail-row {
+ display: flex;
+ justify-content: space-between;
+ font-size: 0.875rem;
+}
+
+/* Location Viewer */
+.location-viewer {
+ width: 100%;
+ max-width: 56rem;
+ margin: 0 auto;
+ padding: 1.5rem;
+}
+
+.viewer-header {
+ margin-bottom: 1.5rem;
+}
+
+.viewer-content {
+ display: flex;
+ flex-direction: column;
+ gap: 1.5rem;
+}
+
+.share-info {
+ background-color: rgba(var(--muted), 0.5);
+ border-radius: 0.5rem;
+ padding: 1rem;
+}
+
+.info-row {
+ display: flex;
+ justify-content: space-between;
+ font-size: 0.875rem;
+ margin-bottom: 0.5rem;
+}
+
+.info-row:last-child {
+ margin-bottom: 0;
+}
+
+/* Location Dashboard */
+.location-dashboard {
+ width: 100%;
+ max-width: 72rem;
+ margin: 0 auto;
+ padding: 1.5rem;
+}
+
+.dashboard-header {
+ margin-bottom: 2rem;
+}
+
+.dashboard-content {
+ width: 100%;
+}
+
+.stats-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
+ gap: 1rem;
+ margin-bottom: 2rem;
+}
+
+.stat-card {
+ background-color: rgba(var(--muted), 0.5);
+ border: 1px solid rgba(var(--border), 1);
+ border-radius: 0.5rem;
+ padding: 1rem;
+}
+
+.stat-label {
+ font-size: 0.875rem;
+ color: rgba(var(--muted-foreground), 1);
+ margin-bottom: 0.25rem;
+}
+
+.stat-value {
+ font-size: 1.875rem;
+ font-weight: 700;
+}
+
+.shares-list {
+ display: flex;
+ flex-direction: column;
+ gap: 1rem;
+}
+
+.share-card {
+ background-color: rgba(var(--background), 1);
+ border-radius: 0.5rem;
+ border: 2px solid rgba(var(--border), 1);
+ transition: all 0.2s;
+}
+
+.share-card-header {
+ display: flex;
+ align-items: flex-start;
+ justify-content: space-between;
+ gap: 1rem;
+ padding: 1rem;
+}
+
+.share-info {
+ flex: 1;
+}
+
+.share-meta {
+ display: flex;
+ flex-direction: column;
+ gap: 0.25rem;
+ font-size: 0.75rem;
+ color: rgba(var(--muted-foreground), 1);
+}
+
+.share-actions {
+ display: flex;
+ gap: 0.5rem;
+}
+
+.share-card-body {
+ padding: 1rem;
+ padding-top: 0;
+ border-top: 1px solid rgba(var(--border), 1);
+ margin-top: 1rem;
+}
+
+.empty-state {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ min-height: 400px;
+ text-align: center;
+}
+
+/* Auth required messages */
+.share-location-auth,
+.location-dashboard-auth {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ min-height: 400px;
+}
+
+/* Error messages */
+.error-message {
+ background-color: rgba(var(--destructive), 0.1);
+ border: 1px solid rgba(var(--destructive), 0.2);
+ border-radius: 0.5rem;
+ padding: 1rem;
+}
+
+.permission-denied {
+ background-color: rgba(var(--destructive), 0.1);
+ border: 1px solid rgba(var(--destructive), 0.2);
+ border-radius: 0.5rem;
+ padding: 1rem;
+ margin-top: 1rem;
+}
+
+.current-location {
+ background-color: rgba(var(--muted), 0.5);
+ border-radius: 0.5rem;
+ padding: 1rem;
+ margin-top: 1rem;
+}
+
+.location-details {
+ display: flex;
+ flex-direction: column;
+ gap: 0.25rem;
+ font-size: 0.75rem;
+}
+
+/* Responsive adjustments */
+@media (max-width: 768px) {
+ .share-location,
+ .location-viewer,
+ .location-dashboard {
+ padding: 1rem;
+ }
+
+ .progress-steps {
+ flex-wrap: wrap;
+ }
+
+ .step-connector {
+ display: none;
+ }
+
+ .stats-grid {
+ grid-template-columns: 1fr;
+ }
+
+ .share-card-header {
+ flex-direction: column;
+ }
+
+ .share-actions {
+ width: 100%;
+ }
+
+ .share-actions button {
+ flex: 1;
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/css/obsidian-browser.css b/src/css/obsidian-browser.css
index 4435f6c..f098f41 100644
--- a/src/css/obsidian-browser.css
+++ b/src/css/obsidian-browser.css
@@ -15,6 +15,23 @@
pointer-events: auto; /* Ensure the browser is clickable */
}
+/* Shape mode: remove modal overlay styles */
+.obsidian-browser.shape-mode {
+ position: relative;
+ top: auto;
+ left: auto;
+ right: auto;
+ bottom: auto;
+ background: transparent;
+ z-index: auto;
+ display: flex;
+ align-items: stretch;
+ justify-content: stretch;
+ padding: 0;
+ width: 100%;
+ height: 100%;
+}
+
.obsidian-browser > div {
background: white;
border-radius: 12px;
@@ -39,6 +56,16 @@
overflow-y: auto;
position: relative; /* Allow absolute positioning of close button */
pointer-events: auto; /* Ensure content is clickable */
+ overscroll-behavior: contain;
+}
+
+/* Shape mode: adjust browser-content padding */
+.obsidian-browser.shape-mode .browser-content {
+ padding: 0;
+ padding-top: 0;
+ align-items: stretch;
+ width: 100%;
+ height: 100%;
}
.vault-title {
@@ -46,6 +73,14 @@
margin-bottom: 30px;
}
+/* Shape mode: reduce vault-title margin - hide completely when vault is connected */
+.obsidian-browser.shape-mode .vault-title {
+ margin-bottom: 0;
+ padding: 0;
+ padding-top: 0;
+ display: none; /* Hide completely since vault name is in header */
+}
+
.vault-title h2 {
margin: 0;
font-size: 24px;
@@ -53,6 +88,24 @@
color: #333;
}
+/* Shape mode: hide vault title when vault is connected (vault name is in header) */
+.obsidian-browser.shape-mode .vault-title h2 {
+ display: none;
+}
+
+/* Shape mode: keep vault-connect-section visible when no vault */
+.obsidian-browser.shape-mode .vault-connect-section {
+ display: block;
+ margin-top: 8px;
+}
+
+/* Show vault-title only when no vault is connected */
+.obsidian-browser.shape-mode .vault-title:has(.vault-connect-section) {
+ display: block;
+ padding: 8px 12px;
+ margin-bottom: 8px;
+}
+
.vault-connect-section {
margin-top: 12px;
text-align: center;
@@ -137,12 +190,24 @@
pointer-events: auto; /* Ensure controls are clickable */
}
+/* Shape mode: adjust browser-controls padding - more compact */
+.obsidian-browser.shape-mode .browser-controls {
+ padding: 8px 12px;
+ gap: 8px;
+ border-bottom: 1px solid #e0e0e0;
+}
+
.search-container {
margin-bottom: 20px;
width: 100%;
max-width: none;
}
+/* Shape mode: reduce search-container margin */
+.obsidian-browser.shape-mode .search-container {
+ margin-bottom: 8px;
+}
+
.view-controls {
display: flex;
justify-content: space-between;
@@ -177,6 +242,25 @@
border-color: #007acc;
}
+.disconnect-vault-button {
+ padding: 6px 12px;
+ border: 1px solid #dc3545;
+ background: #dc3545;
+ color: white;
+ border-radius: 4px;
+ cursor: pointer;
+ font-size: 14px;
+ font-weight: 500;
+ transition: all 0.2s;
+ margin-left: 8px;
+}
+
+.disconnect-vault-button:hover {
+ background: #c82333;
+ border-color: #c82333;
+ transform: translateY(-1px);
+}
+
.selection-controls {
display: flex;
gap: 8px;
@@ -300,6 +384,7 @@
flex: 1;
overflow-y: auto;
padding: 0;
+ overscroll-behavior: contain;
}
.notes-display.grid {
@@ -316,6 +401,17 @@
padding: 16px;
}
+/* Shape mode: reduce notes-display padding for more space */
+.obsidian-browser.shape-mode .notes-display.grid {
+ padding: 12px;
+ gap: 12px;
+}
+
+.obsidian-browser.shape-mode .notes-display.list {
+ padding: 12px;
+ gap: 6px;
+}
+
.notes-header {
display: flex;
justify-content: space-between;
@@ -327,6 +423,12 @@
color: #666;
}
+/* Shape mode: reduce notes-header padding */
+.obsidian-browser.shape-mode .notes-header {
+ padding: 8px 12px;
+ font-size: 11px;
+}
+
.last-imported {
font-style: italic;
}
@@ -335,6 +437,7 @@
flex: 1;
overflow-y: auto;
padding: 0;
+ overscroll-behavior: contain;
}
/* Note Cards */
@@ -1002,3 +1105,135 @@ mark {
opacity: 0.5;
cursor: not-allowed;
}
+
+/* Folder Tree Styles */
+.folder-tree-container {
+ width: 100%;
+ height: 100%;
+ overflow-y: auto;
+ padding: 10px;
+ overscroll-behavior: contain;
+}
+
+.folder-tree {
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
+}
+
+.folder-tree-item {
+ margin: 2px 0;
+}
+
+.folder-item {
+ display: flex;
+ align-items: center;
+ padding: 8px 12px;
+ border-radius: 6px;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ user-select: none;
+}
+
+.folder-item:hover {
+ background-color: #f5f5f5;
+}
+
+.folder-item.selected {
+ background-color: #e3f2fd;
+ border: 1px solid #2196f3;
+}
+
+.folder-toggle {
+ background: none;
+ border: none;
+ cursor: pointer;
+ padding: 4px;
+ margin-right: 8px;
+ font-size: 12px;
+ color: #666;
+ transition: color 0.2s ease;
+}
+
+.folder-toggle:hover {
+ color: #333;
+}
+
+.folder-icon {
+ margin-right: 8px;
+ font-size: 16px;
+}
+
+.folder-name {
+ flex: 1;
+ font-weight: 500;
+ color: #333;
+}
+
+.folder-count {
+ font-size: 12px;
+ color: #666;
+ background-color: #f0f0f0;
+ padding: 2px 6px;
+ border-radius: 10px;
+ margin-left: 8px;
+}
+
+.folder-children {
+ margin-left: 0;
+}
+
+.note-item {
+ display: flex;
+ align-items: center;
+ padding: 6px 12px;
+ border-radius: 4px;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ user-select: none;
+ margin: 1px 0;
+}
+
+.note-item:hover {
+ background-color: #f8f9fa;
+}
+
+.note-item.selected {
+ background-color: #e8f5e8;
+ border: 1px solid #4caf50;
+}
+
+.note-icon {
+ margin-right: 8px;
+ font-size: 14px;
+ color: #666;
+}
+
+.note-name {
+ flex: 1;
+ font-size: 14px;
+ color: #333;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.no-folder-tree {
+ text-align: center;
+ padding: 40px 20px;
+ color: #666;
+}
+
+/* Tree view specific adjustments */
+.notes-display.tree {
+ height: 100%;
+ overflow: hidden;
+}
+
+.notes-display.tree .folder-tree-container {
+ height: 100%;
+ max-height: 500px;
+ overflow-y: auto;
+ border: 1px solid #e0e0e0;
+ border-radius: 8px;
+ background-color: #fafafa;
+ overscroll-behavior: contain;
+}
diff --git a/src/css/style.css b/src/css/style.css
index 37c23cd..c58949f 100644
--- a/src/css/style.css
+++ b/src/css/style.css
@@ -391,6 +391,19 @@ p:has(+ ol) {
-webkit-tap-highlight-color: transparent;
}
+/* Ensure scrollable elements handle wheel events on the element being hovered */
+[style*="overflow-y: auto"],
+[style*="overflow-y: scroll"],
+[style*="overflow-x: auto"],
+[style*="overflow-x: scroll"],
+[style*="overflow: auto"],
+[style*="overflow: scroll"],
+.overflow-y-auto,
+.overflow-x-auto,
+.overflow-auto {
+ overscroll-behavior: contain;
+}
+
.tl-background {
background-color: transparent;
}
diff --git a/src/css/user-profile.css b/src/css/user-profile.css
index f2a3e18..8869ac4 100644
--- a/src/css/user-profile.css
+++ b/src/css/user-profile.css
@@ -183,7 +183,7 @@
box-shadow: 0 4px 8px rgba(0, 122, 204, 0.3);
}
-.clear-vault-button {
+.disconnect-vault-button {
background: #dc3545;
color: white;
border: none;
@@ -195,7 +195,7 @@
transition: all 0.2s ease;
}
-.clear-vault-button:hover {
+.disconnect-vault-button:hover {
background: #c82333;
transform: translateY(-1px);
}
diff --git a/src/hooks/useAdvancedSpeakerDiarization.ts b/src/hooks/useAdvancedSpeakerDiarization.ts
new file mode 100644
index 0000000..ab299d7
--- /dev/null
+++ b/src/hooks/useAdvancedSpeakerDiarization.ts
@@ -0,0 +1,207 @@
+import { useState, useRef, useCallback, useEffect } from 'react'
+
+interface SpeakerSegment {
+ speaker: string
+ text: string
+ startTime: number
+ endTime: number
+ confidence: number
+}
+
+interface UseAdvancedSpeakerDiarizationOptions {
+ onTranscriptUpdate?: (segments: SpeakerSegment[]) => void
+ onError?: (error: Error) => void
+ maxSpeakers?: number
+ enableRealTime?: boolean
+}
+
+export const useAdvancedSpeakerDiarization = ({
+ onTranscriptUpdate,
+ onError,
+ maxSpeakers = 4,
+ enableRealTime = false
+}: UseAdvancedSpeakerDiarizationOptions = {}) => {
+ const [isProcessing, setIsProcessing] = useState(false)
+ const [speakers, setSpeakers] = useState<string[]>([])
+ const [segments, setSegments] = useState<SpeakerSegment[]>([])
+ const [isSupported, setIsSupported] = useState(false)
+
+ const audioContextRef = useRef<AudioContext | null>(null)
+ const mediaStreamRef = useRef<MediaStream | null>(null)
+ const processorRef = useRef<ScriptProcessorNode | null>(null)
+ const audioBufferRef = useRef<Float32Array[]>([])
+
+ // Check if advanced features are supported
+ useEffect(() => {
+ // Check for Web Audio API support
+ const hasWebAudio = !!(window.AudioContext || (window as any).webkitAudioContext)
+ const hasMediaDevices = !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia)
+
+ setIsSupported(hasWebAudio && hasMediaDevices)
+
+ if (!hasWebAudio) {
+ onError?.(new Error('Web Audio API is not supported'))
+ }
+ if (!hasMediaDevices) {
+ onError?.(new Error('Media Devices API is not supported'))
+ }
+ }, [onError])
+
+ // Simple speaker detection based on audio characteristics
+ const detectSpeakerCharacteristics = useCallback((audioData: Float32Array) => {
+ // Calculate basic audio features
+ const rms = Math.sqrt(audioData.reduce((sum, val) => sum + val * val, 0) / audioData.length)
+ const maxAmplitude = Math.max(...audioData.map(Math.abs))
+ const zeroCrossings = audioData.slice(1).reduce((count, val, i) =>
+ count + (Math.sign(val) !== Math.sign(audioData[i]) ? 1 : 0), 0
+ )
+
+ // Simple speaker identification based on audio characteristics
+ const speakerId = `Speaker_${Math.floor(rms * 1000) % maxSpeakers + 1}`
+
+ return {
+ speakerId,
+ confidence: Math.min(rms * 10, 1), // Simple confidence based on RMS
+ features: {
+ rms,
+ maxAmplitude,
+ zeroCrossings
+ }
+ }
+ }, [maxSpeakers])
+
+ // Process audio data for speaker diarization
+ const processAudioData = useCallback((audioData: Float32Array, timestamp: number) => {
+ if (!enableRealTime) return
+
+ const speakerInfo = detectSpeakerCharacteristics(audioData)
+
+ // Create a simple segment
+ const segment: SpeakerSegment = {
+ speaker: speakerInfo.speakerId,
+ text: '', // Would need transcription integration
+ startTime: timestamp,
+ endTime: timestamp + (audioData.length / 16000), // Assuming 16kHz
+ confidence: speakerInfo.confidence
+ }
+
+ // Update segments
+ setSegments(prev => [...prev, segment])
+
+ // Update speakers list
+ setSpeakers(prev => {
+ if (!prev.includes(speakerInfo.speakerId)) {
+ return [...prev, speakerInfo.speakerId]
+ }
+ return prev
+ })
+
+ onTranscriptUpdate?.([segment])
+ }, [enableRealTime, detectSpeakerCharacteristics, onTranscriptUpdate])
+
+ // Start audio processing
+ const startProcessing = useCallback(async () => {
+ if (!isSupported) {
+ onError?.(new Error('Advanced speaker diarization not supported'))
+ return
+ }
+
+ try {
+ setIsProcessing(true)
+
+ // Get audio stream
+ const stream = await navigator.mediaDevices.getUserMedia({
+ audio: {
+ sampleRate: 16000,
+ channelCount: 1,
+ echoCancellation: true,
+ noiseSuppression: true
+ }
+ })
+
+ mediaStreamRef.current = stream
+
+ // Create audio context
+ const AudioContext = window.AudioContext || (window as any).webkitAudioContext
+ const audioContext = new AudioContext({ sampleRate: 16000 })
+ audioContextRef.current = audioContext
+
+ // Create audio source
+ const source = audioContext.createMediaStreamSource(stream)
+
+ // Create processor for real-time analysis
+ const processor = audioContext.createScriptProcessor(4096, 1, 1)
+ processorRef.current = processor
+
+ processor.onaudioprocess = (event) => {
+ const inputBuffer = event.inputBuffer
+ const audioData = inputBuffer.getChannelData(0)
+ const timestamp = audioContext.currentTime
+
+ processAudioData(audioData, timestamp)
+ }
+
+ // Connect audio nodes
+ source.connect(processor)
+ processor.connect(audioContext.destination)
+
+ console.log('🎤 Advanced speaker diarization started')
+
+ } catch (error) {
+ console.error('❌ Error starting speaker diarization:', error)
+ onError?.(error as Error)
+ setIsProcessing(false)
+ }
+ }, [isSupported, processAudioData, onError])
+
+ // Stop audio processing
+ const stopProcessing = useCallback(() => {
+ if (mediaStreamRef.current) {
+ mediaStreamRef.current.getTracks().forEach(track => track.stop())
+ mediaStreamRef.current = null
+ }
+
+ if (processorRef.current) {
+ processorRef.current.disconnect()
+ processorRef.current = null
+ }
+
+ if (audioContextRef.current) {
+ audioContextRef.current.close()
+ audioContextRef.current = null
+ }
+
+ setIsProcessing(false)
+ console.log('🛑 Advanced speaker diarization stopped')
+ }, [])
+
+ // Cleanup on unmount
+ useEffect(() => {
+ return () => {
+ stopProcessing()
+ }
+ }, [stopProcessing])
+
+ // Format segments as readable text
+ const formatSegmentsAsText = useCallback((segments: SpeakerSegment[]) => {
+ return segments.map(segment =>
+ `${segment.speaker}: ${segment.text}`
+ ).join('\n')
+ }, [])
+
+ return {
+ isProcessing,
+ isSupported,
+ speakers,
+ segments,
+ startProcessing,
+ stopProcessing,
+ formatSegmentsAsText
+ }
+}
+
+export default useAdvancedSpeakerDiarization
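+
+// Usage sketch (illustrative; the component wiring shown here is an assumption, not part of this hook):
+//
+//   const { isSupported, isProcessing, speakers, startProcessing, stopProcessing } =
+//     useAdvancedSpeakerDiarization({
+//       maxSpeakers: 2,
+//       enableRealTime: true,
+//       onTranscriptUpdate: (segments) => console.log('segments:', segments),
+//       onError: (err) => console.error(err.message),
+//     })
+//
+//   // Call startProcessing() from a user action (e.g. a "Record" button) so the
+//   // microphone permission prompt and the AudioContext start in response to a gesture.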
+
+
+
+
diff --git a/src/hooks/useWebSpeechTranscription.ts b/src/hooks/useWebSpeechTranscription.ts
new file mode 100644
index 0000000..6014343
--- /dev/null
+++ b/src/hooks/useWebSpeechTranscription.ts
@@ -0,0 +1,335 @@
+import { useState, useRef, useCallback, useEffect } from 'react'
+
+// TypeScript declarations for Web Speech API
+declare global {
+ interface Window {
+ SpeechRecognition: typeof SpeechRecognition
+ webkitSpeechRecognition: typeof SpeechRecognition
+ }
+
+ interface SpeechRecognition extends EventTarget {
+ continuous: boolean
+ interimResults: boolean
+ lang: string
+ maxAlternatives: number
+ start(): void
+ stop(): void
+ onstart: ((this: SpeechRecognition, ev: Event) => any) | null
+ onresult: ((this: SpeechRecognition, ev: SpeechRecognitionEvent) => any) | null
+ onerror: ((this: SpeechRecognition, ev: SpeechRecognitionErrorEvent) => any) | null
+ onend: ((this: SpeechRecognition, ev: Event) => any) | null
+ }
+
+ interface SpeechRecognitionEvent extends Event {
+ resultIndex: number
+ results: SpeechRecognitionResultList
+ }
+
+ interface SpeechRecognitionErrorEvent extends Event {
+ error: string
+ }
+
+ interface SpeechRecognitionResultList {
+ length: number
+ item(index: number): SpeechRecognitionResult
+ [index: number]: SpeechRecognitionResult
+ }
+
+ interface SpeechRecognitionResult {
+ length: number
+ item(index: number): SpeechRecognitionAlternative
+ [index: number]: SpeechRecognitionAlternative
+ isFinal: boolean
+ }
+
+ interface SpeechRecognitionAlternative {
+ transcript: string
+ confidence: number
+ }
+
+ var SpeechRecognition: {
+ prototype: SpeechRecognition
+ new(): SpeechRecognition
+ }
+}
+
+interface UseWebSpeechTranscriptionOptions {
+ onTranscriptUpdate?: (text: string) => void
+ onError?: (error: Error) => void
+ language?: string
+ continuous?: boolean
+ interimResults?: boolean
+}
+
+export const useWebSpeechTranscription = ({
+ onTranscriptUpdate,
+ onError,
+ language = 'en-US',
+ continuous = true,
+ interimResults = true
+}: UseWebSpeechTranscriptionOptions = {}) => {
+ const [isRecording, setIsRecording] = useState(false)
+ const [isTranscribing, setIsTranscribing] = useState(false)
+ const [transcript, setTranscript] = useState('')
+ const [interimTranscript, setInterimTranscript] = useState('')
+ const [isSupported, setIsSupported] = useState(false)
+
+ const recognitionRef = useRef<SpeechRecognition | null>(null)
+ const finalTranscriptRef = useRef('')
+ const interimTranscriptRef = useRef('')
+ const lastSpeechTimeRef = useRef(0)
+ const pauseTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null)
+ const lastConfidenceRef = useRef(0)
+ const speakerChangeThreshold = 0.3 // Threshold for detecting speaker changes
+
+ // Function to add line breaks after pauses and improve punctuation
+ const processTranscript = useCallback((text: string, isFinal: boolean = false, confidence?: number) => {
+ if (!text.trim()) return text
+
+ let processedText = text.trim()
+
+ // Add punctuation if missing at the end
+ if (isFinal && processedText && !/[.!?]$/.test(processedText)) {
+ processedText += '.'
+ }
+
+ // Add line break if there's been a pause (for final results)
+ if (isFinal) {
+ const now = Date.now()
+ const timeSinceLastSpeech = now - lastSpeechTimeRef.current
+
+ // If more than 3 seconds since last speech, add a line break
+ if (timeSinceLastSpeech > 3000 && lastSpeechTimeRef.current > 0) {
+ processedText = '\n' + processedText
+ }
+
+ lastSpeechTimeRef.current = now
+ }
+
+ return processedText
+ }, [])
+
+ // Function to detect speaker changes based on confidence and timing
+ const detectSpeakerChange = useCallback((confidence: number) => {
+ if (lastConfidenceRef.current === 0) {
+ lastConfidenceRef.current = confidence
+ return false
+ }
+
+ const confidenceDiff = Math.abs(confidence - lastConfidenceRef.current)
+ const now = Date.now()
+ const timeSinceLastSpeech = now - lastSpeechTimeRef.current
+
+ // Detect speaker change if confidence changes significantly and there's been a pause
+ const isSpeakerChange = confidenceDiff > speakerChangeThreshold && timeSinceLastSpeech > 1000
+
+ if (isSpeakerChange) {
+ // Reduced debug logging
+ lastConfidenceRef.current = confidence
+ return true
+ }
+
+ lastConfidenceRef.current = confidence
+ return false
+ }, [speakerChangeThreshold])
+
+ // Function to handle pause detection
+ const handlePauseDetection = useCallback(() => {
+ // Clear existing timeout
+ if (pauseTimeoutRef.current) {
+ clearTimeout(pauseTimeoutRef.current)
+ }
+
+ // Set new timeout for pause detection
+ pauseTimeoutRef.current = setTimeout(() => {
+ const now = Date.now()
+ const timeSinceLastSpeech = now - lastSpeechTimeRef.current
+
+ // If more than 2 seconds of silence, add a line break to interim transcript
+ if (timeSinceLastSpeech > 2000 && lastSpeechTimeRef.current > 0) {
+ const currentTranscript = finalTranscriptRef.current + '\n'
+ setTranscript(currentTranscript)
+ onTranscriptUpdate?.(currentTranscript)
+ // Reduced debug logging
+ }
+ }, 2000) // Check after 2 seconds of silence
+ }, [onTranscriptUpdate])
+
+ // Check if Web Speech API is supported
+ useEffect(() => {
+ const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
+ if (SpeechRecognition) {
+ setIsSupported(true)
+ // Reduced debug logging
+ } else {
+ setIsSupported(false)
+ console.log('❌ Web Speech API is not supported')
+ onError?.(new Error('Web Speech API is not supported in this browser'))
+ }
+ }, [onError])
+
+ // Initialize speech recognition
+ const initializeRecognition = useCallback(() => {
+ if (!isSupported) return null
+
+ const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
+ const recognition = new SpeechRecognition()
+
+ recognition.continuous = continuous
+ recognition.interimResults = interimResults
+ recognition.lang = language
+ recognition.maxAlternatives = 1
+
+ recognition.onstart = () => {
+ console.log('🎤 Web Speech API started')
+ setIsRecording(true)
+ setIsTranscribing(true)
+ }
+
+ recognition.onresult = (event) => {
+ let interimTranscript = ''
+ let finalTranscript = ''
+
+ // Process all results
+ for (let i = event.resultIndex; i < event.results.length; i++) {
+ const result = event.results[i]
+ const transcript = result[0].transcript
+
+ if (result.isFinal) {
+ finalTranscript += transcript
+ } else {
+ interimTranscript += transcript
+ }
+ }
+
+ // Update final transcript with processing
+ if (finalTranscript) {
+ // Get confidence from the first result
+ const confidence = event.results[event.results.length - 1]?.[0]?.confidence || 0
+
+ // Detect speaker change
+ const isSpeakerChange = detectSpeakerChange(confidence)
+
+ // Add speaker indicator if change detected
+ let speakerPrefix = ''
+ if (isSpeakerChange) {
+ speakerPrefix = '\n[Speaker Change]\n'
+ }
+
+ const processedFinal = processTranscript(finalTranscript, true, confidence)
+ const newText = speakerPrefix + processedFinal
+ finalTranscriptRef.current += newText
+ setTranscript(finalTranscriptRef.current)
+ onTranscriptUpdate?.(newText) // Only send the new text portion
+ console.log(`✅ Final transcript: "${processedFinal}" (confidence: ${confidence.toFixed(2)})`)
+
+ // Trigger pause detection
+ handlePauseDetection()
+ }
+
+ // Update interim transcript
+ if (interimTranscript) {
+ const processedInterim = processTranscript(interimTranscript, false)
+ interimTranscriptRef.current = processedInterim
+ setInterimTranscript(processedInterim)
+ console.log(`📝 Interim transcript: "${processedInterim}"`)
+ }
+ }
+
+ recognition.onerror = (event) => {
+ console.error('❌ Web Speech API error:', event.error)
+ setIsRecording(false)
+ setIsTranscribing(false)
+ onError?.(new Error(`Speech recognition error: ${event.error}`))
+ }
+
+ recognition.onend = () => {
+ console.log('🛑 Web Speech API ended')
+ setIsRecording(false)
+ setIsTranscribing(false)
+ }
+
+ return recognition
+ }, [isSupported, continuous, interimResults, language, onTranscriptUpdate, onError])
+
+ // Start recording
+ const startRecording = useCallback(() => {
+ if (!isSupported) {
+ onError?.(new Error('Web Speech API is not supported'))
+ return
+ }
+
+ try {
+ console.log('🎤 Starting Web Speech API recording...')
+
+ // Don't reset transcripts for continuous transcription - keep existing content
+ // finalTranscriptRef.current = ''
+ // interimTranscriptRef.current = ''
+ // setTranscript('')
+ // setInterimTranscript('')
+ lastSpeechTimeRef.current = 0
+ lastConfidenceRef.current = 0
+
+ // Clear any existing pause timeout
+ if (pauseTimeoutRef.current) {
+ clearTimeout(pauseTimeoutRef.current)
+ pauseTimeoutRef.current = null
+ }
+
+ // Initialize and start recognition
+ const recognition = initializeRecognition()
+ if (recognition) {
+ recognitionRef.current = recognition
+ recognition.start()
+ }
+ } catch (error) {
+ console.error('❌ Error starting Web Speech API:', error)
+ onError?.(error as Error)
+ }
+ }, [isSupported, initializeRecognition, onError])
+
+ // Stop recording
+ const stopRecording = useCallback(() => {
+ if (recognitionRef.current) {
+ console.log('🛑 Stopping Web Speech API recording...')
+ recognitionRef.current.stop()
+ recognitionRef.current = null
+ }
+ }, [])
+
+ // Cleanup
+ const cleanup = useCallback(() => {
+ if (recognitionRef.current) {
+ recognitionRef.current.stop()
+ recognitionRef.current = null
+ }
+
+ // Clear pause timeout
+ if (pauseTimeoutRef.current) {
+ clearTimeout(pauseTimeoutRef.current)
+ pauseTimeoutRef.current = null
+ }
+
+ setIsRecording(false)
+ setIsTranscribing(false)
+ }, [])
+
+ // Cleanup on unmount
+ useEffect(() => {
+ return cleanup
+ }, [cleanup])
+
+ return {
+ isRecording,
+ isTranscribing,
+ transcript,
+ interimTranscript,
+ isSupported,
+ startRecording,
+ stopRecording,
+ cleanup
+ }
+}
+
+// Export as default for compatibility
+export default useWebSpeechTranscription
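+
+// Usage sketch (illustrative; `appendToNotes` is a hypothetical callback, not part of this hook):
+//
+//   const { isSupported, isRecording, transcript, interimTranscript, startRecording, stopRecording } =
+//     useWebSpeechTranscription({
+//       language: 'en-US',
+//       onTranscriptUpdate: (text) => appendToNotes(text),
+//       onError: (err) => console.warn(err.message),
+//     })
+//
+//   // Browser support varies (Chrome exposes webkitSpeechRecognition), so gate the
+//   // record button on `isSupported` before rendering it.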
diff --git a/src/hooks/useWhisperTranscription.ts b/src/hooks/useWhisperTranscription.ts
deleted file mode 100644
index 184899b..0000000
--- a/src/hooks/useWhisperTranscription.ts
+++ /dev/null
@@ -1,329 +0,0 @@
-import { useCallback, useEffect, useRef, useState } from 'react'
-import { getOpenAIConfig, isOpenAIConfigured } from '../lib/clientConfig'
-
-interface UseWhisperTranscriptionOptions {
- apiKey?: string
- onTranscriptUpdate?: (text: string) => void
- onError?: (error: Error) => void
- language?: string
- enableStreaming?: boolean
- removeSilence?: boolean
-}
-
-export const useWhisperTranscription = ({
- apiKey,
- onTranscriptUpdate,
- onError,
- language = 'en',
- enableStreaming: _enableStreaming = true,
- removeSilence: _removeSilence = true
-}: UseWhisperTranscriptionOptions = {}) => {
- const transcriptRef = useRef('')
- const isRecordingRef = useRef(false)
- const mediaRecorderRef = useRef(null)
- const audioChunksRef = useRef([])
- const streamRef = useRef(null)
-
- // Get OpenAI API key from user profile settings
- const openaiConfig = getOpenAIConfig()
- const isConfigured = isOpenAIConfigured()
-
- // Custom state management
- const [recording, setRecording] = useState(false)
- const [speaking, setSpeaking] = useState(false)
- const [transcribing, setTranscribing] = useState(false)
- const [transcript, setTranscript] = useState({ text: '' })
-
- // Custom startRecording implementation
- const startRecording = useCallback(async () => {
- try {
- console.log('๐ค Starting custom recording...')
-
- // Get microphone access
- const stream = await navigator.mediaDevices.getUserMedia({
- audio: {
- echoCancellation: true,
- noiseSuppression: true,
- autoGainControl: true
- }
- })
- streamRef.current = stream
-
- // Debug the audio stream
- console.log('๐ค Audio stream created:', stream)
- console.log('๐ค Audio tracks:', stream.getAudioTracks().length)
- console.log('๐ค Track settings:', stream.getAudioTracks()[0]?.getSettings())
-
- // Set up audio level monitoring
- const audioContext = new AudioContext()
- const analyser = audioContext.createAnalyser()
- const source = audioContext.createMediaStreamSource(stream)
- source.connect(analyser)
- analyser.fftSize = 256
- const bufferLength = analyser.frequencyBinCount
- const dataArray = new Uint8Array(bufferLength)
-
- const checkAudioLevel = () => {
- analyser.getByteFrequencyData(dataArray)
- const average = dataArray.reduce((a, b) => a + b) / bufferLength
- console.log('๐ต Audio level:', average.toFixed(2))
- if (mediaRecorderRef.current?.state === 'recording') {
- requestAnimationFrame(checkAudioLevel)
- }
- }
- checkAudioLevel()
-
- // Create MediaRecorder with fallback options
- let mediaRecorder: MediaRecorder
- const options = [
- { mimeType: 'audio/webm;codecs=opus' },
- { mimeType: 'audio/webm' },
- { mimeType: 'audio/mp4' },
- { mimeType: 'audio/wav' }
- ]
-
- for (const option of options) {
- if (MediaRecorder.isTypeSupported(option.mimeType)) {
- console.log('๐ต Using MIME type:', option.mimeType)
- mediaRecorder = new MediaRecorder(stream, option)
- break
- }
- }
-
- if (!mediaRecorder!) {
- throw new Error('No supported audio format found')
- }
-
- mediaRecorderRef.current = mediaRecorder
- audioChunksRef.current = []
-
- // Handle data available
- mediaRecorder.ondataavailable = (event) => {
- console.log('๐ต Data available event fired!')
- console.log('๐ต Data size:', event.data.size, 'bytes')
- console.log('๐ต MediaRecorder state:', mediaRecorder.state)
- console.log('๐ต Event data type:', event.data.type)
- console.log('๐ต Current chunks count:', audioChunksRef.current.length)
-
- if (event.data.size > 0) {
- audioChunksRef.current.push(event.data)
- console.log('โ Chunk added successfully, total chunks:', audioChunksRef.current.length)
- } else {
- console.log('โ ๏ธ Empty data chunk received - this might be normal for the first chunk')
- }
- }
-
- // Handle MediaRecorder errors
- mediaRecorder.onerror = (event) => {
- console.error('โ MediaRecorder error:', event)
- }
-
- // Handle MediaRecorder state changes
- mediaRecorder.onstart = () => {
- console.log('๐ค MediaRecorder started')
- }
-
- // Handle recording stop
- mediaRecorder.onstop = async () => {
- console.log('๐ Recording stopped, processing audio...')
- console.log('๐ Total chunks collected:', audioChunksRef.current.length)
- console.log('๐ Chunk sizes:', audioChunksRef.current.map(chunk => chunk.size))
- setTranscribing(true)
-
- try {
- // Create audio blob
- const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/webm' })
- console.log('๐ต Audio blob created:', audioBlob.size, 'bytes')
- console.log('๐ต Audio chunks collected:', audioChunksRef.current.length)
- console.log('๐ต Blob type:', audioBlob.type)
-
- if (audioBlob.size === 0) {
- console.error('โ No audio data recorded!')
- console.error('โ Chunks:', audioChunksRef.current)
- console.error('โ Stream active:', streamRef.current?.active)
- console.error('โ Stream tracks:', streamRef.current?.getTracks().length)
- throw new Error('No audio data was recorded. Please check microphone permissions and try again.')
- }
-
- // Transcribe with OpenAI
- const apiKeyToUse = apiKey || openaiConfig?.apiKey
- console.log('๐ Using API key:', apiKeyToUse ? 'present' : 'missing')
- console.log('๐ API key length:', apiKeyToUse?.length || 0)
-
- if (!apiKeyToUse) {
- throw new Error('No OpenAI API key available')
- }
-
- const formData = new FormData()
- formData.append('file', audioBlob, 'recording.webm')
- formData.append('model', 'whisper-1')
- formData.append('language', language)
- formData.append('response_format', 'text')
-
- console.log('๐ค Sending request to OpenAI API...')
- const response = await fetch('https://api.openai.com/v1/audio/transcriptions', {
- method: 'POST',
- headers: {
- 'Authorization': `Bearer ${apiKeyToUse}`,
- },
- body: formData
- })
-
- if (!response.ok) {
- throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`)
- }
-
- const transcriptionText = await response.text()
- console.log('๐ฏ TRANSCRIPTION RESULT:', transcriptionText)
-
- setTranscript({ text: transcriptionText })
- onTranscriptUpdate?.(transcriptionText)
-
- } catch (error) {
- console.error('โ Transcription error:', error)
- onError?.(error as Error)
- } finally {
- setTranscribing(false)
- }
- }
-
- // Start recording with timeslice to get data chunks
- mediaRecorder.start(1000) // 1-second chunks
- setRecording(true)
- isRecordingRef.current = true
- console.log('โ Custom recording started with 1000ms timeslice')
- console.log('๐ค MediaRecorder state after start:', mediaRecorder.state)
- console.log('๐ค MediaRecorder mimeType:', mediaRecorder.mimeType)
-
- // Auto-stop after 10 seconds for testing (increased time)
- setTimeout(() => {
- if (mediaRecorderRef.current && mediaRecorderRef.current.state === 'recording') {
- console.log('โฐ Auto-stopping recording after 10 seconds...')
- mediaRecorderRef.current.stop()
- }
- }, 10000)
-
- // Add a test to check if we're getting any data after 2 seconds
- setTimeout(() => {
- console.log('๐งช 2-second test - chunks collected so far:', audioChunksRef.current.length)
- console.log('๐งช 2-second test - chunk sizes:', audioChunksRef.current.map(chunk => chunk.size))
- console.log('๐งช 2-second test - MediaRecorder state:', mediaRecorderRef.current?.state)
- }, 2000)
-
- } catch (error) {
- console.error('โ Error starting custom recording:', error)
- onError?.(error as Error)
- }
- }, [apiKey, openaiConfig?.apiKey, language, onTranscriptUpdate, onError])
-
- // Custom stopRecording implementation
- const stopRecording = useCallback(async () => {
- try {
- console.log('๐ Stopping custom recording...')
-
- if (mediaRecorderRef.current && mediaRecorderRef.current.state === 'recording') {
- mediaRecorderRef.current.stop()
- }
-
- if (streamRef.current) {
- streamRef.current.getTracks().forEach(track => track.stop())
- streamRef.current = null
- }
-
- setRecording(false)
- isRecordingRef.current = false
- console.log('โ Custom recording stopped')
-
- } catch (error) {
- console.error('โ Error stopping custom recording:', error)
- onError?.(error as Error)
- }
- }, [onError])
-
- // Custom pauseRecording implementation (placeholder)
- const pauseRecording = useCallback(async () => {
- console.log('โธ๏ธ Pause recording not implemented in custom version')
- }, [])
-
- // Update transcript when it changes
- useEffect(() => {
- if (transcript?.text && transcript.text !== transcriptRef.current) {
- console.log('โ New transcript text received:', transcript.text)
- console.log('๐ฏ TRANSCRIPT EMITTED TO CONSOLE:', transcript.text)
- transcriptRef.current = transcript.text
- onTranscriptUpdate?.(transcript.text)
- }
- }, [transcript?.text, onTranscriptUpdate])
-
- // Handle recording state changes
- useEffect(() => {
- isRecordingRef.current = recording
- }, [recording])
-
- // Check if OpenAI is configured
- useEffect(() => {
- if (!isConfigured && !apiKey) {
- onError?.(new Error('OpenAI API key not configured. Please set VITE_OPENAI_API_KEY in your environment variables.'))
- }
- }, [isConfigured, apiKey, onError])
-
- const startTranscription = useCallback(async () => {
- try {
- console.log('๐ค Starting custom Whisper transcription...')
-
- // Check if OpenAI is configured
- if (!isConfigured && !apiKey) {
- console.error('โ No OpenAI API key found')
- onError?.(new Error('OpenAI API key not configured. Please set VITE_OPENAI_API_KEY in your environment variables.'))
- return
- }
-
- await startRecording()
- console.log('โ Custom Whisper transcription started')
-
- } catch (error) {
- console.error('โ Error starting custom Whisper transcription:', error)
- onError?.(error as Error)
- }
- }, [startRecording, onError, apiKey, isConfigured])
-
- const stopTranscription = useCallback(async () => {
- try {
- console.log('๐ Stopping custom Whisper transcription...')
- await stopRecording()
- console.log('โ Custom Whisper transcription stopped')
- } catch (error) {
- console.error('โ Error stopping custom Whisper transcription:', error)
- onError?.(error as Error)
- }
- }, [stopRecording, onError])
-
- const pauseTranscription = useCallback(async () => {
- try {
- console.log('โธ๏ธ Pausing custom Whisper transcription...')
- await pauseRecording()
- console.log('โ Custom Whisper transcription paused')
- } catch (error) {
- console.error('โ Error pausing custom Whisper transcription:', error)
- onError?.(error as Error)
- }
- }, [pauseRecording, onError])
-
- return {
- // State
- isRecording: recording,
- isSpeaking: speaking,
- isTranscribing: transcribing,
- transcript: transcript?.text || '',
-
- // Actions
- startTranscription,
- stopTranscription,
- pauseTranscription,
-
- // Raw functions for advanced usage
- startRecording,
- stopRecording,
- pauseRecording,
- }
-}
\ No newline at end of file
diff --git a/src/hooks/useWhisperTranscriptionSimple.ts b/src/hooks/useWhisperTranscriptionSimple.ts
new file mode 100644
index 0000000..1be6b7c
--- /dev/null
+++ b/src/hooks/useWhisperTranscriptionSimple.ts
@@ -0,0 +1,967 @@
+import { useCallback, useEffect, useRef, useState } from 'react'
+import { pipeline, env } from '@xenova/transformers'
+
+// Configure the transformers library
+env.allowRemoteModels = true
+env.allowLocalModels = false
+env.useBrowserCache = true
+env.useCustomCache = false
+
+// Helper function to detect audio format from blob
+function detectAudioFormat(blob: Blob): Promise<string> {
+ if (blob.type && blob.type !== 'application/octet-stream') {
+ return Promise.resolve(blob.type)
+ }
+
+ // Try to detect from the first few bytes
+ return new Promise<string>((resolve) => {
+ const reader = new FileReader()
+ reader.onload = () => {
+ try {
+ const arrayBuffer = reader.result as ArrayBuffer
+ if (!arrayBuffer || arrayBuffer.byteLength < 4) {
+ resolve('audio/webm;codecs=opus') // Default fallback
+ return
+ }
+
+ const uint8Array = new Uint8Array(arrayBuffer.slice(0, 12))
+
+ // Check for common audio format signatures
+ if (uint8Array[0] === 0x52 && uint8Array[1] === 0x49 && uint8Array[2] === 0x46 && uint8Array[3] === 0x46) {
+ resolve('audio/wav')
+ } else if (uint8Array[0] === 0x4F && uint8Array[1] === 0x67 && uint8Array[2] === 0x67 && uint8Array[3] === 0x53) {
+ resolve('audio/ogg;codecs=opus')
+ } else if (uint8Array[0] === 0x1A && uint8Array[1] === 0x45 && uint8Array[2] === 0xDF && uint8Array[3] === 0xA3) {
+ resolve('audio/webm;codecs=opus')
+ } else {
+ resolve('audio/webm;codecs=opus') // Default fallback
+ }
+ } catch (error) {
+ console.warn('⚠️ Error detecting audio format:', error)
+ resolve('audio/webm;codecs=opus') // Default fallback
+ }
+ }
+ reader.onerror = () => {
+ resolve('audio/webm;codecs=opus') // Default fallback
+ }
+ reader.readAsArrayBuffer(blob.slice(0, 12))
+ })
+}
+
+// Simple resampling function for audio data
+function resampleAudio(audioData: Float32Array, fromSampleRate: number, toSampleRate: number): Float32Array {
+ if (fromSampleRate === toSampleRate) {
+ return audioData
+ }
+
+ // Validate input parameters
+ if (!audioData || audioData.length === 0) {
+ throw new Error('Invalid audio data for resampling')
+ }
+
+ if (fromSampleRate <= 0 || toSampleRate <= 0) {
+ throw new Error('Invalid sample rates for resampling')
+ }
+
+ const ratio = fromSampleRate / toSampleRate
+ const newLength = Math.floor(audioData.length / ratio)
+
+ // Ensure we have a valid length
+ if (newLength <= 0) {
+ throw new Error('Invalid resampled length')
+ }
+
+ const resampled = new Float32Array(newLength)
+
+ for (let i = 0; i < newLength; i++) {
+ const sourceIndex = Math.floor(i * ratio)
+ // Ensure sourceIndex is within bounds
+ if (sourceIndex >= 0 && sourceIndex < audioData.length) {
+ resampled[i] = audioData[sourceIndex]
+ } else {
+ resampled[i] = 0
+ }
+ }
+
+ return resampled
+}
+
+interface ModelOption {
+ name: string
+ options: {
+ quantized: boolean
+ use_browser_cache: boolean
+ use_custom_cache: boolean
+ }
+}
+
+interface UseWhisperTranscriptionOptions {
+ onTranscriptUpdate?: (text: string) => void
+ onError?: (error: Error) => void
+ language?: string
+ enableStreaming?: boolean
+ enableAdvancedErrorHandling?: boolean
+ modelOptions?: ModelOption[]
+ autoInitialize?: boolean // If false, model will only load when startRecording is called
+}
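+
+// Illustrative configuration sketch for the advanced-fallback path (the model names are the
+// same Xenova checkpoints used later in this file; the surrounding call is an assumption):
+//
+//   const whisper = useWhisperTranscription({
+//     enableAdvancedErrorHandling: true,
+//     modelOptions: [
+//       { name: 'Xenova/whisper-tiny.en', options: { quantized: true, use_browser_cache: true, use_custom_cache: false } },
+//       { name: 'Xenova/whisper-tiny', options: { quantized: true, use_browser_cache: true, use_custom_cache: false } },
+//     ],
+//     autoInitialize: false, // defer the model download until startRecording is called
+//   })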
+
+export const useWhisperTranscription = ({
+ onTranscriptUpdate,
+ onError,
+ language = 'en',
+ enableStreaming = false,
+ enableAdvancedErrorHandling = false,
+ modelOptions,
+ autoInitialize = true // Default to true for backward compatibility
+}: UseWhisperTranscriptionOptions = {}) => {
+ const [isRecording, setIsRecording] = useState(false)
+ const [isTranscribing, setIsTranscribing] = useState(false)
+ const [isSpeaking, setIsSpeaking] = useState(false)
+ const [transcript, setTranscript] = useState('')
+ const [modelLoaded, setModelLoaded] = useState(false)
+
+ const transcriberRef = useRef<any>(null)
+ const streamRef = useRef<MediaStream | null>(null)
+ const mediaRecorderRef = useRef<MediaRecorder | null>(null)
+ const audioChunksRef = useRef<Blob[]>([])
+ const isRecordingRef = useRef(false)
+ const transcriptRef = useRef('')
+ const streamingTranscriptRef = useRef('')
+ const periodicTranscriptionRef = useRef(null)
+ const lastTranscriptionTimeRef = useRef(0)
+ const lastSpeechTimeRef = useRef(0)
+ const previousTranscriptLengthRef = useRef(0) // Track previous transcript length for continuous transcription
+
+ // Function to process transcript with line breaks and punctuation
+ const processTranscript = useCallback((text: string, isStreaming: boolean = false) => {
+ if (!text.trim()) return text
+
+ let processedText = text.trim()
+
+ // Add punctuation if missing at the end
+ if (!/[.!?]$/.test(processedText)) {
+ processedText += '.'
+ }
+
+ // Add line break if there's been a pause (for streaming)
+ if (isStreaming) {
+ const now = Date.now()
+ const timeSinceLastSpeech = now - lastSpeechTimeRef.current
+
+ // If more than 3 seconds since last speech, add a line break
+ if (timeSinceLastSpeech > 3000 && lastSpeechTimeRef.current > 0) {
+ processedText = '\n' + processedText
+ }
+
+ lastSpeechTimeRef.current = now
+ }
+
+ return processedText
+ }, [])
+
+ // Initialize transcriber with optional advanced error handling
+ const initializeTranscriber = useCallback(async () => {
+ if (transcriberRef.current) return transcriberRef.current
+
+ try {
+ console.log('🤖 Loading Whisper model...')
+
+ // Check if we're running in a CORS-restricted environment
+ if (typeof window !== 'undefined' && window.location.protocol === 'file:') {
+ console.warn('⚠️ Running from file:// protocol - CORS issues may occur')
+ console.warn('💡 Consider running from a local development server for better compatibility')
+ }
+
+ if (enableAdvancedErrorHandling && modelOptions) {
+ // Use advanced model loading with fallbacks
+ let transcriber = null
+ let lastError = null
+
+ for (const modelOption of modelOptions) {
+ try {
+ console.log(`🔄 Trying model: ${modelOption.name}`)
+ transcriber = await pipeline('automatic-speech-recognition', modelOption.name, {
+ ...modelOption.options,
+ progress_callback: (progress: any) => {
+ if (progress.status === 'downloading') {
+ console.log(`📦 Downloading model: ${progress.file} (${Math.round(progress.progress * 100)}%)`)
+ }
+ }
+ })
+ console.log(`✅ Successfully loaded model: ${modelOption.name}`)
+ break
+ } catch (error) {
+ console.warn(`⚠️ Failed to load model ${modelOption.name}:`, error)
+ lastError = error
+ continue
+ }
+ }
+
+ if (!transcriber) {
+ throw lastError || new Error('Failed to load any model')
+ }
+
+ transcriberRef.current = transcriber
+ setModelLoaded(true)
+ return transcriber
+ } else {
+ // Simple model loading (default behavior) with fallback
+ const modelOptions = [
+ 'Xenova/whisper-tiny.en',
+ 'Xenova/whisper-tiny'
+ ]
+
+ let transcriber = null
+ let lastError = null
+
+ for (const modelName of modelOptions) {
+ try {
+ // Reduced debug logging
+
+ const loadPromise = pipeline('automatic-speech-recognition', modelName, {
+ quantized: true,
+ progress_callback: (progress: any) => {
+ if (progress.status === 'downloading') {
+ console.log(`📦 Downloading model: ${progress.file} (${Math.round(progress.progress * 100)}%)`)
+ } else if (progress.status === 'loading') {
+ console.log(`🔄 Loading model: ${progress.file}`)
+ }
+ }
+ })
+
+ const timeoutPromise = new Promise((_, reject) =>
+ setTimeout(() => reject(new Error('Model loading timeout')), 60000) // 60 seconds timeout
+ )
+
+ transcriber = await Promise.race([loadPromise, timeoutPromise])
+
+ transcriberRef.current = transcriber
+ setModelLoaded(true)
+ console.log(`✅ Whisper model loaded: ${modelName}`)
+
+ return transcriber
+ } catch (error) {
+ // Reduced error logging - only show final error
+ lastError = error
+ continue
+ }
+ }
+
+ // If all models failed, throw the last error
+ throw lastError || new Error('Failed to load any Whisper model')
+ }
+ } catch (error) {
+ console.error('❌ Failed to load model:', error)
+ onError?.(error as Error)
+ throw error
+ }
+ }, [onError, enableAdvancedErrorHandling, modelOptions])
+
+ // Handle streaming transcript updates
+ const handleStreamingTranscriptUpdate = useCallback((newText: string) => {
+ if (newText.trim()) {
+ const newTextTrimmed = newText.trim()
+ const currentTranscript = streamingTranscriptRef.current.trim()
+
+ if (currentTranscript === '') {
+ streamingTranscriptRef.current = newTextTrimmed
+ } else {
+ // Check if the new text is already contained in the current transcript
+ if (!currentTranscript.includes(newTextTrimmed)) {
+ streamingTranscriptRef.current = currentTranscript + ' ' + newTextTrimmed
+ } else {
+ // Find the best overlap point to avoid duplicates
+ const words = newTextTrimmed.split(' ')
+ const currentWords = currentTranscript.split(' ')
+
+ let overlapIndex = 0
+ let maxOverlap = 0
+
+ for (let i = 1; i <= Math.min(words.length, currentWords.length); i++) {
+ const currentEnd = currentWords.slice(-i).join(' ')
+ const newStart = words.slice(0, i).join(' ')
+
+ if (currentEnd === newStart && i > maxOverlap) {
+ maxOverlap = i
+ overlapIndex = i
+ }
+ }
+
+ if (overlapIndex > 0 && overlapIndex < words.length) {
+ const newPart = words.slice(overlapIndex).join(' ')
+ streamingTranscriptRef.current = currentTranscript + ' ' + newPart
+ }
+ }
+ }
+
+ const processedTranscript = processTranscript(streamingTranscriptRef.current, true)
+ streamingTranscriptRef.current = processedTranscript
+ setTranscript(processedTranscript)
+
+ // Only send the new portion for continuous transcription
+ const newTextPortion = processedTranscript.substring(previousTranscriptLengthRef.current)
+ if (newTextPortion.trim()) {
+ onTranscriptUpdate?.(newTextPortion)
+ previousTranscriptLengthRef.current = processedTranscript.length
+ }
+
+ console.log(`📝 Real-time transcript updated: "${newTextTrimmed}" -> Total: "${processedTranscript}"`)
+ console.log(`📝 Streaming transcript state updated, calling onTranscriptUpdate with: "${processedTranscript}"`)
+ }
+ }, [onTranscriptUpdate, processTranscript])
+
+ // Process accumulated audio chunks for streaming transcription
+ const processAccumulatedAudioChunks = useCallback(async () => {
+ try {
+ // Throttle transcription requests
+ const now = Date.now()
+ if (now - (lastTranscriptionTimeRef.current || 0) < 800) { // Reduced to 0.8 seconds for better responsiveness
+ return // Skip if less than 0.8 seconds since last transcription
+ }
+
+ const chunks = audioChunksRef.current || []
+ if (chunks.length === 0 || chunks.length < 2) {
+ console.log(`⚠️ Not enough chunks for real-time processing: ${chunks.length}`)
+ return
+ }
+
+ // Take the last 4-5 chunks for balanced processing (1-2 seconds)
+ const recentChunks = chunks.slice(-5)
+ const validChunks = recentChunks.filter(chunk => chunk && chunk.size > 2000) // Filter out small chunks
+
+ if (validChunks.length < 2) {
+ console.log(`⚠️ Not enough valid chunks for real-time processing: ${validChunks.length}`)
+ return
+ }
+
+ const totalSize = validChunks.reduce((sum, chunk) => sum + chunk.size, 0)
+ if (totalSize < 20000) { // Increased to 20KB for reliable decoding
+ console.log(`⚠️ Not enough audio data for real-time processing: ${totalSize} bytes`)
+ return
+ }
+
+ // Use the MIME type from the MediaRecorder, not individual chunks
+ let mimeType = 'audio/webm;codecs=opus' // Default to WebM
+ if (mediaRecorderRef.current && mediaRecorderRef.current.mimeType) {
+ mimeType = mediaRecorderRef.current.mimeType
+ }
+
+ console.log(`🔄 Real-time processing ${validChunks.length} chunks, total size: ${totalSize} bytes, type: ${mimeType}`)
+ console.log(`📊 Chunk sizes:`, validChunks.map(c => c.size))
+ console.log(`📊 Chunk types:`, validChunks.map(c => c.type))
+
+ // Create a more robust blob with proper headers
+ const tempBlob = new Blob(validChunks, { type: mimeType })
+
+ // Validate blob size
+ if (tempBlob.size < 10000) {
+ console.log(`⚠️ Blob too small for processing: ${tempBlob.size} bytes`)
+ return
+ }
+
+ const audioBuffer = await tempBlob.arrayBuffer()
+
+ // Validate audio buffer
+ if (audioBuffer.byteLength < 10000) {
+ console.log(`⚠️ Audio buffer too small: ${audioBuffer.byteLength} bytes`)
+ return
+ }
+
+ const audioContext = new AudioContext()
+ let audioBufferFromBlob: AudioBuffer
+
+ try {
+ // Try to decode the audio buffer
+ audioBufferFromBlob = await audioContext.decodeAudioData(audioBuffer)
+ console.log(`✅ Successfully decoded real-time audio buffer: ${audioBufferFromBlob.length} samples`)
+ } catch (decodeError) {
+ console.log('⚠️ Real-time chunk decode failed, trying alternative approach:', decodeError)
+
+ // Try alternative approach: create a new blob with different MIME type
+ try {
+ const alternativeBlob = new Blob(validChunks, { type: 'audio/webm' })
+ const alternativeBuffer = await alternativeBlob.arrayBuffer()
+ audioBufferFromBlob = await audioContext.decodeAudioData(alternativeBuffer)
+ console.log(`✅ Successfully decoded with alternative approach: ${audioBufferFromBlob.length} samples`)
+ } catch (altError) {
+ console.log('⚠️ Alternative decode also failed, skipping:', altError)
+ await audioContext.close()
+ return
+ }
+ }
+
+ await audioContext.close()
+
+ const audioData = audioBufferFromBlob.getChannelData(0)
+ if (!audioData || audioData.length === 0) {
+ return
+ }
+
+ // Resample if necessary
+ let processedAudioData: Float32Array = audioData
+ if (audioBufferFromBlob.sampleRate !== 16000) {
+ processedAudioData = resampleAudio(audioData as Float32Array, audioBufferFromBlob.sampleRate, 16000)
+ }
+
+ // Check for meaningful audio content
+ const rms = Math.sqrt(processedAudioData.reduce((sum, val) => sum + val * val, 0) / processedAudioData.length)
+ const maxAmplitude = Math.max(...processedAudioData.map(Math.abs))
+ const dynamicRange = maxAmplitude - Math.min(...processedAudioData.map(Math.abs))
+
+ console.log(`๐ Real-time audio analysis: RMS=${rms.toFixed(6)}, Max=${maxAmplitude.toFixed(6)}, Range=${dynamicRange.toFixed(6)}`)
+
+ if (rms < 0.001) {
+ console.log('โ ๏ธ Audio too quiet for transcription (RMS < 0.001)')
+ return // Skip very quiet audio
+ }
+
+ if (dynamicRange < 0.01) {
+ console.log('โ ๏ธ Audio has very low dynamic range, may be mostly noise')
+ return
+ }
+
+ // Ensure reasonable length for real-time processing (max 2 seconds for balanced speed)
+ const maxRealtimeSamples = 32000 // 2 seconds at 16kHz
+ if (processedAudioData.length > maxRealtimeSamples) {
+ processedAudioData = processedAudioData.slice(-maxRealtimeSamples)
+ }
+
+ if (processedAudioData.length < 2000) { // Skip clips shorter than ~0.125s (2000 samples at 16kHz)
+ return // Skip very short audio
+ }
+
+ console.log(`๐ต Real-time audio: ${processedAudioData.length} samples (${(processedAudioData.length / 16000).toFixed(2)}s)`)
+
+ // Transcribe with parameters optimized for real-time processing
+ const result = await transcriberRef.current(processedAudioData, {
+ language: language,
+ task: 'transcribe',
+ return_timestamps: false,
+ chunk_length_s: 5, // Longer chunks for better context
+ stride_length_s: 2, // Larger stride for better coverage
+ no_speech_threshold: 0.3, // Higher threshold to reduce noise
+ logprob_threshold: -0.8, // More sensitive detection
+ compression_ratio_threshold: 2.0 // More permissive for real-time
+ })
+
+ const transcriptionText = result?.text || ''
+ if (transcriptionText.trim()) {
+ lastTranscriptionTimeRef.current = Date.now()
+ console.log(`โ Real-time transcript: "${transcriptionText.trim()}"`)
+ console.log(`๐ Calling handleStreamingTranscriptUpdate with: "${transcriptionText.trim()}"`)
+ handleStreamingTranscriptUpdate(transcriptionText.trim())
+ } else {
+ console.log('โ ๏ธ No real-time transcription text produced, trying fallback parameters...')
+
+ // Try with more permissive parameters for real-time processing
+ try {
+ const fallbackResult = await transcriberRef.current(processedAudioData, {
+ task: 'transcribe',
+ return_timestamps: false,
+ chunk_length_s: 3, // Shorter chunks for fallback
+ stride_length_s: 1, // Smaller stride for fallback
+ no_speech_threshold: 0.1, // Very low threshold for fallback
+ logprob_threshold: -1.2, // Very sensitive for fallback
+ compression_ratio_threshold: 2.5 // Very permissive for fallback
+ })
+
+ const fallbackText = fallbackResult?.text || ''
+ if (fallbackText.trim()) {
+ console.log(`โ Fallback real-time transcript: "${fallbackText.trim()}"`)
+ lastTranscriptionTimeRef.current = Date.now()
+ handleStreamingTranscriptUpdate(fallbackText.trim())
+ } else {
+ console.log('โ ๏ธ Fallback transcription also produced no text')
+ }
+ } catch (fallbackError) {
+ console.log('โ ๏ธ Fallback transcription failed:', fallbackError)
+ }
+ }
+
+ } catch (error) {
+ console.error('โ Error processing accumulated audio chunks:', error)
+ }
+ }, [handleStreamingTranscriptUpdate, language])
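+ // Note: each streaming pass above only re-transcribes the most recent chunks (capped at ~2s of
+ // audio), so successive passes produce overlapping text; handleStreamingTranscriptUpdate is
+ // expected to reconcile that overlap into the running streaming transcript.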
+
+ // Process recorded audio chunks (final processing)
+ const processAudioChunks = useCallback(async () => {
+ if (!transcriberRef.current || audioChunksRef.current.length === 0) {
+ console.log('โ ๏ธ No transcriber or audio chunks to process')
+ return
+ }
+
+ // Ensure model is loaded
+ if (!modelLoaded) {
+ console.log('โ ๏ธ Model not loaded yet, waiting...')
+ try {
+ await initializeTranscriber()
+ } catch (error) {
+ console.error('โ Failed to initialize transcriber:', error)
+ onError?.(error as Error)
+ return
+ }
+ }
+
+ try {
+ setIsTranscribing(true)
+ console.log('๐ Processing final audio chunks...')
+
+ // Create a blob from all chunks with proper MIME type detection
+ let mimeType = 'audio/webm;codecs=opus'
+ if (audioChunksRef.current.length > 0 && audioChunksRef.current[0].type) {
+ mimeType = audioChunksRef.current[0].type
+ }
+
+ // Filter out small chunks that might be corrupted
+ const validChunks = audioChunksRef.current.filter(chunk => chunk && chunk.size > 1000)
+
+ if (validChunks.length === 0) {
+ console.log('โ ๏ธ No valid audio chunks to process')
+ return
+ }
+
+ console.log(`๐ Processing ${validChunks.length} valid chunks out of ${audioChunksRef.current.length} total chunks`)
+
+ const audioBlob = new Blob(validChunks, { type: mimeType })
+
+ // Validate blob size
+ if (audioBlob.size < 10000) {
+ console.log(`โ ๏ธ Audio blob too small for processing: ${audioBlob.size} bytes`)
+ return
+ }
+
+ // Convert blob to array buffer
+ const arrayBuffer = await audioBlob.arrayBuffer()
+
+ // Validate array buffer
+ if (arrayBuffer.byteLength < 10000) {
+ console.log(`โ ๏ธ Audio buffer too small: ${arrayBuffer.byteLength} bytes`)
+ return
+ }
+
+ // Create audio context to convert to Float32Array
+ const audioContext = new AudioContext()
+
+ let audioBuffer: AudioBuffer
+ try {
+ audioBuffer = await audioContext.decodeAudioData(arrayBuffer)
+ console.log(`โ Successfully decoded final audio buffer: ${audioBuffer.length} samples`)
+ } catch (decodeError) {
+ console.error('โ Failed to decode final audio buffer:', decodeError)
+
+ // Try alternative approach with different MIME type
+ try {
+ console.log('๐ Trying alternative MIME type for final processing...')
+ const alternativeBlob = new Blob(validChunks, { type: 'audio/webm' })
+ const alternativeBuffer = await alternativeBlob.arrayBuffer()
+ audioBuffer = await audioContext.decodeAudioData(alternativeBuffer)
+ console.log(`โ Successfully decoded with alternative approach: ${audioBuffer.length} samples`)
+ } catch (altError) {
+ console.error('โ Alternative decode also failed:', altError)
+ await audioContext.close()
+ throw new Error('Failed to decode audio data. The audio format may not be supported or the data may be corrupted.')
+ }
+ }
+
+ await audioContext.close()
+
+ // Get the first channel as Float32Array
+ const audioData = audioBuffer.getChannelData(0)
+
+ console.log(`๐ Audio buffer info: sampleRate=${audioBuffer.sampleRate}, length=${audioBuffer.length}, duration=${audioBuffer.duration}s`)
+ console.log(`๐ Audio data: length=${audioData.length}, first 10 values:`, Array.from(audioData.slice(0, 10)))
+
+ // Check for meaningful audio content
+ const rms = Math.sqrt(audioData.reduce((sum, val) => sum + val * val, 0) / audioData.length)
+ console.log(`๐ Audio RMS level: ${rms.toFixed(6)}`)
+
+ if (rms < 0.001) {
+ console.log('โ ๏ธ Audio appears to be mostly silence (RMS < 0.001)')
+ }
+
+ // Resample if necessary
+ let processedAudioData: Float32Array = audioData
+ if (audioBuffer.sampleRate !== 16000) {
+ console.log(`๐ Resampling from ${audioBuffer.sampleRate}Hz to 16000Hz`)
+ processedAudioData = resampleAudio(audioData as Float32Array, audioBuffer.sampleRate, 16000)
+ }
+
+ console.log(`๐ต Processing audio: ${processedAudioData.length} samples (${(processedAudioData.length / 16000).toFixed(2)}s)`)
+
+ // Check if transcriber is available
+ if (!transcriberRef.current) {
+ console.error('โ Transcriber not available for processing')
+ throw new Error('Transcriber not initialized')
+ }
+
+ console.log('๐ Starting transcription with Whisper model...')
+
+ // Transcribe the audio
+ const result = await transcriberRef.current(processedAudioData, {
+ language: language,
+ task: 'transcribe',
+ return_timestamps: false
+ })
+
+ console.log('๐ Transcription result:', result)
+
+ const newText = result?.text?.trim() || ''
+ if (newText) {
+ const processedText = processTranscript(newText, enableStreaming)
+
+ if (enableStreaming) {
+ // For streaming mode, merge with existing streaming transcript
+ handleStreamingTranscriptUpdate(processedText)
+ } else {
+ // For non-streaming mode, append to existing transcript
+ const currentTranscript = transcriptRef.current
+ const updatedTranscript = currentTranscript ? `${currentTranscript} ${processedText}` : processedText
+
+ transcriptRef.current = updatedTranscript
+ setTranscript(updatedTranscript)
+
+ // Only send the new portion for continuous transcription
+ const newTextPortion = updatedTranscript.substring(previousTranscriptLengthRef.current)
+ if (newTextPortion.trim()) {
+ onTranscriptUpdate?.(newTextPortion)
+ previousTranscriptLengthRef.current = updatedTranscript.length
+ }
+
+ console.log(`โ Transcription: "${processedText}" -> Total: "${updatedTranscript}"`)
+ }
+ } else {
+ console.log('โ ๏ธ No transcription text produced')
+ console.log('๐ Full transcription result object:', result)
+
+ // Try alternative transcription parameters
+ console.log('๐ Trying alternative transcription parameters...')
+ try {
+ const altResult = await transcriberRef.current(processedAudioData, {
+ task: 'transcribe',
+ return_timestamps: false
+ })
+ console.log('๐ Alternative transcription result:', altResult)
+
+ if (altResult?.text?.trim()) {
+ const processedAltText = processTranscript(altResult.text, enableStreaming)
+ console.log('โ Alternative transcription successful:', processedAltText)
+ const currentTranscript = transcriptRef.current
+ const updatedTranscript = currentTranscript ? `${currentTranscript} ${processedAltText}` : processedAltText
+
+ transcriptRef.current = updatedTranscript
+ setTranscript(updatedTranscript)
+
+ // Only send the new portion for continuous transcription
+ const newTextPortion = updatedTranscript.substring(previousTranscriptLengthRef.current)
+ if (newTextPortion.trim()) {
+ onTranscriptUpdate?.(newTextPortion)
+ previousTranscriptLengthRef.current = updatedTranscript.length
+ }
+ }
+ } catch (altError) {
+ console.log('โ ๏ธ Alternative transcription also failed:', altError)
+ }
+ }
+
+ // Clear processed chunks
+ audioChunksRef.current = []
+
+ } catch (error) {
+ console.error('โ Error processing audio:', error)
+ onError?.(error as Error)
+ } finally {
+ setIsTranscribing(false)
+ }
+ }, [transcriberRef, language, onTranscriptUpdate, onError, enableStreaming, handleStreamingTranscriptUpdate, modelLoaded, initializeTranscriber])
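+ // Note: in non-streaming mode the code above forwards only the text beyond
+ // previousTranscriptLengthRef to onTranscriptUpdate, so callers receive incremental updates
+ // rather than the full transcript on every pass.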
+
+ // Start recording
+ const startRecording = useCallback(async () => {
+ try {
+ console.log('๐ค Starting recording...')
+ console.log('๐ enableStreaming in startRecording:', enableStreaming)
+
+ // Ensure model is loaded before starting
+ if (!modelLoaded) {
+ console.log('๐ Model not loaded, initializing...')
+ await initializeTranscriber()
+ }
+
+ // Don't reset transcripts for continuous transcription - keep existing content
+ // transcriptRef.current = ''
+ // streamingTranscriptRef.current = ''
+ // setTranscript('')
+ lastSpeechTimeRef.current = 0
+ audioChunksRef.current = []
+ lastTranscriptionTimeRef.current = 0
+
+ // Clear any existing periodic transcription timer
+ if (periodicTranscriptionRef.current) {
+ clearInterval(periodicTranscriptionRef.current)
+ periodicTranscriptionRef.current = null
+ }
+
+ // Get microphone access
+ const stream = await navigator.mediaDevices.getUserMedia({
+ audio: {
+ echoCancellation: true,
+ noiseSuppression: true,
+ autoGainControl: true,
+ sampleRate: 44100,
+ channelCount: 1
+ }
+ })
+
+ streamRef.current = stream
+
+ // Create MediaRecorder with fallback options
+ let mediaRecorder: MediaRecorder | undefined
+ const options = [
+ { mimeType: 'audio/webm;codecs=opus' },
+ { mimeType: 'audio/webm' },
+ { mimeType: 'audio/ogg;codecs=opus' },
+ { mimeType: 'audio/ogg' },
+ { mimeType: 'audio/wav' },
+ { mimeType: 'audio/mp4' }
+ ]
+
+ for (const option of options) {
+ if (MediaRecorder.isTypeSupported(option.mimeType)) {
+ console.log('๐ต Using MIME type:', option.mimeType)
+ mediaRecorder = new MediaRecorder(stream, option)
+ break
+ }
+ }
+
+ if (!mediaRecorder) {
+ throw new Error('No supported audio format found')
+ }
+
+ // Store the MIME type for later use
+ const mimeType = mediaRecorder.mimeType
+ console.log('๐ต Final MIME type:', mimeType)
+
+ mediaRecorderRef.current = mediaRecorder
+
+ // Handle data available
+ mediaRecorder.ondataavailable = (event) => {
+ if (event.data.size > 0) {
+ // Validate chunk before adding
+ if (event.data.size > 1000) { // Only add chunks with meaningful size
+ audioChunksRef.current.push(event.data)
+ console.log(`๐ฆ Received chunk ${audioChunksRef.current.length}, size: ${event.data.size} bytes, type: ${event.data.type}`)
+
+ // Limit the number of chunks to prevent memory issues
+ if (audioChunksRef.current.length > 20) {
+ audioChunksRef.current = audioChunksRef.current.slice(-15) // Keep last 15 chunks
+ }
+ } else {
+ console.log(`โ ๏ธ Skipping small chunk: ${event.data.size} bytes`)
+ }
+ }
+ }
+
+ // Handle recording stop
+ mediaRecorder.onstop = () => {
+ console.log('๐ Recording stopped, processing audio...')
+ processAudioChunks()
+ }
+
+ // Handle MediaRecorder state changes
+ mediaRecorder.onstart = () => {
+ console.log('๐ค MediaRecorder started')
+ console.log('๐ enableStreaming value:', enableStreaming)
+ setIsRecording(true)
+ isRecordingRef.current = true
+
+ // Start periodic transcription processing for streaming mode
+ if (enableStreaming) {
+ console.log('๐ Starting streaming transcription (every 0.8 seconds)')
+ periodicTranscriptionRef.current = setInterval(() => {
+ console.log('๐ Interval triggered, isRecordingRef.current:', isRecordingRef.current)
+ if (isRecordingRef.current) {
+ console.log('๐ Running periodic streaming transcription...')
+ processAccumulatedAudioChunks()
+ } else {
+ console.log('โ ๏ธ Not running transcription - recording stopped')
+ }
+ }, 800) // Update every 0.8 seconds for better responsiveness
+ } else {
+ console.log('โน๏ธ Streaming transcription disabled - enableStreaming is false')
+ }
+ }
+
+ // Start recording with appropriate timeslice
+ const timeslice = enableStreaming ? 1000 : 2000 // Larger chunks for more stable processing
+ console.log(`๐ต Starting recording with ${timeslice}ms timeslice`)
+ mediaRecorder.start(timeslice)
+ isRecordingRef.current = true
+ setIsRecording(true)
+
+ console.log('โ Recording started - MediaRecorder state:', mediaRecorder.state)
+
+ } catch (error) {
+ console.error('โ Error starting recording:', error)
+ onError?.(error as Error)
+ }
+ }, [processAudioChunks, processAccumulatedAudioChunks, onError, enableStreaming, modelLoaded, initializeTranscriber])
+
+ // Stop recording
+ const stopRecording = useCallback(async () => {
+ try {
+ console.log('๐ Stopping recording...')
+
+ // Clear periodic transcription timer
+ if (periodicTranscriptionRef.current) {
+ clearInterval(periodicTranscriptionRef.current)
+ periodicTranscriptionRef.current = null
+ }
+
+ if (mediaRecorderRef.current && isRecordingRef.current) {
+ mediaRecorderRef.current.stop()
+ }
+
+ if (streamRef.current) {
+ streamRef.current.getTracks().forEach(track => track.stop())
+ streamRef.current = null
+ }
+
+ isRecordingRef.current = false
+ setIsRecording(false)
+
+ console.log('โ Recording stopped')
+
+ } catch (error) {
+ console.error('โ Error stopping recording:', error)
+ onError?.(error as Error)
+ }
+ }, [onError])
+
+ // Pause recording (placeholder for compatibility)
+ const pauseRecording = useCallback(async () => {
+ console.log('โธ๏ธ Pause recording not implemented')
+ }, [])
+
+ // Cleanup function
+ const cleanup = useCallback(() => {
+ console.log('๐งน Cleaning up transcription resources...')
+
+ // Stop recording if active
+ if (isRecordingRef.current) {
+ setIsRecording(false)
+ isRecordingRef.current = false
+ }
+
+ // Clear periodic transcription timer
+ if (periodicTranscriptionRef.current) {
+ clearInterval(periodicTranscriptionRef.current)
+ periodicTranscriptionRef.current = null
+ }
+
+ // Stop MediaRecorder if active
+ if (mediaRecorderRef.current && mediaRecorderRef.current.state === 'recording') {
+ mediaRecorderRef.current.stop()
+ }
+
+ // Stop audio stream
+ if (streamRef.current) {
+ streamRef.current.getTracks().forEach(track => track.stop())
+ streamRef.current = null
+ }
+
+ // Clear chunks
+ audioChunksRef.current = []
+
+ console.log('โ Cleanup completed')
+ }, [])
+
+ // Convenience functions for compatibility
+ const startTranscription = useCallback(async () => {
+ try {
+ console.log('๐ค Starting transcription...')
+
+ // Reset all transcription state for clean start
+ streamingTranscriptRef.current = ''
+ setTranscript('')
+ setIsRecording(false)
+ isRecordingRef.current = false
+ lastTranscriptionTimeRef.current = 0
+
+ // Clear any existing timers
+ if (periodicTranscriptionRef.current) {
+ clearInterval(periodicTranscriptionRef.current)
+ periodicTranscriptionRef.current = null
+ }
+
+ // Initialize the model if not already loaded
+ if (!modelLoaded) {
+ await initializeTranscriber()
+ }
+
+ await startRecording()
+ console.log('โ Transcription started')
+
+ } catch (error) {
+ console.error('โ Error starting transcription:', error)
+ onError?.(error as Error)
+ }
+ }, [startRecording, onError, modelLoaded, initializeTranscriber])
+
+ const stopTranscription = useCallback(async () => {
+ try {
+ console.log('๐ Stopping transcription...')
+ await stopRecording()
+ console.log('โ Transcription stopped')
+ } catch (error) {
+ console.error('โ Error stopping transcription:', error)
+ onError?.(error as Error)
+ }
+ }, [stopRecording, onError])
+
+ const pauseTranscription = useCallback(async () => {
+ try {
+ console.log('โธ๏ธ Pausing transcription...')
+ await pauseRecording()
+ console.log('โ Transcription paused')
+ } catch (error) {
+ console.error('โ Error pausing transcription:', error)
+ onError?.(error as Error)
+ }
+ }, [pauseRecording, onError])
+
+ // Initialize model on mount (only if autoInitialize is true)
+ useEffect(() => {
+ if (autoInitialize) {
+ initializeTranscriber().catch(console.warn)
+ }
+ }, [initializeTranscriber, autoInitialize])
+
+ // Cleanup on unmount
+ useEffect(() => {
+ return () => {
+ cleanup()
+ }
+ }, [cleanup])
+
+ return {
+ // State
+ isRecording,
+ isSpeaking,
+ isTranscribing,
+ transcript,
+ modelLoaded,
+
+ // Actions
+ startTranscription,
+ stopTranscription,
+ pauseTranscription,
+
+ // Raw functions for advanced usage
+ startRecording,
+ stopRecording,
+ pauseRecording,
+ cleanup
+ }
+}
+
+// Export both the new consolidated hook and the old name for backward compatibility
+export const useWhisperTranscriptionSimple = useWhisperTranscription
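+
+// Illustrative usage in a component (the options object is defined earlier in this file; the
+// fields shown here are the ones referenced above and may not be exhaustive):
+//   const { transcript, isRecording, startTranscription, stopTranscription } =
+//     useWhisperTranscription({ language: 'en', enableStreaming: true, onTranscriptUpdate: console.log })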
diff --git a/src/lib/HoloSphereService.ts b/src/lib/HoloSphereService.ts
new file mode 100644
index 0000000..f9e5f86
--- /dev/null
+++ b/src/lib/HoloSphereService.ts
@@ -0,0 +1,443 @@
+import HoloSphere from 'holosphere'
+import * as h3 from 'h3-js'
+
+export interface HolonData {
+ id: string
+ name: string
+ description?: string
+ latitude: number
+ longitude: number
+ resolution: number
+ data: Record<string, any>
+ timestamp: number
+}
+
+export interface HolonLens {
+ name: string
+ schema?: any
+ data: any[]
+}
+
+export interface HolonConnection {
+ id: string
+ name: string
+ type: 'federation' | 'reference'
+ targetSpace: string
+ status: 'connected' | 'disconnected' | 'error'
+}
+
+export class HoloSphereService {
+ private sphere!: HoloSphere
+ private isInitialized: boolean = false
+ private connections: Map<string, HolonConnection> = new Map()
+ private connectionErrorLogged: boolean = false // Track if we've already logged connection errors
+
+ constructor(appName: string = 'canvas-holons', strict: boolean = false, openaiKey?: string) {
+ try {
+ this.sphere = new HoloSphere(appName, strict, openaiKey)
+ this.isInitialized = true
+ console.log('โ HoloSphere service initialized')
+ } catch (error) {
+ console.error('โ Failed to initialize HoloSphere:', error)
+ this.isInitialized = false
+ }
+ }
+
+ async initialize(): Promise<boolean> {
+ if (!this.isInitialized) {
+ console.error('โ HoloSphere not initialized')
+ return false
+ }
+ return true
+ }
+
+ // Get a holon for specific coordinates and resolution
+ async getHolon(lat: number, lng: number, resolution: number): Promise<string> {
+ if (!this.isInitialized) return ''
+ try {
+ return await this.sphere.getHolon(lat, lng, resolution)
+ } catch (error) {
+ console.error('โ Error getting holon:', error)
+ return ''
+ }
+ }
+
+ // Store data in a holon
+ async putData(holon: string, lens: string, data: any): Promise<boolean> {
+ if (!this.isInitialized) return false
+ try {
+ await this.sphere.put(holon, lens, data)
+ return true
+ } catch (error) {
+ console.error('โ Error storing data:', error)
+ return false
+ }
+ }
+
+ // Retrieve data from a holon
+ async getData(holon: string, lens: string, key?: string): Promise<any> {
+ if (!this.isInitialized) return null
+ try {
+ if (key) {
+ return await this.sphere.get(holon, lens, key)
+ } else {
+ return await this.sphere.getAll(holon, lens)
+ }
+ } catch (error) {
+ console.error('โ Error retrieving data:', error)
+ return null
+ }
+ }
+
+ // Retrieve data with subscription and timeout (better for Gun's async nature)
+ async getDataWithWait(holon: string, lens: string, timeoutMs: number = 5000): Promise<any> {
+ if (!this.isInitialized) {
+ console.log(`โ ๏ธ HoloSphere not initialized for ${lens}`)
+ return null
+ }
+
+ // Check for WebSocket connection issues
+ // Note: GunDB connection errors appear in browser console, we can't directly detect them
+ // but we can provide better feedback when no data is received
+
+ return new Promise((resolve) => {
+ let resolved = false
+ let collectedData: any = {}
+ let subscriptionActive = false
+
+ console.log(`๐ getDataWithWait: holon=${holon}, lens=${lens}, timeout=${timeoutMs}ms`)
+
+ // Listen for WebSocket errors (they appear in console but we can't catch them directly)
+ // Instead, we'll detect the pattern: subscription never fires + getAll never resolves
+
+ // Set up timeout (increased default to 5 seconds for network sync)
+ const timeout = setTimeout(() => {
+ if (!resolved) {
+ resolved = true
+ const keyCount = Object.keys(collectedData).length
+ const status = subscriptionActive
+ ? '(subscription was active)'
+ : '(subscription never fired - possible WebSocket connection issue)'
+
+ console.log(`โฑ๏ธ Timeout for lens ${lens}, returning collected data:`, keyCount, 'keys', status)
+
+ // If no data and subscription never fired, it's likely a connection issue
+ // Only log this once to avoid console spam
+ if (keyCount === 0 && !subscriptionActive && !this.connectionErrorLogged) {
+ this.connectionErrorLogged = true
+ console.error(`โ GunDB Connection Issue: WebSocket to 'wss://gun.holons.io/gun' is failing`)
+ console.error(`๐ก This prevents loading data from the Holosphere. Possible causes:`)
+ console.error(` โข GunDB server may be down or unreachable`)
+ console.error(` โข Network/firewall blocking WebSocket connections`)
+ console.error(` โข Check browser console for WebSocket connection errors`)
+ console.error(` โข Data will not load until connection is established`)
+ }
+
+ resolve(keyCount > 0 ? collectedData : null)
+ }
+ }, timeoutMs)
+
+ try {
+ // Check if methods exist
+ if (!this.sphere.subscribe) {
+ console.error(`โ sphere.subscribe does not exist`)
+ }
+ if (!this.sphere.getAll) {
+ console.error(`โ sphere.getAll does not exist`)
+ }
+ if (!this.sphere.get) {
+ console.error(`โ sphere.get does not exist`)
+ }
+
+ console.log(`๐ง Attempting to subscribe to ${holon}/${lens}`)
+
+ // Try subscribe if it exists
+ let unsubscribe: (() => void) | undefined = undefined
+ if (this.sphere.subscribe) {
+ try {
+ unsubscribe = this.sphere.subscribe(holon, lens, (data: any, key?: string) => {
+ subscriptionActive = true
+ console.log(`๐ฅ Subscription callback fired for ${lens}:`, { data, key, dataType: typeof data, isObject: typeof data === 'object', isArray: Array.isArray(data) })
+
+ if (data !== null && data !== undefined) {
+ if (key) {
+ // If we have a key, it's a key-value pair
+ collectedData[key] = data
+ console.log(`๐ฅ Added key-value pair: ${key} =`, data)
+ } else if (typeof data === 'object' && !Array.isArray(data)) {
+ // If it's an object, merge it
+ collectedData = { ...collectedData, ...data }
+ console.log(`๐ฅ Merged object data, total keys:`, Object.keys(collectedData).length)
+ } else if (Array.isArray(data)) {
+ // If it's an array, convert to object with indices
+ data.forEach((item, index) => {
+ collectedData[String(index)] = item
+ })
+ console.log(`๐ฅ Converted array to object, total keys:`, Object.keys(collectedData).length)
+ } else {
+ // Primitive value
+ collectedData['value'] = data
+ console.log(`๐ฅ Added primitive value:`, data)
+ }
+
+ console.log(`๐ฅ Current collected data for ${lens}:`, Object.keys(collectedData).length, 'keys')
+ }
+ })
+ console.log(`โ Subscribe called successfully for ${lens}`)
+ } catch (subError) {
+ console.error(`โ Error calling subscribe for ${lens}:`, subError)
+ }
+ }
+
+ // Try getAll if it exists
+ if (this.sphere.getAll) {
+ console.log(`๐ง Attempting getAll for ${holon}/${lens}`)
+ this.sphere.getAll(holon, lens).then((immediateData: any) => {
+ console.log(`๐ฆ getAll returned for ${lens}:`, {
+ data: immediateData,
+ type: typeof immediateData,
+ isObject: typeof immediateData === 'object',
+ isArray: Array.isArray(immediateData),
+ keys: immediateData && typeof immediateData === 'object' ? Object.keys(immediateData).length : 'N/A'
+ })
+
+ if (immediateData !== null && immediateData !== undefined) {
+ if (typeof immediateData === 'object' && !Array.isArray(immediateData)) {
+ collectedData = { ...collectedData, ...immediateData }
+ console.log(`๐ฆ Merged immediate data, total keys:`, Object.keys(collectedData).length)
+ } else if (Array.isArray(immediateData)) {
+ immediateData.forEach((item, index) => {
+ collectedData[String(index)] = item
+ })
+ console.log(`๐ฆ Converted immediate array to object, total keys:`, Object.keys(collectedData).length)
+ } else {
+ collectedData['value'] = immediateData
+ console.log(`๐ฆ Added immediate primitive value`)
+ }
+ }
+
+ // If we have data immediately, resolve early
+ if (Object.keys(collectedData).length > 0 && !resolved) {
+ resolved = true
+ clearTimeout(timeout)
+ if (unsubscribe) unsubscribe()
+ console.log(`โ Resolving early with ${Object.keys(collectedData).length} keys for ${lens}`)
+ resolve(collectedData)
+ }
+ }).catch((error: any) => {
+ console.error(`โ ๏ธ Error getting immediate data for ${lens}:`, error)
+ })
+ } else {
+ // Fallback: try using getData method instead
+ console.log(`๐ง getAll not available, trying getData as fallback for ${lens}`)
+ this.getData(holon, lens).then((fallbackData: any) => {
+ console.log(`๐ฆ getData (fallback) returned for ${lens}:`, fallbackData)
+ if (fallbackData !== null && fallbackData !== undefined) {
+ if (typeof fallbackData === 'object' && !Array.isArray(fallbackData)) {
+ collectedData = { ...collectedData, ...fallbackData }
+ } else {
+ collectedData['value'] = fallbackData
+ }
+ if (Object.keys(collectedData).length > 0 && !resolved) {
+ resolved = true
+ clearTimeout(timeout)
+ if (unsubscribe) unsubscribe()
+ console.log(`โ Resolving with fallback data: ${Object.keys(collectedData).length} keys for ${lens}`)
+ resolve(collectedData)
+ }
+ }
+ }).catch((error: any) => {
+ console.error(`โ ๏ธ Error in fallback getData for ${lens}:`, error)
+ })
+ }
+
+ } catch (error) {
+ console.error(`โ Error setting up subscription for ${lens}:`, error)
+ clearTimeout(timeout)
+ if (!resolved) {
+ resolved = true
+ resolve(null)
+ }
+ }
+ })
+ }
+
+ // Delete data from a holon
+ async deleteData(holon: string, lens: string, key?: string): Promise<boolean> {
+ if (!this.isInitialized) return false
+ try {
+ if (key) {
+ await this.sphere.delete(holon, lens, key)
+ } else {
+ await this.sphere.deleteAll(holon, lens)
+ }
+ return true
+ } catch (error) {
+ console.error('โ Error deleting data:', error)
+ return false
+ }
+ }
+
+ // Set schema for data validation
+ async setSchema(lens: string, schema: any): Promise<boolean> {
+ if (!this.isInitialized) return false
+ try {
+ await this.sphere.setSchema(lens, schema)
+ return true
+ } catch (error) {
+ console.error('โ Error setting schema:', error)
+ return false
+ }
+ }
+
+ // Get current schema
+ async getSchema(lens: string): Promise<any> {
+ if (!this.isInitialized) return null
+ try {
+ return await this.sphere.getSchema(lens)
+ } catch (error) {
+ console.error('โ Error getting schema:', error)
+ return null
+ }
+ }
+
+ // Subscribe to changes in a holon
+ subscribe(holon: string, lens: string, callback: (data: any) => void): void {
+ if (!this.isInitialized) return
+ try {
+ this.sphere.subscribe(holon, lens, callback)
+ } catch (error) {
+ console.error('โ Error subscribing to changes:', error)
+ }
+ }
+
+ // Get holon hierarchy (parent and children)
+ getHolonHierarchy(holon: string): { parent?: string; children: string[] } {
+ try {
+ const resolution = h3.getResolution(holon)
+ const parent = resolution > 0 ? h3.cellToParent(holon, resolution - 1) : undefined
+ const children = h3.cellToChildren(holon, resolution + 1)
+ return { parent, children }
+ } catch (error) {
+ console.error('โ Error getting holon hierarchy:', error)
+ return { children: [] }
+ }
+ }
+
+ // Get all scales for a holon (all containing holons)
+ getHolonScalespace(holon: string): string[] {
+ try {
+ return this.sphere.getHolonScalespace(holon)
+ } catch (error) {
+ console.error('โ Error getting holon scalespace:', error)
+ return []
+ }
+ }
+
+ // Federation methods
+ async federate(spaceId1: string, spaceId2: string, password1?: string, password2?: string, bidirectional?: boolean): Promise<boolean> {
+ if (!this.isInitialized) return false
+ try {
+ await this.sphere.federate(spaceId1, spaceId2, password1, password2, bidirectional)
+ return true
+ } catch (error) {
+ console.error('โ Error federating spaces:', error)
+ return false
+ }
+ }
+
+ async propagate(holon: string, lens: string, data: any, options?: { useReferences?: boolean; targetSpaces?: string[] }): Promise<boolean> {
+ if (!this.isInitialized) return false
+ try {
+ await this.sphere.propagate(holon, lens, data, options)
+ return true
+ } catch (error) {
+ console.error('โ Error propagating data:', error)
+ return false
+ }
+ }
+
+ // Message federation
+ async federateMessage(originalChatId: string, messageId: string, federatedChatId: string, federatedMessageId: string, type: string): Promise<boolean> {
+ if (!this.isInitialized) return false
+ try {
+ await this.sphere.federateMessage(originalChatId, messageId, federatedChatId, federatedMessageId, type)
+ return true
+ } catch (error) {
+ console.error('โ Error federating message:', error)
+ return false
+ }
+ }
+
+ async getFederatedMessages(originalChatId: string, messageId: string): Promise<any[]> {
+ if (!this.isInitialized) return []
+ try {
+ const result = await this.sphere.getFederatedMessages(originalChatId, messageId)
+ return Array.isArray(result) ? result : []
+ } catch (error) {
+ console.error('โ Error getting federated messages:', error)
+ return []
+ }
+ }
+
+ async updateFederatedMessages(originalChatId: string, messageId: string, updateCallback: (chatId: string, messageId: string) => Promise<void>): Promise<boolean> {
+ if (!this.isInitialized) return false
+ try {
+ await this.sphere.updateFederatedMessages(originalChatId, messageId, updateCallback)
+ return true
+ } catch (error) {
+ console.error('โ Error updating federated messages:', error)
+ return false
+ }
+ }
+
+ // Utility methods for working with coordinates and resolutions
+ static getResolutionName(resolution: number): string {
+ const names = [
+ 'Country', 'State/Province', 'Metropolitan Area', 'City', 'District',
+ 'Neighborhood', 'Block', 'Building', 'Room', 'Desk', 'Chair', 'Point'
+ ]
+ return names[resolution] || `Level ${resolution}`
+ }
+
+ static getResolutionDescription(resolution: number): string {
+ const descriptions = [
+ 'Country level - covers entire countries',
+ 'State/Province level - covers states and provinces',
+ 'Metropolitan area level - covers large urban areas',
+ 'City level - covers individual cities',
+ 'District level - covers city districts',
+ 'Neighborhood level - covers neighborhoods',
+ 'Block level - covers city blocks',
+ 'Building level - covers individual buildings',
+ 'Room level - covers individual rooms',
+ 'Desk level - covers individual desks',
+ 'Chair level - covers individual chairs',
+ 'Point level - covers individual points'
+ ]
+ return descriptions[resolution] || `Geographic level ${resolution}`
+ }
+
+ // Get connection status
+ getConnectionStatus(spaceId: string): HolonConnection | undefined {
+ return this.connections.get(spaceId)
+ }
+
+ // Add connection
+ addConnection(connection: HolonConnection): void {
+ this.connections.set(connection.id, connection)
+ }
+
+ // Remove connection
+ removeConnection(spaceId: string): boolean {
+ return this.connections.delete(spaceId)
+ }
+
+ // Get all connections
+ getAllConnections(): HolonConnection[] {
+ return Array.from(this.connections.values())
+ }
+}
+
+// Create a singleton instance
+export const holosphereService = new HoloSphereService('canvas-holons', false)
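+
+// Illustrative usage (method names as defined above; coordinates, lens, and payload are made-up values):
+//   const holon = await holosphereService.getHolon(52.37, 4.89, 8)
+//   await holosphereService.putData(holon, 'tasks', { title: 'Water the plants' })
+//   const tasks = await holosphereService.getDataWithWait(holon, 'tasks', 5000)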
diff --git a/src/lib/clientConfig.ts b/src/lib/clientConfig.ts
index f9febde..ca95734 100644
--- a/src/lib/clientConfig.ts
+++ b/src/lib/clientConfig.ts
@@ -106,11 +106,35 @@ export function getGitHubConfig(): { token: string; repo: string; branch: string
*/
export function isOpenAIConfigured(): boolean {
try {
+ // First try to get user-specific API keys if available
+ const session = JSON.parse(localStorage.getItem('session') || '{}')
+ if (session.authed && session.username) {
+ const userApiKeys = localStorage.getItem(`${session.username}_api_keys`)
+ if (userApiKeys) {
+ try {
+ const parsed = JSON.parse(userApiKeys)
+ if (parsed.keys && parsed.keys.openai && parsed.keys.openai.trim() !== '') {
+ return true
+ }
+ } catch (e) {
+ // Continue to fallback
+ }
+ }
+ }
+
+ // Fallback to global API keys
const settings = localStorage.getItem("openai_api_key")
if (settings) {
- const parsed = JSON.parse(settings)
- if (parsed.keys && parsed.keys.openai && parsed.keys.openai.trim() !== '') {
- return true
+ try {
+ const parsed = JSON.parse(settings)
+ if (parsed.keys && parsed.keys.openai && parsed.keys.openai.trim() !== '') {
+ return true
+ }
+ } catch (e) {
+ // If it's not JSON, it might be the old format (just a string)
+ if (settings.startsWith('sk-') && settings.trim() !== '') {
+ return true
+ }
}
}
return false
@@ -125,15 +149,45 @@ export function isOpenAIConfigured(): boolean {
*/
export function getOpenAIConfig(): { apiKey: string } | null {
try {
- const settings = localStorage.getItem("openai_api_key")
- if (settings) {
- const parsed = JSON.parse(settings)
- if (parsed.keys && parsed.keys.openai && parsed.keys.openai.trim() !== '') {
- return { apiKey: parsed.keys.openai }
+ // First try to get user-specific API keys if available
+ const session = JSON.parse(localStorage.getItem('session') || '{}')
+ if (session.authed && session.username) {
+ const userApiKeys = localStorage.getItem(`${session.username}_api_keys`)
+ if (userApiKeys) {
+ try {
+ const parsed = JSON.parse(userApiKeys)
+ if (parsed.keys && parsed.keys.openai && parsed.keys.openai.trim() !== '') {
+ console.log('๐ Found user-specific OpenAI API key')
+ return { apiKey: parsed.keys.openai }
+ }
+ } catch (e) {
+ console.log('๐ Error parsing user-specific API keys:', e)
+ }
}
}
+
+ // Fallback to global API keys
+ const settings = localStorage.getItem("openai_api_key")
+ if (settings) {
+ try {
+ const parsed = JSON.parse(settings)
+ if (parsed.keys && parsed.keys.openai && parsed.keys.openai.trim() !== '') {
+ console.log('๐ Found global OpenAI API key')
+ return { apiKey: parsed.keys.openai }
+ }
+ } catch (e) {
+ // If it's not JSON, it might be the old format (just a string)
+ if (settings.startsWith('sk-') && settings.trim() !== '') {
+ console.log('๐ Found old format OpenAI API key')
+ return { apiKey: settings }
+ }
+ }
+ }
+
+ console.log('๐ No OpenAI API key found')
return null
} catch (e) {
+ console.log('๐ Error getting OpenAI config:', e)
return null
}
}
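+
+// Lookup order implemented above: a per-user key stored under `<username>_api_keys`
+// ({ keys: { openai } }), then the global "openai_api_key" entry parsed as the same JSON shape,
+// and finally the legacy format where "openai_api_key" holds a bare "sk-..." string.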
diff --git a/src/lib/githubQuartzReader.ts b/src/lib/githubQuartzReader.ts
index 8fe618e..a5f14c2 100644
--- a/src/lib/githubQuartzReader.ts
+++ b/src/lib/githubQuartzReader.ts
@@ -53,27 +53,20 @@ export class GitHubQuartzReader {
*/
  async getAllNotes(): Promise<QuartzNoteFromGitHub[]> {
try {
- console.log('๐ Fetching Quartz notes from GitHub...')
- console.log(`๐ Repository: ${this.config.owner}/${this.config.repo}`)
- console.log(`๐ฟ Branch: ${this.config.branch}`)
- console.log(`๐ Content path: ${this.config.contentPath}`)
-
// Get the content directory
const contentFiles = await this.getDirectoryContents(this.config.contentPath || '')
// Filter for Markdown files
- const markdownFiles = contentFiles.filter(file =>
- file.type === 'file' &&
- (file.name.endsWith('.md') || file.name.endsWith('.markdown'))
- )
-
- console.log(`๐ Found ${markdownFiles.length} Markdown files`)
+ const markdownFiles = contentFiles.filter(file => {
+ return file.type === 'file' &&
+ file.name &&
+ (file.name.endsWith('.md') || file.name.endsWith('.markdown'))
+ })
// Fetch content for each file
const notes: QuartzNoteFromGitHub[] = []
for (const file of markdownFiles) {
try {
- console.log(`๐ Fetching content for file: ${file.path}`)
// Get the actual file contents (not just metadata)
const fileWithContent = await this.getFileContents(file.path)
const note = await this.getNoteFromFile(fileWithContent)
@@ -85,7 +78,6 @@ export class GitHubQuartzReader {
}
}
- console.log(`โ Successfully loaded ${notes.length} notes from GitHub`)
return notes
} catch (error) {
console.error('โ Failed to fetch notes from GitHub:', error)
@@ -172,11 +164,10 @@ export class GitHubQuartzReader {
*/
  private async getNoteFromFile(file: GitHubFile): Promise<QuartzNoteFromGitHub | null> {
try {
- console.log(`๐ Processing file: ${file.path}`)
- console.log(`๐ File size: ${file.size} bytes`)
- console.log(`๐ Has content: ${!!file.content}`)
- console.log(`๐ Content length: ${file.content?.length || 0}`)
- console.log(`๐ Encoding: ${file.encoding}`)
+ // Validate file object
+ if (!file || !file.path) {
+ return null
+ }
// Decode base64 content
let content = ''
@@ -189,31 +180,23 @@ export class GitHubQuartzReader {
// Try direct decoding if not base64
content = file.content
}
- console.log(`๐ Decoded content length: ${content.length}`)
- console.log(`๐ Content preview: ${content.substring(0, 200)}...`)
} catch (decodeError) {
- console.error(`๐ Failed to decode content for ${file.path}:`, decodeError)
// Try alternative decoding methods
try {
content = decodeURIComponent(escape(atob(file.content)))
- console.log(`๐ Alternative decode successful, length: ${content.length}`)
} catch (altError) {
- console.error(`๐ Alternative decode also failed:`, altError)
+ console.error(`Failed to decode content for ${file.path}:`, altError)
return null
}
}
- } else {
- console.warn(`๐ No content available for file: ${file.path}`)
- return null
}
// Parse frontmatter and content
const { frontmatter, content: markdownContent } = this.parseMarkdownWithFrontmatter(content)
- console.log(`๐ Parsed markdown content length: ${markdownContent.length}`)
- console.log(`๐ Frontmatter keys: ${Object.keys(frontmatter).join(', ')}`)
// Extract title
- const title = frontmatter.title || this.extractTitleFromPath(file.name) || 'Untitled'
+ const fileName = file.name || file.path.split('/').pop() || 'untitled'
+ const title = frontmatter.title || this.extractTitleFromPath(fileName) || 'Untitled'
// Extract tags
const tags = this.extractTags(frontmatter, markdownContent)
@@ -221,7 +204,7 @@ export class GitHubQuartzReader {
// Generate note ID
const id = this.generateNoteId(file.path, title)
- const result = {
+ return {
id,
title,
content: markdownContent,
@@ -232,9 +215,6 @@ export class GitHubQuartzReader {
htmlUrl: file.html_url,
rawUrl: file.download_url || file.git_url
}
-
- console.log(`๐ Final note: ${title} (${markdownContent.length} chars)`)
- return result
} catch (error) {
console.error(`Failed to parse file ${file.path}:`, error)
return null
@@ -245,8 +225,6 @@ export class GitHubQuartzReader {
* Parse Markdown content with frontmatter
*/
  private parseMarkdownWithFrontmatter(content: string): { frontmatter: Record<string, any>, content: string } {
- console.log(`๐ Parsing markdown with frontmatter, content length: ${content.length}`)
-
// More flexible frontmatter regex that handles different formats
const frontmatterRegex = /^---\s*\r?\n([\s\S]*?)\r?\n---\s*\r?\n([\s\S]*)$/m
const match = content.match(frontmatterRegex)
@@ -255,10 +233,6 @@ export class GitHubQuartzReader {
const frontmatterText = match[1]
const markdownContent = match[2].trim() // Remove leading/trailing whitespace
- console.log(`๐ Found frontmatter, length: ${frontmatterText.length}`)
- console.log(`๐ Markdown content length: ${markdownContent.length}`)
- console.log(`๐ Markdown preview: ${markdownContent.substring(0, 100)}...`)
-
// Parse YAML frontmatter (simplified but more robust)
    const frontmatter: Record<string, any> = {}
const lines = frontmatterText.split(/\r?\n/)
@@ -298,11 +272,9 @@ export class GitHubQuartzReader {
}
}
- console.log(`๐ Parsed frontmatter:`, frontmatter)
return { frontmatter, content: markdownContent }
}
- console.log(`๐ No frontmatter found, using entire content`)
return { frontmatter: {}, content: content.trim() }
}
@@ -310,6 +282,10 @@ export class GitHubQuartzReader {
* Extract title from file path
*/
private extractTitleFromPath(fileName: string): string {
+ if (!fileName) {
+ return 'Untitled'
+ }
+
return fileName
.replace(/\.(md|markdown)$/i, '')
.replace(/[-_]/g, ' ')
diff --git a/src/lib/location/locationStorage.ts b/src/lib/location/locationStorage.ts
new file mode 100644
index 0000000..6fccb1b
--- /dev/null
+++ b/src/lib/location/locationStorage.ts
@@ -0,0 +1,295 @@
+import type FileSystem from '@oddjs/odd/fs/index';
+import * as odd from '@oddjs/odd';
+import type { PrecisionLevel } from './types';
+
+/**
+ * Location data stored in the filesystem
+ */
+export interface LocationData {
+ id: string;
+ userId: string;
+ latitude: number;
+ longitude: number;
+ accuracy: number;
+ timestamp: number;
+ expiresAt: number | null;
+ precision: PrecisionLevel;
+}
+
+/**
+ * Location share metadata
+ */
+export interface LocationShare {
+ id: string;
+ locationId: string;
+ shareToken: string;
+ createdAt: number;
+ expiresAt: number | null;
+ maxViews: number | null;
+ viewCount: number;
+ precision: PrecisionLevel;
+}
+
+/**
+ * Location storage service
+ * Handles storing and retrieving locations from the ODD.js filesystem
+ */
+export class LocationStorageService {
+ private fs: FileSystem;
+ private locationsPath: string[];
+ private sharesPath: string[];
+ private publicSharesPath: string[];
+
+ constructor(fs: FileSystem) {
+ this.fs = fs;
+ // Private storage paths
+ this.locationsPath = ['private', 'locations'];
+ this.sharesPath = ['private', 'location-shares'];
+ // Public reference path for share validation
+ this.publicSharesPath = ['public', 'location-shares'];
+ }
+
+ /**
+ * Initialize directories
+ */
+ async initialize(): Promise<void> {
+ // Ensure private directories exist
+ await this.ensureDirectory(this.locationsPath);
+ await this.ensureDirectory(this.sharesPath);
+ // Ensure public directory for share references
+ await this.ensureDirectory(this.publicSharesPath);
+ }
+
+ /**
+ * Ensure a directory exists
+ */
+ private async ensureDirectory(path: string[]): Promise<void> {
+ try {
+ const dirPath = odd.path.directory(...path);
+ const exists = await this.fs.exists(dirPath as any);
+ if (!exists) {
+ await this.fs.mkdir(dirPath as any);
+ }
+ } catch (error) {
+ console.error('Error ensuring directory:', error);
+ throw error;
+ }
+ }
+
+ /**
+ * Save a location to the filesystem
+ */
+ async saveLocation(location: LocationData): Promise<void> {
+ try {
+ const filePath = odd.path.file(...this.locationsPath, `${location.id}.json`);
+ const content = new TextEncoder().encode(JSON.stringify(location, null, 2));
+ await this.fs.write(filePath as any, content as any);
+ await this.fs.publish();
+ } catch (error) {
+ console.error('Error saving location:', error);
+ throw error;
+ }
+ }
+
+ /**
+ * Get a location by ID
+ */
+ async getLocation(locationId: string): Promise<LocationData | null> {
+ try {
+ const filePath = odd.path.file(...this.locationsPath, `${locationId}.json`);
+ const exists = await this.fs.exists(filePath as any);
+ if (!exists) {
+ return null;
+ }
+ const content = await this.fs.read(filePath as any);
+ const text = new TextDecoder().decode(content as Uint8Array);
+ return JSON.parse(text) as LocationData;
+ } catch (error) {
+ console.error('Error reading location:', error);
+ return null;
+ }
+ }
+
+ /**
+ * Create a location share
+ */
+ async createShare(share: LocationShare): Promise<void> {
+ try {
+ // Save share metadata in private directory
+ const sharePath = odd.path.file(...this.sharesPath, `${share.id}.json`);
+ const shareContent = new TextEncoder().encode(JSON.stringify(share, null, 2));
+ await this.fs.write(sharePath as any, shareContent as any);
+
+ // Create public reference file for share validation (only token, not full data)
+ const publicSharePath = odd.path.file(...this.publicSharesPath, `${share.shareToken}.json`);
+ const publicShareRef = {
+ shareToken: share.shareToken,
+ shareId: share.id,
+ createdAt: share.createdAt,
+ expiresAt: share.expiresAt,
+ };
+ const publicContent = new TextEncoder().encode(JSON.stringify(publicShareRef, null, 2));
+ await this.fs.write(publicSharePath as any, publicContent as any);
+
+ await this.fs.publish();
+ } catch (error) {
+ console.error('Error creating share:', error);
+ throw error;
+ }
+ }
+
+ /**
+ * Get a share by token
+ */
+ async getShareByToken(shareToken: string): Promise<LocationShare | null> {
+ try {
+ // First check public reference
+ const publicSharePath = odd.path.file(...this.publicSharesPath, `${shareToken}.json`);
+ const publicExists = await this.fs.exists(publicSharePath as any);
+ if (!publicExists) {
+ return null;
+ }
+
+ const publicContent = await this.fs.read(publicSharePath as any);
+ const publicText = new TextDecoder().decode(publicContent as Uint8Array);
+ const publicRef = JSON.parse(publicText);
+
+ // Now get full share from private directory
+ const sharePath = odd.path.file(...this.sharesPath, `${publicRef.shareId}.json`);
+ const shareExists = await this.fs.exists(sharePath as any);
+ if (!shareExists) {
+ return null;
+ }
+
+ const shareContent = await this.fs.read(sharePath as any);
+ const shareText = new TextDecoder().decode(shareContent as Uint8Array);
+ return JSON.parse(shareText) as LocationShare;
+ } catch (error) {
+ console.error('Error reading share:', error);
+ return null;
+ }
+ }
+
+ /**
+ * Get all shares for the current user
+ */
+ async getAllShares(): Promise<LocationShare[]> {
+ try {
+ const dirPath = odd.path.directory(...this.sharesPath);
+ const exists = await this.fs.exists(dirPath as any);
+ if (!exists) {
+ return [];
+ }
+
+ const files = await this.fs.ls(dirPath as any);
+ const shares: LocationShare[] = [];
+
+ for (const fileName of Object.keys(files)) {
+ if (fileName.endsWith('.json')) {
+ const shareId = fileName.replace('.json', '');
+ const share = await this.getShareById(shareId);
+ if (share) {
+ shares.push(share);
+ }
+ }
+ }
+
+ return shares;
+ } catch (error) {
+ console.error('Error listing shares:', error);
+ return [];
+ }
+ }
+
+ /**
+ * Get a share by ID
+ */
+ private async getShareById(shareId: string): Promise<LocationShare | null> {
+ try {
+ const sharePath = odd.path.file(...this.sharesPath, `${shareId}.json`);
+ const exists = await this.fs.exists(sharePath as any);
+ if (!exists) {
+ return null;
+ }
+ const content = await this.fs.read(sharePath as any);
+ const text = new TextDecoder().decode(content as Uint8Array);
+ return JSON.parse(text) as LocationShare;
+ } catch (error) {
+ console.error('Error reading share:', error);
+ return null;
+ }
+ }
+
+ /**
+ * Increment view count for a share
+ */
+ async incrementShareViews(shareId: string): Promise<void> {
+ try {
+ const share = await this.getShareById(shareId);
+ if (!share) {
+ throw new Error('Share not found');
+ }
+
+ share.viewCount += 1;
+ await this.createShare(share); // Re-save the share
+ } catch (error) {
+ console.error('Error incrementing share views:', error);
+ throw error;
+ }
+ }
+}
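+
+/**
+ * Illustrative usage (assumes an authenticated ODD.js filesystem `fs`; field values are made up):
+ *   const storage = new LocationStorageService(fs)
+ *   await storage.initialize()
+ *   await storage.saveLocation({ id: 'loc-1', userId: 'alice', latitude: 52.37, longitude: 4.89,
+ *     accuracy: 10, timestamp: Date.now(), expiresAt: null, precision: 'exact' })
+ */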
+
+/**
+ * Obfuscate location based on precision level
+ */
+export function obfuscateLocation(
+ lat: number,
+ lng: number,
+ precision: PrecisionLevel
+): { lat: number; lng: number; radius: number } {
+ let radius = 0;
+
+ switch (precision) {
+ case 'exact':
+ radius = 0;
+ break;
+ case 'street':
+ radius = 100; // ~100m radius
+ break;
+ case 'neighborhood':
+ radius = 1000; // ~1km radius
+ break;
+ case 'city':
+ radius = 10000; // ~10km radius
+ break;
+ }
+
+ if (radius === 0) {
+ return { lat, lng, radius: 0 };
+ }
+
+ // Add random offset within the radius
+ const angle = Math.random() * 2 * Math.PI;
+ const distance = Math.random() * radius;
+
+ // Convert distance to degrees (rough approximation: 1 degree โ 111km)
+ const latOffset = (distance / 111000) * Math.cos(angle);
+ const lngOffset = (distance / (111000 * Math.cos(lat * Math.PI / 180))) * Math.sin(angle);
+
+ return {
+ lat: lat + latOffset,
+ lng: lng + lngOffset,
+ radius,
+ };
+}
+
+/**
+ * Generate a secure share token
+ */
+export function generateShareToken(): string {
+ // Generate a cryptographically secure random token
+ const array = new Uint8Array(32);
+ crypto.getRandomValues(array);
+ return Array.from(array, (byte) => byte.toString(16).padStart(2, '0')).join('');
+}
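+
+// Illustrative example of obfuscateLocation above: with precision 'neighborhood' the returned
+// point is randomly offset within roughly 1 km of the input and radius is reported as 1000.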
+
diff --git a/src/lib/location/types.ts b/src/lib/location/types.ts
new file mode 100644
index 0000000..5be56dd
--- /dev/null
+++ b/src/lib/location/types.ts
@@ -0,0 +1,47 @@
+/**
+ * Location sharing types
+ */
+
+export type PrecisionLevel = "exact" | "street" | "neighborhood" | "city";
+
+export interface ShareSettings {
+ duration: number | null; // Duration in milliseconds
+ maxViews: number | null; // Maximum number of views allowed
+ precision: PrecisionLevel; // Precision level for location obfuscation
+}
+
+export interface GeolocationPosition {
+ coords: {
+ latitude: number;
+ longitude: number;
+ accuracy: number;
+ altitude?: number | null;
+ altitudeAccuracy?: number | null;
+ heading?: number | null;
+ speed?: number | null;
+ };
+ timestamp: number;
+}
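+
+// Example (illustrative): settings for sharing an exact position for one hour with no view limit
+// const oneHourExact: ShareSettings = { duration: 60 * 60 * 1000, maxViews: null, precision: "exact" };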
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/lib/obsidianImporter.ts b/src/lib/obsidianImporter.ts
index b7f4ce8..bbb7d1f 100644
--- a/src/lib/obsidianImporter.ts
+++ b/src/lib/obsidianImporter.ts
@@ -20,12 +20,34 @@ export interface ObsidianObsNote {
vaultPath?: string
}
+export interface FolderNode {
+ name: string
+ path: string
+ children: FolderNode[]
+ notes: ObsidianObsNote[]
+ isExpanded: boolean
+ level: number
+}
+
export interface ObsidianVault {
name: string
path: string
obs_notes: ObsidianObsNote[]
totalObsNotes: number
lastImported: Date
+ folderTree: FolderNode
+}
+
+export interface ObsidianVaultRecord {
+ id: string
+ typeName: 'obsidian_vault'
+ name: string
+ path: string
+ obs_notes: ObsidianObsNote[]
+ totalObsNotes: number
+ lastImported: Date
+ folderTree: FolderNode
+ meta: Record<string, any>
}
export class ObsidianImporter {
@@ -39,7 +61,6 @@ export class ObsidianImporter {
try {
// For now, we'll simulate this with a demo vault
// In a real implementation, you'd use the File System Access API
- console.log('Importing from directory:', directoryPath)
// Simulate reading files (in real implementation, use File System Access API)
const mockObsNotes = await this.createMockObsNotes()
@@ -49,7 +70,8 @@ export class ObsidianImporter {
path: directoryPath,
obs_notes: mockObsNotes,
totalObsNotes: mockObsNotes.length,
- lastImported: new Date()
+ lastImported: new Date(),
+ folderTree: this.buildFolderTree(mockObsNotes)
}
return this.vault
@@ -64,8 +86,6 @@ export class ObsidianImporter {
*/
  async importFromQuartzUrl(quartzUrl: string): Promise<ObsidianVault> {
try {
- console.log('Importing from Quartz URL:', quartzUrl)
-
// Ensure URL has protocol
const url = quartzUrl.startsWith('http') ? quartzUrl : `https://${quartzUrl}`
@@ -73,7 +93,6 @@ export class ObsidianImporter {
const githubConfig = this.getGitHubConfigFromUrl(url)
if (githubConfig) {
- console.log('๐ Using GitHub API to read Quartz content')
const obs_notes = await this.importFromGitHub(githubConfig)
this.vault = {
@@ -81,12 +100,12 @@ export class ObsidianImporter {
path: url,
obs_notes,
totalObsNotes: obs_notes.length,
- lastImported: new Date()
+ lastImported: new Date(),
+ folderTree: this.buildFolderTree(obs_notes)
}
return this.vault
} else {
- console.log('โ ๏ธ No GitHub config found, falling back to web scraping')
// Fallback to the old method
const obs_notes = await this.discoverQuartzContent(url)
@@ -95,7 +114,8 @@ export class ObsidianImporter {
path: url,
obs_notes,
totalObsNotes: obs_notes.length,
- lastImported: new Date()
+ lastImported: new Date(),
+ folderTree: this.buildFolderTree(obs_notes)
}
return this.vault
@@ -129,7 +149,8 @@ export class ObsidianImporter {
path: directoryHandle.name, // File System Access API doesn't expose full path
obs_notes,
totalObsNotes: obs_notes.length,
- lastImported: new Date()
+ lastImported: new Date(),
+ folderTree: this.buildFolderTree(obs_notes)
}
return this.vault
@@ -449,6 +470,163 @@ A collection of creative project ideas and concepts.
return Array.from(allTags).sort()
}
+ /**
+ * Build folder tree structure from obs_notes
+ */
+ buildFolderTree(obs_notes: ObsidianObsNote[]): FolderNode {
+ const root: FolderNode = {
+ name: 'Root',
+ path: '',
+ children: [],
+ notes: [],
+ isExpanded: true,
+ level: 0
+ }
+
+ // Group notes by their folder paths
+ const folderMap = new Map<string, { folders: string[]; notes: ObsidianObsNote[] }>()
+
+ obs_notes.forEach(note => {
+ const pathParts = this.parseFilePath(note.filePath)
+ const folderKey = pathParts.folders.join('/')
+
+ if (!folderMap.has(folderKey)) {
+ folderMap.set(folderKey, { folders: pathParts.folders, notes: [] })
+ }
+ folderMap.get(folderKey)!.notes.push(note)
+ })
+
+ // Build the tree structure
+ folderMap.forEach(({ folders, notes }) => {
+ this.addFolderToTree(root, folders, notes)
+ })
+
+ return root
+ }
+
+ /**
+ * Parse file path into folder structure
+ */
+ private parseFilePath(filePath: string): { folders: string[], fileName: string } {
+ // Handle both local paths and URLs
+ let pathToParse = filePath
+
+ if (filePath.startsWith('http')) {
+ // Extract pathname from URL
+ try {
+ const url = new URL(filePath)
+ pathToParse = url.pathname.replace(/^\//, '')
+ } catch (e) {
+ console.warn('Invalid URL:', filePath)
+ return { folders: [], fileName: filePath }
+ }
+ }
+
+ // Split path and filter out empty parts
+ const parts = pathToParse.split('/').filter(part => part.length > 0)
+
+ if (parts.length === 0) {
+ return { folders: [], fileName: filePath }
+ }
+
+ const fileName = parts[parts.length - 1]
+ const folders = parts.slice(0, -1)
+
+ return { folders, fileName }
+ }
+
+ /**
+ * Add folder to tree structure
+ */
+ private addFolderToTree(root: FolderNode, folderPath: string[], notes: ObsidianObsNote[]): void {
+ let current = root
+
+ for (let i = 0; i < folderPath.length; i++) {
+ const folderName = folderPath[i]
+ let existingFolder = current.children.find(child => child.name === folderName)
+
+ if (!existingFolder) {
+ const currentPath = folderPath.slice(0, i + 1).join('/')
+ existingFolder = {
+ name: folderName,
+ path: currentPath,
+ children: [],
+ notes: [],
+ isExpanded: false,
+ level: i + 1
+ }
+ current.children.push(existingFolder)
+ }
+
+ current = existingFolder
+ }
+
+ // Add notes to the final folder
+ current.notes.push(...notes)
+ }
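+
+ // Example (illustrative): a note at 'projects/2024/idea.md' produces folder nodes
+ // Root -> 'projects' (level 1) -> '2024' (level 2), and the note is attached to the '2024' node.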
+
+ /**
+ * Get all notes from a folder tree (recursive)
+ */
+ getAllNotesFromTree(folder: FolderNode): ObsidianObsNote[] {
+ let notes = [...folder.notes]
+
+ folder.children.forEach(child => {
+ notes.push(...this.getAllNotesFromTree(child))
+ })
+
+ return notes
+ }
+
+ /**
+ * Find folder by path in tree
+ */
+ findFolderByPath(root: FolderNode, path: string): FolderNode | null {
+ if (root.path === path) {
+ return root
+ }
+
+ for (const child of root.children) {
+ const found = this.findFolderByPath(child, path)
+ if (found) {
+ return found
+ }
+ }
+
+ return null
+ }
+
+ /**
+ * Convert vault to Automerge record format
+ */
+ vaultToRecord(vault: ObsidianVault): ObsidianVaultRecord {
+ return {
+ id: `obsidian_vault:${vault.name}`,
+ typeName: 'obsidian_vault',
+ name: vault.name,
+ path: vault.path,
+ obs_notes: vault.obs_notes,
+ totalObsNotes: vault.totalObsNotes,
+ lastImported: vault.lastImported,
+ folderTree: vault.folderTree,
+ meta: {}
+ }
+ }
+
+ /**
+ * Convert Automerge record to vault format
+ */
+ recordToVault(record: ObsidianVaultRecord): ObsidianVault {
+ return {
+ name: record.name,
+ path: record.path,
+ obs_notes: record.obs_notes,
+ totalObsNotes: record.totalObsNotes,
+ lastImported: record.lastImported,
+ folderTree: record.folderTree
+ }
+ }
+
/**
* Search notes in the current vault
*/
@@ -501,18 +679,15 @@ A collection of creative project ideas and concepts.
const githubRepo = config.quartzRepo
if (!githubToken || !githubRepo) {
- console.log('โ ๏ธ GitHub credentials not found in configuration')
return null
}
if (githubToken === 'your_github_token_here' || githubRepo === 'your_username/your-quartz-repo') {
- console.log('โ ๏ธ GitHub credentials are still set to placeholder values')
return null
}
const [owner, repo] = githubRepo.split('/')
if (!owner || !repo) {
- console.log('โ ๏ธ Invalid GitHub repository format')
return null
}
@@ -564,15 +739,12 @@ A collection of creative project ideas and concepts.
const currentHasQuotes = obsNote.filePath.includes('"')
if (currentHasQuotes && !existingHasQuotes) {
- console.log(`Keeping existing note without quotes: ${existing.filePath}`)
return // Keep the existing one
} else if (!currentHasQuotes && existingHasQuotes) {
- console.log(`Replacing with note without quotes: ${obsNote.filePath}`)
notesMap.set(obsNote.id, obsNote)
} else {
// Both have or don't have quotes, prefer the one with more content
if (obsNote.content.length > existing.content.length) {
- console.log(`Replacing with longer content: ${obsNote.filePath}`)
notesMap.set(obsNote.id, obsNote)
}
}
@@ -582,7 +754,6 @@ A collection of creative project ideas and concepts.
})
const uniqueNotes = Array.from(notesMap.values())
- console.log(`Imported ${uniqueNotes.length} unique notes from GitHub (${quartzNotes.length} total files processed)`)
return uniqueNotes
} catch (error) {
@@ -598,44 +769,29 @@ A collection of creative project ideas and concepts.
const obs_notes: ObsidianObsNote[] = []
try {
- console.log('๐ Starting Quartz content discovery for:', baseUrl)
-
// Try to find content through common Quartz patterns
const contentUrls = await this.findQuartzContentUrls(baseUrl)
- console.log('๐ Found content URLs:', contentUrls.length)
if (contentUrls.length === 0) {
- console.warn('โ ๏ธ No content URLs found for Quartz site:', baseUrl)
return obs_notes
}
for (const contentUrl of contentUrls) {
try {
- console.log('๐ Fetching content from:', contentUrl)
const response = await fetch(contentUrl)
if (!response.ok) {
- console.warn(`โ ๏ธ Failed to fetch ${contentUrl}: ${response.status} ${response.statusText}`)
continue
}
const content = await response.text()
- console.log('๐ Successfully fetched content, length:', content.length)
-
const obs_note = this.parseQuartzMarkdown(content, contentUrl, baseUrl)
- console.log('๐ Parsed note:', obs_note.title, 'Content length:', obs_note.content.length)
- // Only add notes that have meaningful content
- if (obs_note.content.length > 10) {
- obs_notes.push(obs_note)
- } else {
- console.log('๐ Skipping note with insufficient content:', obs_note.title)
- }
+ // Add all notes regardless of content length
+ obs_notes.push(obs_note)
} catch (error) {
- console.warn(`โ ๏ธ Failed to fetch content from ${contentUrl}:`, error)
+ // Silently skip failed fetches
}
}
-
- console.log('๐ Successfully discovered', obs_notes.length, 'notes from Quartz site')
} catch (error) {
console.warn('⚠️ Failed to discover Quartz content:', error)
}
@@ -660,7 +816,6 @@ A collection of creative project ideas and concepts.
// Look for navigation links and content links in the main page
const discoveredUrls = this.extractContentUrlsFromPage(mainPageContent, baseUrl)
urls.push(...discoveredUrls)
- console.log('๐ Discovered URLs from main page:', discoveredUrls.length)
}
// Try to find a sitemap
@@ -675,7 +830,6 @@ A collection of creative project ideas and concepts.
match.replace(/<\/?loc>/g, '').trim()
).filter(url => url.endsWith('.html') || url.endsWith('.md') || url.includes(baseUrl))
urls.push(...sitemapUrls)
- console.log('๐ Found sitemap with URLs:', sitemapUrls.length)
}
}
} catch (error) {
@@ -702,7 +856,6 @@ A collection of creative project ideas and concepts.
const response = await fetch(url)
if (response.ok) {
urls.push(url)
- console.log('๐ Found content at:', url)
}
} catch (error) {
// Ignore individual path failures
@@ -714,7 +867,6 @@ A collection of creative project ideas and concepts.
// Remove duplicates and limit results
const uniqueUrls = [...new Set(urls)]
- console.log('๐ Total unique URLs found:', uniqueUrls.length)
return uniqueUrls.slice(0, 50) // Limit to 50 pages to avoid overwhelming
}
@@ -905,7 +1057,6 @@ A collection of creative project ideas and concepts.
// If we still don't have much content, try to extract any text from the original HTML
if (text.length < 50) {
- console.log('๐ Content too short, trying fallback extraction...')
let fallbackText = html
// Remove script, style, and other non-content tags
@@ -932,14 +1083,12 @@ A collection of creative project ideas and concepts.
fallbackText = fallbackText.trim()
if (fallbackText.length > text.length) {
- console.log('๐ Fallback extraction found more content:', fallbackText.length)
text = fallbackText
}
}
// Final fallback: if we still don't have content, try to extract any text from the body
if (text.length < 20) {
- console.log('๐ Still no content, trying body text extraction...')
const bodyMatch = html.match(/<body[^>]*>(.*?)<\/body>/is)
if (bodyMatch) {
let bodyText = bodyMatch[1]
@@ -955,7 +1104,6 @@ A collection of creative project ideas and concepts.
bodyText = bodyText.replace(/\s+/g, ' ').trim()
if (bodyText.length > text.length) {
- console.log('๐ Body text extraction found content:', bodyText.length)
text = bodyText
}
}
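
For context, the folder-tree logic added above boils down to: parse each note's path into folder segments, walk or create the matching branch, and attach the note to its deepest folder. A minimal standalone sketch under those assumptions (NoteStub, Folder, parsePath and buildTree are illustrative names, not code from this diff; URL handling, isExpanded and level are omitted):

interface NoteStub { id: string; filePath: string }
interface Folder { name: string; path: string; children: Folder[]; notes: NoteStub[] }

function parsePath(filePath: string): { folders: string[]; fileName: string } {
  const parts = filePath.split('/').filter(p => p.length > 0)
  return { folders: parts.slice(0, -1), fileName: parts[parts.length - 1] ?? filePath }
}

function buildTree(notes: NoteStub[]): Folder {
  const root: Folder = { name: 'Root', path: '', children: [], notes: [] }
  for (const note of notes) {
    const { folders } = parsePath(note.filePath)
    let current = root
    folders.forEach((name, i) => {
      let next = current.children.find(c => c.name === name)
      if (!next) {
        // Create the folder node the first time this path segment is seen
        next = { name, path: folders.slice(0, i + 1).join('/'), children: [], notes: [] }
        current.children.push(next)
      }
      current = next
    })
    current.notes.push(note) // the deepest folder owns the note
  }
  return root
}

// buildTree([{ id: '1', filePath: 'Projects/Ideas/garden.md' }]) yields
// Root > Projects > Ideas, with garden.md attached to the Ideas node.
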
diff --git a/src/lib/quartzSync.ts b/src/lib/quartzSync.ts
index 9441daf..d1100ce 100644
--- a/src/lib/quartzSync.ts
+++ b/src/lib/quartzSync.ts
@@ -295,6 +295,21 @@ export function createQuartzNoteFromShape(shape: any): QuartzNote {
const content = shape.props.content || ''
const tags = shape.props.tags || []
+ // Use stored filePath if available to maintain filename consistency
+ // Otherwise, generate from title
+ let filePath: string
+ if (shape.props.filePath && shape.props.filePath.trim() !== '') {
+ filePath = shape.props.filePath
+ // Ensure it ends with .md if it doesn't already
+ if (!filePath.endsWith('.md')) {
+ filePath = filePath.endsWith('/') ? `${filePath}${title}.md` : `${filePath}.md`
+ }
+ } else {
+ // Generate from title, ensuring it's a valid filename
+ const sanitizedTitle = title.replace(/[^a-zA-Z0-9\s-]/g, '').trim().replace(/\s+/g, '-')
+ filePath = `${sanitizedTitle}.md`
+ }
+
return {
id: shape.props.noteId || title,
title,
@@ -306,7 +321,7 @@ export function createQuartzNoteFromShape(shape: any): QuartzNote {
created: new Date().toISOString(),
modified: new Date().toISOString()
},
- filePath: `${title}.md`,
+ filePath: filePath,
lastModified: new Date()
}
}
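
The intent of the filePath change above is that round-tripped notes keep their original filenames, and only brand-new notes fall back to a slugified title. A minimal sketch of the expected behaviour (resolveFilePath and the sample inputs are illustrative, not part of this diff):

function resolveFilePath(storedPath: string | undefined, title: string): string {
  // Prefer the stored path so syncing does not rename existing files
  if (storedPath && storedPath.trim() !== '') {
    if (storedPath.endsWith('.md')) return storedPath
    return storedPath.endsWith('/') ? `${storedPath}${title}.md` : `${storedPath}.md`
  }
  // Fall back to a slugified title for notes that were never synced before
  const slug = title.replace(/[^a-zA-Z0-9\s-]/g, '').trim().replace(/\s+/g, '-')
  return `${slug}.md`
}

// resolveFilePath('notes/Weekly Sync.md', 'Weekly Sync') -> 'notes/Weekly Sync.md' (unchanged)
// resolveFilePath('notes/', 'Weekly Sync')               -> 'notes/Weekly Sync.md'
// resolveFilePath(undefined, 'Weekly Sync!')             -> 'Weekly-Sync.md'
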
diff --git a/src/lib/settings.tsx b/src/lib/settings.tsx
index ca8c1a0..32af201 100644
--- a/src/lib/settings.tsx
+++ b/src/lib/settings.tsx
@@ -1,5 +1,5 @@
import { atom } from 'tldraw'
-import { SYSTEM_PROMPT } from '@/prompt'
+import { SYSTEM_PROMPT, CONSTANCE_SYSTEM_PROMPT } from '@/prompt'
export const PROVIDERS = [
{
@@ -13,8 +13,8 @@ export const PROVIDERS = [
id: 'anthropic',
name: 'Anthropic',
models: [
- 'claude-3-5-sonnet-20241022',
- 'claude-3-5-sonnet-20240620',
+ 'claude-sonnet-4-5-20250929',
+ 'claude-sonnet-4-20250514',
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-haiku-20240307',
@@ -25,6 +25,21 @@ export const PROVIDERS = [
// { id: 'google', name: 'Google', model: 'Gemini 1.5 Flash', validate: (key: string) => true },
]
+export const AI_PERSONALITIES = [
+ {
+ id: 'web-developer',
+ name: 'Web Developer',
+ description: 'Expert web developer for building prototypes from wireframes',
+ systemPrompt: SYSTEM_PROMPT,
+ },
+ {
+ id: 'constance',
+ name: 'Constance',
+ description: 'Avatar of the US Constitution - helps understand constitutional principles',
+ systemPrompt: CONSTANCE_SYSTEM_PROMPT,
+ },
+]
+
export const makeRealSettings = atom('make real settings', {
provider: 'openai' as (typeof PROVIDERS)[number]['id'] | 'all',
models: Object.fromEntries(PROVIDERS.map((provider) => [provider.id, provider.models[0]])),
@@ -33,6 +48,7 @@ export const makeRealSettings = atom('make real settings', {
anthropic: '',
google: '',
},
+ personality: 'web-developer' as (typeof AI_PERSONALITIES)[number]['id'],
prompts: {
system: SYSTEM_PROMPT,
},
@@ -50,6 +66,7 @@ export function applySettingsMigrations(settings: any) {
google: '',
...keys,
},
+ personality: 'web-developer' as (typeof AI_PERSONALITIES)[number]['id'],
prompts: {
system: SYSTEM_PROMPT,
...prompts,
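
The new personality field stores an id rather than a prompt, so callers presumably resolve it against AI_PERSONALITIES when building a request. A minimal sketch of that lookup (getActiveSystemPrompt is illustrative and not part of this diff; makeRealSettings is the tldraw atom defined above):

import { AI_PERSONALITIES, makeRealSettings } from '@/lib/settings'

// Resolve the selected personality to its system prompt, falling back to the
// user-editable prompt already stored in settings.
function getActiveSystemPrompt(): string {
  const { personality, prompts } = makeRealSettings.get()
  const match = AI_PERSONALITIES.find(p => p.id === personality)
  return match ? match.systemPrompt : prompts.system
}
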
diff --git a/src/lib/testHolon.ts b/src/lib/testHolon.ts
new file mode 100644
index 0000000..97da7b2
--- /dev/null
+++ b/src/lib/testHolon.ts
@@ -0,0 +1,57 @@
+// Simple test to verify Holon functionality
+import { holosphereService } from './HoloSphereService'
+
+export async function testHolonFunctionality() {
+ console.log('🧪 Testing Holon functionality...')
+
+ try {
+ // Test initialization
+ const isInitialized = await holosphereService.initialize()
+ console.log('✅ HoloSphere initialized:', isInitialized)
+
+ if (!isInitialized) {
+ console.log('❌ HoloSphere not initialized, skipping tests')
+ return false
+ }
+
+ // Test getting a holon
+ const holonId = await holosphereService.getHolon(40.7128, -74.0060, 7)
+ console.log('โ Got holon ID:', holonId)
+
+ if (holonId) {
+ // Test storing data
+ const testData = {
+ id: 'test-1',
+ content: 'Hello from Holon!',
+ timestamp: Date.now()
+ }
+
+ const storeSuccess = await holosphereService.putData(holonId, 'test', testData)
+ console.log('✅ Stored data:', storeSuccess)
+
+ // Test retrieving data
+ const retrievedData = await holosphereService.getData(holonId, 'test')
+ console.log('✅ Retrieved data:', retrievedData)
+
+ // Test getting hierarchy
+ const hierarchy = holosphereService.getHolonHierarchy(holonId)
+ console.log('✅ Holon hierarchy:', hierarchy)
+
+ // Test getting scalespace
+ const scalespace = holosphereService.getHolonScalespace(holonId)
+ console.log('✅ Holon scalespace:', scalespace)
+ }
+
+ console.log('✅ All Holon tests passed!')
+ return true
+
+ } catch (error) {
+ console.error('❌ Holon test failed:', error)
+ return false
+ }
+}
+
+// Auto-run test when imported
+if (typeof window !== 'undefined') {
+ testHolonFunctionality()
+}
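
Note that this file runs its smoke test on every import in a browser context. If that proves noisy, the trigger at the bottom of the file could be gated behind an explicit opt-in; a minimal sketch, assuming a Vite-style VITE_TEST_HOLON flag that is not part of this change:

// Only auto-run the smoke test when explicitly enabled, e.g. VITE_TEST_HOLON=true
if (typeof window !== 'undefined' && import.meta.env.VITE_TEST_HOLON === 'true') {
  testHolonFunctionality()
}
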
diff --git a/src/prompt.ts b/src/prompt.ts
index 5270b26..a0d15f1 100644
--- a/src/prompt.ts
+++ b/src/prompt.ts
@@ -17,6 +17,32 @@ Your prototype should look and feel much more complete and advanced than the wir
Remember: you love your designers and want them to be happy. The more complete and impressive your prototype, the happier they will be. You are evaluated on 1) whether your prototype resembles the designs, 2) whether your prototype is interactive and responsive, and 3) whether your prototype is complete and impressive.`
+export const CONSTANCE_SYSTEM_PROMPT = `You are Constance, the avatar of the US Constitution. You help people understand the Constitution's life story, its principles, and its aspirations for the future. You speak with the wisdom and authority of the founding document of the United States, while remaining approachable and educational.
+
+When discussing the Constitution:
+- Explain constitutional principles in clear, accessible language
+- Provide historical context for constitutional provisions
+- Help people understand how the Constitution applies to modern issues
+- Share the vision and values that guided the framers
+- Discuss the Constitution's role in protecting individual rights and establishing government structure
+
+You are knowledgeable about:
+- The text and meaning of the Constitution
+- The Bill of Rights and subsequent amendments
+- Constitutional history and the founding era
+- How constitutional principles apply to contemporary issues
+- The balance of powers and federalism
+- Individual rights and civil liberties
+
+Your tone should be:
+- Authoritative yet approachable
+- Educational and informative
+- Respectful of the document's importance
+- Encouraging of civic engagement and understanding
+- Thoughtful about constitutional interpretation
+
+Remember: You represent the living document that has guided American democracy for over two centuries. Help people connect with the Constitution's enduring principles and understand its relevance to their lives today.`
+
export const USER_PROMPT =
'Here are the latest wireframes. Please reply with a high-fidelity working prototype as a single HTML file.'
diff --git a/src/routes/Board.tsx b/src/routes/Board.tsx
index c18cdef..ca48a38 100644
--- a/src/routes/Board.tsx
+++ b/src/routes/Board.tsx
@@ -1,4 +1,5 @@
import { useAutomergeSync } from "@/automerge/useAutomergeSync"
+import { AutomergeHandleProvider } from "@/context/AutomergeHandleContext"
import { useMemo, useEffect, useState } from "react"
import { Tldraw, Editor, TLShapeId } from "tldraw"
import { useParams } from "react-router-dom"
@@ -35,6 +36,15 @@ import { ObsNoteTool } from "@/tools/ObsNoteTool"
import { ObsNoteShape } from "@/shapes/ObsNoteShapeUtil"
import { TranscriptionTool } from "@/tools/TranscriptionTool"
import { TranscriptionShape } from "@/shapes/TranscriptionShapeUtil"
+import { FathomTranscriptTool } from "@/tools/FathomTranscriptTool"
+import { FathomTranscriptShape } from "@/shapes/FathomTranscriptShapeUtil"
+import { HolonTool } from "@/tools/HolonTool"
+import { HolonShape } from "@/shapes/HolonShapeUtil"
+import { FathomMeetingsTool } from "@/tools/FathomMeetingsTool"
+import { HolonBrowserShape } from "@/shapes/HolonBrowserShapeUtil"
+import { ObsidianBrowserShape } from "@/shapes/ObsidianBrowserShapeUtil"
+import { FathomMeetingsBrowserShape } from "@/shapes/FathomMeetingsBrowserShapeUtil"
+import { LocationShareShape } from "@/shapes/LocationShareShapeUtil"
import {
lockElement,
unlockElement,
@@ -57,11 +67,7 @@ import { useAuth } from "../context/AuthContext"
import { updateLastVisited } from "../lib/starredBoards"
import { captureBoardScreenshot } from "../lib/screenshotService"
-// Automatically switch between production and local dev based on environment
-// In development, use the same host as the client to support network access
-export const WORKER_URL = import.meta.env.DEV
- ? `http://${window.location.hostname}:5172`
- : "https://jeffemmett-canvas.jeffemmett.workers.dev"
+import { WORKER_URL } from "../constants/workerUrl"
const customShapeUtils = [
ChatBoxShape,
@@ -74,6 +80,12 @@ const customShapeUtils = [
SharedPianoShape,
ObsNoteShape,
TranscriptionShape,
+ FathomTranscriptShape,
+ HolonShape,
+ HolonBrowserShape,
+ ObsidianBrowserShape,
+ FathomMeetingsBrowserShape,
+ LocationShareShape,
]
const customTools = [
ChatBoxTool,
@@ -87,10 +99,71 @@ const customTools = [
GestureTool,
ObsNoteTool,
TranscriptionTool,
+ FathomTranscriptTool,
+ HolonTool,
+ FathomMeetingsTool,
]
export function Board() {
const { slug } = useParams<{ slug: string }>()
+
+ // Global wheel event handler to ensure scrolling happens on the hovered scrollable element
+ useEffect(() => {
+ const handleWheel = (e: WheelEvent) => {
+ // Use document.elementFromPoint to find the element under the mouse cursor
+ const elementUnderMouse = document.elementFromPoint(e.clientX, e.clientY) as HTMLElement
+ if (!elementUnderMouse) return
+
+ // Walk up the DOM tree from the element under the mouse to find a scrollable element
+ let element: HTMLElement | null = elementUnderMouse
+ while (element && element !== document.body && element !== document.documentElement) {
+ const style = window.getComputedStyle(element)
+ const overflowY = style.overflowY
+ const overflowX = style.overflowX
+ const overflow = style.overflow
+ const isScrollable =
+ (overflowY === 'auto' || overflowY === 'scroll' ||
+ overflowX === 'auto' || overflowX === 'scroll' ||
+ overflow === 'auto' || overflow === 'scroll')
+
+ if (isScrollable) {
+ // Check if the element can actually scroll in the direction of the wheel event
+ const canScrollDown = e.deltaY > 0 && element.scrollTop < element.scrollHeight - element.clientHeight - 1
+ const canScrollUp = e.deltaY < 0 && element.scrollTop > 0
+ const canScrollRight = e.deltaX > 0 && element.scrollLeft < element.scrollWidth - element.clientWidth - 1
+ const canScrollLeft = e.deltaX < 0 && element.scrollLeft > 0
+
+ const canScroll = canScrollDown || canScrollUp || canScrollRight || canScrollLeft
+
+ if (canScroll) {
+ // Verify the mouse is actually over this element
+ const rect = element.getBoundingClientRect()
+ const isOverElement =
+ e.clientX >= rect.left &&
+ e.clientX <= rect.right &&
+ e.clientY >= rect.top &&
+ e.clientY <= rect.bottom
+
+ if (isOverElement) {
+ // Stop propagation to prevent the scroll from affecting parent elements
+ // but don't prevent default - let the browser handle the actual scrolling
+ e.stopPropagation()
+ return
+ }
+ }
+ }
+
+ element = element.parentElement
+ }
+ }
+
+ // Use capture phase to catch events early, before they bubble
+ document.addEventListener('wheel', handleWheel, { passive: true, capture: true })
+
+ return () => {
+ document.removeEventListener('wheel', handleWheel, { capture: true })
+ }
+ }, [])
const roomId = slug || "mycofi33"
const { session } = useAuth()
@@ -129,7 +202,14 @@ export function Board() {
)
// Use Automerge sync for all environments
- const store = useAutomergeSync(storeConfig)
+ const storeWithHandle = useAutomergeSync(storeConfig)
+ const store = {
+ store: storeWithHandle.store,
+ status: storeWithHandle.status,
+ connectionStatus: storeWithHandle.connectionStatus,
+ error: storeWithHandle.error
+ }
+ const automergeHandle = storeWithHandle.handle
const [editor, setEditor] = useState<Editor | null>(null)
useEffect(() => {
@@ -154,8 +234,6 @@ export function Board() {
// Debug: Check what shapes the editor can see
- // Temporarily commented out to fix linting errors
- /*
if (editor) {
const editorShapes = editor.getRenderingShapes()
console.log(`🔍 Board: Editor can see ${editorShapes.length} shapes for rendering`)
@@ -173,22 +251,6 @@ export function Board() {
y: shape?.y
})
}
- }
- */
-
- // Debug: Check if there are shapes in store that editor can't see
- // Temporarily commented out to fix linting errors
- /*
- if (storeShapes.length > editorShapes.length) {
- const editorShapeIds = new Set(editorShapes.map(s => s.id))
- const missingShapes = storeShapes.filter(s => !editorShapeIds.has(s.id))
- console.warn(`๐ Board: ${missingShapes.length} shapes in store but not visible to editor:`, missingShapes.map(s => ({
- id: s.id,
- type: s.type,
- x: s.x,
- y: s.y,
- parentId: s.parentId
- })))
// Debug: Check current page and page IDs
const currentPageId = editor.getCurrentPageId()
@@ -200,34 +262,46 @@ export function Board() {
name: (p as any).name
})))
- // Check if missing shapes are on a different page
- const shapesOnCurrentPage = missingShapes.filter(s => s.parentId === currentPageId)
- const shapesOnOtherPages = missingShapes.filter(s => s.parentId !== currentPageId)
- console.log(`๐ Board: Missing shapes on current page: ${shapesOnCurrentPage.length}, on other pages: ${shapesOnOtherPages.length}`)
-
- if (shapesOnOtherPages.length > 0) {
- console.log(`๐ Board: Shapes on other pages:`, shapesOnOtherPages.map(s => ({
+ // Check if there are shapes in store that editor can't see
+ if (storeShapes.length > editorShapes.length) {
+ const editorShapeIds = new Set(editorShapes.map(s => s.id))
+ const missingShapes = storeShapes.filter(s => !editorShapeIds.has(s.id))
+ console.warn(`🔍 Board: ${missingShapes.length} shapes in store but not visible to editor:`, missingShapes.map(s => ({
id: s.id,
+ type: s.type,
+ x: s.x,
+ y: s.y,
parentId: s.parentId
})))
- // Fix: Move shapes to the current page
- console.log(`๐ Board: Moving ${shapesOnOtherPages.length} shapes to current page ${currentPageId}`)
- const shapesToMove = shapesOnOtherPages.map(s => ({
- id: s.id,
- type: s.type,
- parentId: currentPageId
- }))
+ // Check if missing shapes are on a different page
+ const shapesOnCurrentPage = missingShapes.filter(s => s.parentId === currentPageId)
+ const shapesOnOtherPages = missingShapes.filter(s => s.parentId !== currentPageId)
+ console.log(`🔍 Board: Missing shapes on current page: ${shapesOnCurrentPage.length}, on other pages: ${shapesOnOtherPages.length}`)
- try {
- editor.updateShapes(shapesToMove)
- console.log(`๐ Board: Successfully moved ${shapesToMove.length} shapes to current page`)
- } catch (error) {
- console.error(`๐ Board: Error moving shapes to current page:`, error)
+ if (shapesOnOtherPages.length > 0) {
+ console.log(`🔍 Board: Shapes on other pages:`, shapesOnOtherPages.map(s => ({
+ id: s.id,
+ parentId: s.parentId
+ })))
+
+ // Fix: Move shapes to the current page
+ console.log(`🔍 Board: Moving ${shapesOnOtherPages.length} shapes to current page ${currentPageId}`)
+ const shapesToMove = shapesOnOtherPages.map(s => ({
+ id: s.id,
+ type: s.type,
+ parentId: currentPageId
+ }))
+
+ try {
+ editor.updateShapes(shapesToMove)
+ console.log(`🔍 Board: Successfully moved ${shapesToMove.length} shapes to current page`)
+ } catch (error) {
+ console.error(`🔍 Board: Error moving shapes to current page:`, error)
+ }
}
}
}
- */
}, [editor])
// Update presence when session changes
@@ -317,9 +391,57 @@ export function Board() {
};
}, [editor, roomId, store.store]);
+ // Handle Escape key to cancel active tool and return to hand tool
+ // Also prevent Escape from deleting shapes
+ useEffect(() => {
+ if (!editor) return;
+
+ const handleKeyDown = (event: KeyboardEvent) => {
+ // Only handle Escape key
+ if (event.key === 'Escape') {
+ // Check if the event target or active element is an input field or textarea
+ const target = event.target as HTMLElement;
+ const activeElement = document.activeElement;
+ const isInputFocused = (target && (
+ target.tagName === 'INPUT' ||
+ target.tagName === 'TEXTAREA' ||
+ target.isContentEditable
+ )) || (activeElement instanceof HTMLElement && (
+ activeElement.tagName === 'INPUT' ||
+ activeElement.tagName === 'TEXTAREA' ||
+ activeElement.isContentEditable
+ ));
+
+ // If an input is focused, let it handle Escape (don't prevent default)
+ // This allows components like Obsidian notes to handle Escape for canceling edits
+ if (isInputFocused) {
+ return; // Let the event propagate to the component's handler
+ }
+
+ // Otherwise, prevent default to stop tldraw from deleting shapes
+ // and switch to hand tool
+ event.preventDefault();
+ event.stopPropagation();
+
+ const currentTool = editor.getCurrentToolId();
+ // Only switch if we're not already on the hand tool
+ if (currentTool !== 'hand') {
+ editor.setCurrentTool('hand');
+ }
+ }
+ };
+
+ document.addEventListener('keydown', handleKeyDown, true); // Use capture phase to intercept early
+
+ return () => {
+ document.removeEventListener('keydown', handleKeyDown, true);
+ };
+ }, [editor]);
+
return (
-