Compare commits: main...feature/go (426 commits)

@@ -0,0 +1,4 @@
# Ignore Cloudflare Worker configuration files during Pages deployment
# These are only used for separate Worker deployments
worker/
*.toml

@@ -0,0 +1,25 @@
# Frontend (VITE) Public Variables
VITE_GOOGLE_CLIENT_ID='your_google_client_id'
VITE_GOOGLE_MAPS_API_KEY='your_google_maps_api_key'
VITE_DAILY_DOMAIN='your_daily_domain'
VITE_TLDRAW_WORKER_URL='your_worker_url'

# AI Configuration
# AI Orchestrator with Ollama (FREE local AI - highest priority)
VITE_OLLAMA_URL='https://ai.jeffemmett.com'

# RunPod API (Primary AI provider when Ollama unavailable)
# Users don't need their own API keys - RunPod is pre-configured
VITE_RUNPOD_API_KEY='your_runpod_api_key_here'
VITE_RUNPOD_TEXT_ENDPOINT_ID='your_text_endpoint_id' # vLLM for chat/text
VITE_RUNPOD_IMAGE_ENDPOINT_ID='your_image_endpoint_id' # Automatic1111/SD
VITE_RUNPOD_VIDEO_ENDPOINT_ID='your_video_endpoint_id' # Wan2.2
VITE_RUNPOD_WHISPER_ENDPOINT_ID='your_whisper_endpoint_id' # WhisperX

# Worker-only Variables (Do not prefix with VITE_)
CLOUDFLARE_API_TOKEN='your_cloudflare_token'
CLOUDFLARE_ACCOUNT_ID='your_account_id'
CLOUDFLARE_ZONE_ID='your_zone_id'
R2_BUCKET_NAME='your_bucket_name'
R2_PREVIEW_BUCKET_NAME='your_preview_bucket_name'
DAILY_API_KEY=your_daily_api_key_here

@@ -0,0 +1,5 @@
*.pdf filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.mov filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text

@@ -0,0 +1,64 @@
name: Deploy Worker

on:
  push:
    branches:
      - main # Production deployment
      - 'automerge/**' # Dev deployment for automerge branches (matches automerge/*, automerge/**/*, etc.)
  workflow_dispatch: # Allows manual triggering from GitHub UI
    inputs:
      environment:
        description: 'Environment to deploy to'
        required: true
        default: 'dev'
        type: choice
        options:
          - dev
          - production

jobs:
  deploy:
    runs-on: ubuntu-latest
    name: Deploy Worker
    steps:
      - uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20"
          cache: "npm"

      - name: Install Dependencies
        run: npm ci

      - name: Determine Environment
        id: env
        run: |
          if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
            echo "environment=${{ github.event.inputs.environment }}" >> $GITHUB_OUTPUT
          elif [ "${{ github.ref }}" == "refs/heads/main" ]; then
            echo "environment=production" >> $GITHUB_OUTPUT
          else
            echo "environment=dev" >> $GITHUB_OUTPUT
          fi

      - name: Deploy to Cloudflare Workers (Production)
        if: steps.env.outputs.environment == 'production'
        run: |
          npm install -g wrangler@latest
          # Uses default wrangler.toml (production config) from root directory
          wrangler deploy
        env:
          CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}

      - name: Deploy to Cloudflare Workers (Dev)
        if: steps.env.outputs.environment == 'dev'
        run: |
          npm install -g wrangler@latest
          # Uses wrangler.dev.toml for dev environment
          wrangler deploy --config wrangler.dev.toml
        env:
          CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}

@@ -0,0 +1,28 @@
name: Mirror to Gitea

on:
  push:
    branches:
      - main
      - master
  workflow_dispatch:

jobs:
  mirror:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Mirror to Gitea
        env:
          GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
          GITEA_USERNAME: ${{ secrets.GITEA_USERNAME }}
        run: |
          REPO_NAME=$(basename $GITHUB_REPOSITORY)
          git remote add gitea https://$GITEA_USERNAME:$GITEA_TOKEN@gitea.jeffemmett.com/jeffemmett/$REPO_NAME.git || true
          git push gitea --all --force
          git push gitea --tags --force

@@ -0,0 +1,60 @@
# DISABLED: This workflow is preserved for future use in another repository
# To re-enable: Remove the `if: false` condition below
# This workflow syncs notes to a Quartz static site (separate from the canvas website)

name: Quartz Sync

on:
  push:
    paths:
      - 'content/**'
      - 'src/lib/quartzSync.ts'
  workflow_dispatch:
    inputs:
      note_id:
        description: 'Specific note ID to sync'
        required: false
        type: string

jobs:
  sync-quartz:
    # DISABLED: Set to false to prevent this workflow from running
    if: false
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          fetch-depth: 0

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '22'
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Build Quartz
        run: |
          npx quartz build
        env:
          QUARTZ_PUBLISH: true

      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        if: github.ref == 'refs/heads/main'
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./public
          cname: ${{ secrets.QUARTZ_DOMAIN }}

      - name: Notify sync completion
        if: always()
        run: |
          echo "Quartz sync completed at $(date)"
          echo "Triggered by: ${{ github.event_name }}"
          echo "Commit: ${{ github.sha }}"
|
@@ -0,0 +1,178 @@
|
|||
dist/
|
||||
.DS_Store
|
||||
bun.lockb
|
||||
|
||||
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
lerna-debug.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# Diagnostic reports (https://nodejs.org/api/report.html)
|
||||
|
||||
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
|
||||
|
||||
# Runtime data
|
||||
|
||||
pids
|
||||
*.pid
|
||||
*.seed
|
||||
*.pid.lock
|
||||
|
||||
# Directory for instrumented libs generated by jscoverage/JSCover
|
||||
|
||||
lib-cov
|
||||
|
||||
# Coverage directory used by tools like istanbul
|
||||
|
||||
coverage
|
||||
*.lcov
|
||||
|
||||
# nyc test coverage
|
||||
|
||||
.nyc_output
|
||||
|
||||
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
|
||||
|
||||
.grunt
|
||||
|
||||
# Bower dependency directory (https://bower.io/)
|
||||
|
||||
bower_components
|
||||
|
||||
# node-waf configuration
|
||||
|
||||
.lock-wscript
|
||||
|
||||
# Compiled binary addons (https://nodejs.org/api/addons.html)
|
||||
|
||||
build/Release
|
||||
|
||||
# Dependency directories
|
||||
|
||||
node_modules/
|
||||
jspm_packages/
|
||||
|
||||
# Snowpack dependency directory (https://snowpack.dev/)
|
||||
|
||||
web_modules/
|
||||
|
||||
# TypeScript cache
|
||||
|
||||
*.tsbuildinfo
|
||||
|
||||
# Optional npm cache directory
|
||||
|
||||
.npm
|
||||
|
||||
# Optional eslint cache
|
||||
|
||||
.eslintcache
|
||||
|
||||
# Optional stylelint cache
|
||||
|
||||
.stylelintcache
|
||||
|
||||
# Microbundle cache
|
||||
|
||||
.rpt2_cache/
|
||||
.rts2_cache_cjs/
|
||||
.rts2_cache_es/
|
||||
.rts2_cache_umd/
|
||||
|
||||
# Optional REPL history
|
||||
|
||||
.node_repl_history
|
||||
|
||||
# Output of 'npm pack'
|
||||
|
||||
*.tgz
|
||||
|
||||
|
||||
|
||||
# dotenv environment variable files
|
||||
|
||||
.env
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
.env.local
|
||||
|
||||
# parcel-bundler cache (https://parceljs.org/)
|
||||
|
||||
.cache
|
||||
.parcel-cache
|
||||
|
||||
# Next.js build output
|
||||
|
||||
.next
|
||||
out
|
||||
|
||||
# Nuxt.js build / generate output
|
||||
|
||||
.nuxt
|
||||
dist
|
||||
|
||||
# Gatsby files
|
||||
|
||||
.cache/
|
||||
|
||||
# Comment in the public line in if your project uses Gatsby and not Next.js
|
||||
|
||||
# https://nextjs.org/blog/next-9-1#public-directory-support
|
||||
|
||||
# public
|
||||
|
||||
# vuepress build output
|
||||
|
||||
.vuepress/dist
|
||||
|
||||
# vuepress v2.x temp and cache directory
|
||||
|
||||
.temp
|
||||
.cache
|
||||
|
||||
# Docusaurus cache and generated files
|
||||
|
||||
.docusaurus
|
||||
|
||||
# Serverless directories
|
||||
|
||||
.serverless/
|
||||
|
||||
# FuseBox cache
|
||||
|
||||
.fusebox/
|
||||
|
||||
# DynamoDB Local files
|
||||
|
||||
.dynamodb/
|
||||
|
||||
# TernJS port file
|
||||
|
||||
.tern-port
|
||||
|
||||
# Stores VSCode versions used for testing VSCode extensions
|
||||
|
||||
.vscode-test
|
||||
|
||||
.wrangler/
|
||||
|
||||
# Vercel
|
||||
.vercel/
|
||||
.dev.vars
|
||||
|
||||
# Environment variables
|
||||
.env*
|
||||
.env.development
|
||||
!.env.example
|
||||
.vercel
|
||||
|
||||
# Environment files
|
||||
.env
|
||||
.env.local
|
||||
.env.*.local
|
||||
.dev.vars
|
||||
.env.production
|
||||
.aider*
|
||||
|

@@ -0,0 +1,3 @@
legacy-peer-deps=true
strict-peer-dependencies=false
auto-install-peers=true

@@ -0,0 +1,4 @@
{
  "semi": false,
  "trailingComma": "all"
}

@@ -0,0 +1,30 @@
const urls = new Set();

function checkURL(request, init) {
  const url =
    request instanceof URL
      ? request
      : new URL(
          (typeof request === "string"
            ? new Request(request, init)
            : request
          ).url
        );
  if (url.port && url.port !== "443" && url.protocol === "https:") {
    if (!urls.has(url.toString())) {
      urls.add(url.toString());
      console.warn(
        `WARNING: known issue with \`fetch()\` requests to custom HTTPS ports in published Workers:\n` +
          ` - ${url.toString()} - the custom port will be ignored when the Worker is published using the \`wrangler deploy\` command.\n`
      );
    }
  }
}

globalThis.fetch = new Proxy(globalThis.fetch, {
  apply(target, thisArg, argArray) {
    const [request, init] = argArray;
    checkURL(request, init);
    return Reflect.apply(target, thisArg, argArray);
  },
});

@@ -0,0 +1,11 @@
import worker, * as OTHER_EXPORTS from "C:\\Users\\jeffe\\Documents\\GitHub\\canvas-website\\worker\\worker.ts";
import * as __MIDDLEWARE_0__ from "C:\\Users\\jeffe\\Documents\\GitHub\\canvas-website\\node_modules\\wrangler\\templates\\middleware\\middleware-ensure-req-body-drained.ts";
import * as __MIDDLEWARE_1__ from "C:\\Users\\jeffe\\Documents\\GitHub\\canvas-website\\node_modules\\wrangler\\templates\\middleware\\middleware-miniflare3-json-error.ts";

export * from "C:\\Users\\jeffe\\Documents\\GitHub\\canvas-website\\worker\\worker.ts";

export const __INTERNAL_WRANGLER_MIDDLEWARE__ = [
  __MIDDLEWARE_0__.default,
  __MIDDLEWARE_1__.default
]
export default worker;
@@ -0,0 +1,134 @@
|
|||
// This loads all middlewares exposed on the middleware object and then starts
|
||||
// the invocation chain. The big idea is that we can add these to the middleware
|
||||
// export dynamically through wrangler, or we can potentially let users directly
|
||||
// add them as a sort of "plugin" system.
|
||||
|
||||
import ENTRY, { __INTERNAL_WRANGLER_MIDDLEWARE__ } from "C:\\Users\\jeffe\\Documents\\GitHub\\canvas-website\\.wrangler\\tmp\\bundle-VlWfGj\\middleware-insertion-facade.js";
|
||||
import { __facade_invoke__, __facade_register__, Dispatcher } from "C:\\Users\\jeffe\\Documents\\GitHub\\canvas-website\\node_modules\\wrangler\\templates\\middleware\\common.ts";
|
||||
import type { WorkerEntrypointConstructor } from "C:\\Users\\jeffe\\Documents\\GitHub\\canvas-website\\.wrangler\\tmp\\bundle-VlWfGj\\middleware-insertion-facade.js";
|
||||
|
||||
// Preserve all the exports from the worker
|
||||
export * from "C:\\Users\\jeffe\\Documents\\GitHub\\canvas-website\\.wrangler\\tmp\\bundle-VlWfGj\\middleware-insertion-facade.js";
|
||||
|
||||
class __Facade_ScheduledController__ implements ScheduledController {
|
||||
readonly #noRetry: ScheduledController["noRetry"];
|
||||
|
||||
constructor(
|
||||
readonly scheduledTime: number,
|
||||
readonly cron: string,
|
||||
noRetry: ScheduledController["noRetry"]
|
||||
) {
|
||||
this.#noRetry = noRetry;
|
||||
}
|
||||
|
||||
noRetry() {
|
||||
if (!(this instanceof __Facade_ScheduledController__)) {
|
||||
throw new TypeError("Illegal invocation");
|
||||
}
|
||||
// Need to call native method immediately in case uncaught error thrown
|
||||
this.#noRetry();
|
||||
}
|
||||
}
|
||||
|
||||
function wrapExportedHandler(worker: ExportedHandler): ExportedHandler {
|
||||
// If we don't have any middleware defined, just return the handler as is
|
||||
if (
|
||||
__INTERNAL_WRANGLER_MIDDLEWARE__ === undefined ||
|
||||
__INTERNAL_WRANGLER_MIDDLEWARE__.length === 0
|
||||
) {
|
||||
return worker;
|
||||
}
|
||||
// Otherwise, register all middleware once
|
||||
for (const middleware of __INTERNAL_WRANGLER_MIDDLEWARE__) {
|
||||
__facade_register__(middleware);
|
||||
}
|
||||
|
||||
const fetchDispatcher: ExportedHandlerFetchHandler = function (
|
||||
request,
|
||||
env,
|
||||
ctx
|
||||
) {
|
||||
if (worker.fetch === undefined) {
|
||||
throw new Error("Handler does not export a fetch() function.");
|
||||
}
|
||||
return worker.fetch(request, env, ctx);
|
||||
};
|
||||
|
||||
return {
|
||||
...worker,
|
||||
fetch(request, env, ctx) {
|
||||
const dispatcher: Dispatcher = function (type, init) {
|
||||
if (type === "scheduled" && worker.scheduled !== undefined) {
|
||||
const controller = new __Facade_ScheduledController__(
|
||||
Date.now(),
|
||||
init.cron ?? "",
|
||||
() => {}
|
||||
);
|
||||
return worker.scheduled(controller, env, ctx);
|
||||
}
|
||||
};
|
||||
return __facade_invoke__(request, env, ctx, dispatcher, fetchDispatcher);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function wrapWorkerEntrypoint(
|
||||
klass: WorkerEntrypointConstructor
|
||||
): WorkerEntrypointConstructor {
|
||||
// If we don't have any middleware defined, just return the handler as is
|
||||
if (
|
||||
__INTERNAL_WRANGLER_MIDDLEWARE__ === undefined ||
|
||||
__INTERNAL_WRANGLER_MIDDLEWARE__.length === 0
|
||||
) {
|
||||
return klass;
|
||||
}
|
||||
// Otherwise, register all middleware once
|
||||
for (const middleware of __INTERNAL_WRANGLER_MIDDLEWARE__) {
|
||||
__facade_register__(middleware);
|
||||
}
|
||||
|
||||
// `extend`ing `klass` here so other RPC methods remain callable
|
||||
return class extends klass {
|
||||
#fetchDispatcher: ExportedHandlerFetchHandler<Record<string, unknown>> = (
|
||||
request,
|
||||
env,
|
||||
ctx
|
||||
) => {
|
||||
this.env = env;
|
||||
this.ctx = ctx;
|
||||
if (super.fetch === undefined) {
|
||||
throw new Error("Entrypoint class does not define a fetch() function.");
|
||||
}
|
||||
return super.fetch(request);
|
||||
};
|
||||
|
||||
#dispatcher: Dispatcher = (type, init) => {
|
||||
if (type === "scheduled" && super.scheduled !== undefined) {
|
||||
const controller = new __Facade_ScheduledController__(
|
||||
Date.now(),
|
||||
init.cron ?? "",
|
||||
() => {}
|
||||
);
|
||||
return super.scheduled(controller);
|
||||
}
|
||||
};
|
||||
|
||||
fetch(request: Request<unknown, IncomingRequestCfProperties>) {
|
||||
return __facade_invoke__(
|
||||
request,
|
||||
this.env,
|
||||
this.ctx,
|
||||
this.#dispatcher,
|
||||
this.#fetchDispatcher
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
let WRAPPED_ENTRY: ExportedHandler | WorkerEntrypointConstructor | undefined;
|
||||
if (typeof ENTRY === "object") {
|
||||
WRAPPED_ENTRY = wrapExportedHandler(ENTRY);
|
||||
} else if (typeof ENTRY === "function") {
|
||||
WRAPPED_ENTRY = wrapWorkerEntrypoint(ENTRY);
|
||||
}
|
||||
export default WRAPPED_ENTRY;
|
||||
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
|
|
@@ -0,0 +1,626 @@
|
|||
# AI Services Deployment & Testing Guide
|
||||
|
||||
Complete guide for deploying and testing the AI services integration in canvas-website with Netcup RS 8000 and RunPod.
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Overview
|
||||
|
||||
This project integrates multiple AI services with smart routing:
|
||||
|
||||
**Smart Routing Strategy:**
|
||||
- **Text/Code (70-80% workload)**: Local Ollama on RS 8000 → **FREE**
|
||||
- **Images - Low Priority**: Local Stable Diffusion on RS 8000 → **FREE** (slow ~60s)
|
||||
- **Images - High Priority**: RunPod GPU (SDXL) → **$0.02/image** (fast ~5s)
|
||||
- **Video Generation**: RunPod GPU (Wan2.1) → **$0.50/video** (30-90s)
|
||||
|
||||
**Expected Cost Savings:** $86-350/month compared to persistent GPU instances
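
The routing rule itself fits in a few lines. Below is a minimal TypeScript sketch of the strategy described above; the type and provider names are illustrative and do not reflect the orchestrator's actual API.

```typescript
// Sketch of the smart-routing strategy; names and thresholds are illustrative only.
type Task = { kind: "text" | "code" | "image" | "video"; priority: "low" | "normal" | "high" }
type Provider = "local-ollama" | "local-sd-cpu" | "runpod-gpu"

function routeTask(task: Task): Provider {
  switch (task.kind) {
    case "text":
    case "code":
      return "local-ollama" // FREE, covers 70-80% of the workload
    case "image":
      // low/normal priority accepts ~60s CPU renders for $0; high priority pays ~$0.02 for ~5s
      return task.priority === "high" ? "runpod-gpu" : "local-sd-cpu"
    case "video":
      return "runpod-gpu" // no local option, ~$0.50/video
  }
}
```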
|
||||
|
||||
---
|
||||
|
||||
## 📦 What's Included
|
||||
|
||||
### AI Services:
|
||||
1. ✅ **Text Generation (LLM)**
|
||||
- RunPod integration via `src/lib/runpodApi.ts`
|
||||
- Enhanced LLM utilities in `src/utils/llmUtils.ts`
|
||||
- AI Orchestrator client in `src/lib/aiOrchestrator.ts`
|
||||
- Prompt shapes, arrow LLM actions, command palette
|
||||
|
||||
2. ✅ **Image Generation**
|
||||
- ImageGenShapeUtil in `src/shapes/ImageGenShapeUtil.tsx`
|
||||
- ImageGenTool in `src/tools/ImageGenTool.ts`
|
||||
- Mock mode **DISABLED** (ready for production)
|
||||
- Smart routing: low priority → local CPU, high priority → RunPod GPU
|
||||
|
||||
3. ✅ **Video Generation (NEW!)**
|
||||
- VideoGenShapeUtil in `src/shapes/VideoGenShapeUtil.tsx`
|
||||
- VideoGenTool in `src/tools/VideoGenTool.ts`
|
||||
- Wan2.1 I2V 14B 720p model on RunPod
|
||||
- Always uses GPU (no local option)
|
||||
|
||||
4. ✅ **Voice Transcription**
|
||||
- WhisperX integration via `src/hooks/useWhisperTranscriptionSimple.ts`
|
||||
- Automatic fallback to local Whisper model
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Deployment Steps
|
||||
|
||||
### Step 1: Deploy AI Orchestrator on Netcup RS 8000
|
||||
|
||||
**Prerequisites:**
|
||||
- SSH access to Netcup RS 8000: `ssh netcup`
|
||||
- Docker and Docker Compose installed
|
||||
- RunPod API key
|
||||
|
||||
**1.1 Create AI Orchestrator Directory:**
|
||||
|
||||
```bash
|
||||
ssh netcup << 'EOF'
|
||||
mkdir -p /opt/ai-orchestrator/{services/{router,workers,monitor},configs,data/{redis,postgres,prometheus}}
|
||||
cd /opt/ai-orchestrator
|
||||
EOF
|
||||
```
|
||||
|
||||
**1.2 Copy Configuration Files:**
|
||||
|
||||
From your local machine, copy the AI orchestrator files created in `NETCUP_MIGRATION_PLAN.md`:
|
||||
|
||||
```bash
|
||||
# Copy docker-compose.yml
|
||||
scp /path/to/docker-compose.yml netcup:/opt/ai-orchestrator/
|
||||
|
||||
# Copy service files
|
||||
scp -r /path/to/services/* netcup:/opt/ai-orchestrator/services/
|
||||
```
|
||||
|
||||
**1.3 Configure Environment Variables:**
|
||||
|
||||
```bash
|
||||
ssh netcup "cat > /opt/ai-orchestrator/.env" << 'EOF'
|
||||
# PostgreSQL
|
||||
POSTGRES_PASSWORD=$(openssl rand -hex 16)
|
||||
|
||||
# RunPod API Keys
|
||||
RUNPOD_API_KEY=your_runpod_api_key_here
|
||||
RUNPOD_TEXT_ENDPOINT_ID=your_text_endpoint_id
|
||||
RUNPOD_IMAGE_ENDPOINT_ID=your_image_endpoint_id
|
||||
RUNPOD_VIDEO_ENDPOINT_ID=your_video_endpoint_id
|
||||
|
||||
# Grafana
|
||||
GRAFANA_PASSWORD=$(openssl rand -hex 16)
|
||||
|
||||
# Monitoring
|
||||
ALERT_EMAIL=your@email.com
|
||||
COST_ALERT_THRESHOLD=100
|
||||
EOF
|
||||
```
|
||||
|
||||
**1.4 Deploy the Stack:**
|
||||
|
||||
```bash
|
||||
ssh netcup << 'EOF'
|
||||
cd /opt/ai-orchestrator
|
||||
|
||||
# Start all services
|
||||
docker-compose up -d
|
||||
|
||||
# Check status
|
||||
docker-compose ps
|
||||
|
||||
# View logs
|
||||
docker-compose logs -f router
|
||||
EOF
|
||||
```
|
||||
|
||||
**1.5 Verify Deployment:**
|
||||
|
||||
```bash
|
||||
# Check health endpoint
|
||||
ssh netcup "curl http://localhost:8000/health"
|
||||
|
||||
# Check API documentation
|
||||
ssh netcup "curl http://localhost:8000/docs"
|
||||
|
||||
# Check queue status
|
||||
ssh netcup "curl http://localhost:8000/queue/status"
|
||||
```
|
||||
|
||||
### Step 2: Setup Local AI Models on RS 8000
|
||||
|
||||
**2.1 Download Ollama Models:**
|
||||
|
||||
```bash
|
||||
ssh netcup << 'EOF'
|
||||
# Download recommended models
|
||||
docker exec ai-ollama ollama pull llama3:70b
|
||||
docker exec ai-ollama ollama pull codellama:34b
|
||||
docker exec ai-ollama ollama pull deepseek-coder:33b
|
||||
docker exec ai-ollama ollama pull mistral:7b
|
||||
|
||||
# Verify
|
||||
docker exec ai-ollama ollama list
|
||||
|
||||
# Test a model
|
||||
docker exec ai-ollama ollama run llama3:70b "Hello, how are you?"
|
||||
EOF
|
||||
```
|
||||
|
||||
**2.2 Download Stable Diffusion Models:**
|
||||
|
||||
```bash
|
||||
ssh netcup << 'EOF'
|
||||
mkdir -p /data/models/stable-diffusion/sd-v2.1
|
||||
cd /data/models/stable-diffusion/sd-v2.1
|
||||
|
||||
# Download SD 2.1 weights
|
||||
wget https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.safetensors
|
||||
|
||||
# Verify
|
||||
ls -lh v2-1_768-ema-pruned.safetensors
|
||||
EOF
|
||||
```
|
||||
|
||||
**2.3 Download Wan2.1 Video Generation Model:**
|
||||
|
||||
```bash
|
||||
ssh netcup << 'EOF'
|
||||
# Install huggingface-cli
|
||||
pip install huggingface-hub
|
||||
|
||||
# Download Wan2.1 I2V 14B 720p
|
||||
mkdir -p /data/models/video-generation
|
||||
cd /data/models/video-generation
|
||||
|
||||
huggingface-cli download Wan-AI/Wan2.1-I2V-14B-720P \
|
||||
--include "*.safetensors" \
|
||||
--local-dir wan2.1_i2v_14b
|
||||
|
||||
# Check size (~28GB)
|
||||
du -sh wan2.1_i2v_14b
|
||||
EOF
|
||||
```
|
||||
|
||||
**Note:** The Wan2.1 model will be deployed to RunPod, not run locally on CPU.
|
||||
|
||||
### Step 3: Setup RunPod Endpoints
|
||||
|
||||
**3.1 Create RunPod Serverless Endpoints:**
|
||||
|
||||
Go to [RunPod Serverless](https://www.runpod.io/console/serverless) and create endpoints for:
|
||||
|
||||
1. **Text Generation Endpoint** (optional, fallback)
|
||||
- Model: Any LLM (Llama, Mistral, etc.)
|
||||
- GPU: Optional (we use local CPU primarily)
|
||||
|
||||
2. **Image Generation Endpoint**
|
||||
- Model: SDXL or SD3
|
||||
- GPU: A4000/A5000 (good price/performance)
|
||||
- Expected cost: ~$0.02/image
|
||||
|
||||
3. **Video Generation Endpoint**
|
||||
- Model: Wan2.1-I2V-14B-720P
|
||||
- GPU: A100 or H100 (required for video)
|
||||
- Expected cost: ~$0.50/video
|
||||
|
||||
**3.2 Get Endpoint IDs:**
|
||||
|
||||
For each endpoint, copy the endpoint ID from the URL or endpoint details.
|
||||
|
||||
Example: If URL is `https://api.runpod.ai/v2/jqd16o7stu29vq/run`, then `jqd16o7stu29vq` is your endpoint ID.
|
||||
|
||||
**3.3 Update Environment Variables:**
|
||||
|
||||
Update `/opt/ai-orchestrator/.env` with your endpoint IDs:
|
||||
|
||||
```bash
|
||||
ssh netcup "nano /opt/ai-orchestrator/.env"
|
||||
|
||||
# Add your endpoint IDs:
|
||||
RUNPOD_TEXT_ENDPOINT_ID=your_text_endpoint_id
|
||||
RUNPOD_IMAGE_ENDPOINT_ID=your_image_endpoint_id
|
||||
RUNPOD_VIDEO_ENDPOINT_ID=your_video_endpoint_id
|
||||
|
||||
# Restart services
|
||||
cd /opt/ai-orchestrator && docker-compose restart
|
||||
```
|
||||
|
||||
### Step 4: Configure canvas-website
|
||||
|
||||
**4.1 Create .env.local:**
|
||||
|
||||
In your canvas-website directory:
|
||||
|
||||
```bash
|
||||
cd /home/jeffe/Github/canvas-website-branch-worktrees/add-runpod-AI-API
|
||||
|
||||
cat > .env.local << 'EOF'
|
||||
# AI Orchestrator (Primary - Netcup RS 8000)
|
||||
VITE_AI_ORCHESTRATOR_URL=http://159.195.32.209:8000
|
||||
# Or use domain when DNS is configured:
|
||||
# VITE_AI_ORCHESTRATOR_URL=https://ai-api.jeffemmett.com
|
||||
|
||||
# RunPod API (Fallback/Direct Access)
|
||||
VITE_RUNPOD_API_KEY=your_runpod_api_key_here
|
||||
VITE_RUNPOD_TEXT_ENDPOINT_ID=your_text_endpoint_id
|
||||
VITE_RUNPOD_IMAGE_ENDPOINT_ID=your_image_endpoint_id
|
||||
VITE_RUNPOD_VIDEO_ENDPOINT_ID=your_video_endpoint_id
|
||||
|
||||
# Other existing vars...
|
||||
VITE_GOOGLE_CLIENT_ID=your_google_client_id
|
||||
VITE_GOOGLE_MAPS_API_KEY=your_google_maps_api_key
|
||||
VITE_DAILY_DOMAIN=your_daily_domain
|
||||
VITE_TLDRAW_WORKER_URL=your_worker_url
|
||||
EOF
|
||||
```
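
For reference, here is a minimal sketch of how the frontend can decide between the orchestrator and direct RunPod access at startup. It assumes only the Vite env vars above and the `/health` endpoint used in Test 1 below; it is not the actual logic in `src/lib/aiOrchestrator.ts`.

```typescript
// Sketch: prefer the AI Orchestrator when it answers /health, otherwise fall back to RunPod.
export async function pickProvider(): Promise<"orchestrator" | "runpod"> {
  const orchestratorUrl = import.meta.env.VITE_AI_ORCHESTRATOR_URL
  if (orchestratorUrl) {
    try {
      const res = await fetch(`${orchestratorUrl}/health`, { signal: AbortSignal.timeout(3000) })
      if (res.ok) return "orchestrator"
    } catch {
      console.warn("⚠️ AI Orchestrator configured but not responding, falling back to RunPod")
    }
  }
  if (import.meta.env.VITE_RUNPOD_API_KEY) return "runpod"
  throw new Error("No AI provider configured (set VITE_AI_ORCHESTRATOR_URL or VITE_RUNPOD_API_KEY)")
}
```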
|
||||
|
||||
**4.2 Install Dependencies:**
|
||||
|
||||
```bash
|
||||
npm install
|
||||
```
|
||||
|
||||
**4.3 Build and Start:**
|
||||
|
||||
```bash
|
||||
# Development
|
||||
npm run dev
|
||||
|
||||
# Production build
|
||||
npm run build
|
||||
npm run start
|
||||
```
|
||||
|
||||
### Step 5: Register Video Generation Tool
|
||||
|
||||
You need to register the VideoGen shape and tool with tldraw. Find where shapes and tools are registered (likely in `src/routes/Board.tsx` or similar):
|
||||
|
||||
**Add to shape utilities array:**
|
||||
```typescript
|
||||
import { VideoGenShapeUtil } from '@/shapes/VideoGenShapeUtil'
|
||||
|
||||
const shapeUtils = [
|
||||
// ... existing shapes
|
||||
VideoGenShapeUtil,
|
||||
]
|
||||
```
|
||||
|
||||
**Add to tools array:**
|
||||
```typescript
|
||||
import { VideoGenTool } from '@/tools/VideoGenTool'
|
||||
|
||||
const tools = [
|
||||
// ... existing tools
|
||||
VideoGenTool,
|
||||
]
|
||||
```
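
If the board mounts tldraw directly, both arrays are typically passed as props to the `<Tldraw>` component. The sketch below shows that wiring; the component name and the import path (`tldraw` vs `@tldraw/tldraw`, depending on version) are assumptions about this codebase, not its actual structure.

```tsx
// Sketch: pass the custom shape utils and tools to the tldraw editor.
// The exact component and import path in this repo may differ.
import { Tldraw } from 'tldraw'
import { VideoGenShapeUtil } from '@/shapes/VideoGenShapeUtil'
import { VideoGenTool } from '@/tools/VideoGenTool'

export function BoardCanvas() {
  return (
    <Tldraw
      shapeUtils={[VideoGenShapeUtil /* ...existing shapes */]}
      tools={[VideoGenTool /* ...existing tools */]}
    />
  )
}
```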
|
||||
|
||||
---
|
||||
|
||||
## 🧪 Testing
|
||||
|
||||
### Test 1: Verify AI Orchestrator
|
||||
|
||||
```bash
|
||||
# Test health endpoint
|
||||
curl http://159.195.32.209:8000/health
|
||||
|
||||
# Expected response:
|
||||
# {"status":"healthy","timestamp":"2025-11-25T12:00:00.000Z"}
|
||||
|
||||
# Test text generation
|
||||
curl -X POST http://159.195.32.209:8000/generate/text \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"prompt": "Write a hello world program in Python",
|
||||
"priority": "normal"
|
||||
}'
|
||||
|
||||
# Expected response:
|
||||
# {"job_id":"abc123","status":"queued","message":"Job queued on local provider"}
|
||||
|
||||
# Check job status
|
||||
curl http://159.195.32.209:8000/job/abc123
|
||||
|
||||
# Check queue status
|
||||
curl http://159.195.32.209:8000/queue/status
|
||||
|
||||
# Check costs
|
||||
curl http://159.195.32.209:8000/costs/summary
|
||||
```
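
The text endpoint is asynchronous, so a client submits a job and then polls `/job/{job_id}` until it finishes. A minimal TypeScript sketch of that loop is below; only `job_id` and `status` are confirmed by the responses above, and the `output` field and exact status strings are assumptions.

```typescript
// Sketch: submit a text job and poll until it completes or a deadline passes.
const BASE = "http://159.195.32.209:8000"

async function generateText(prompt: string, timeoutMs = 60_000): Promise<unknown> {
  const submit = await fetch(`${BASE}/generate/text`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ prompt, priority: "normal" }),
  })
  const { job_id } = await submit.json()

  const deadline = Date.now() + timeoutMs
  while (Date.now() < deadline) {
    const job = await (await fetch(`${BASE}/job/${job_id}`)).json()
    if (job.status === "completed") return job.output // "output" field is an assumption
    if (job.status === "failed") throw new Error(`Job ${job_id} failed`)
    await new Promise((resolve) => setTimeout(resolve, 2_000)) // poll every 2s
  }
  throw new Error(`Job ${job_id} timed out after ${timeoutMs / 1000}s`)
}
```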
|
||||
|
||||
### Test 2: Test Text Generation in Canvas
|
||||
|
||||
1. Open canvas-website in browser
|
||||
2. Open browser console (F12)
|
||||
3. Look for log messages:
|
||||
- `✅ AI Orchestrator is available at http://159.195.32.209:8000`
|
||||
4. Create a Prompt shape or use arrow LLM action
|
||||
5. Enter a prompt and submit
|
||||
6. Verify response appears
|
||||
7. Check console for routing info:
|
||||
- Should see `Using local Ollama (FREE)`
|
||||
|
||||
### Test 3: Test Image Generation
|
||||
|
||||
**Low Priority (Local CPU - FREE):**
|
||||
|
||||
1. Use ImageGen tool from toolbar
|
||||
2. Click on canvas to create ImageGen shape
|
||||
3. Enter prompt: "A beautiful mountain landscape"
|
||||
4. Select priority: "Low"
|
||||
5. Click "Generate"
|
||||
6. Wait 30-60 seconds
|
||||
7. Verify image appears
|
||||
8. Check console: Should show `Using local Stable Diffusion CPU`
|
||||
|
||||
**High Priority (RunPod GPU - $0.02):**
|
||||
|
||||
1. Create new ImageGen shape
|
||||
2. Enter prompt: "A futuristic city at sunset"
|
||||
3. Select priority: "High"
|
||||
4. Click "Generate"
|
||||
5. Wait 5-10 seconds
|
||||
6. Verify image appears
|
||||
7. Check console: Should show `Using RunPod SDXL`
|
||||
8. Check cost: Should show `~$0.02`
|
||||
|
||||
### Test 4: Test Video Generation
|
||||
|
||||
1. Use VideoGen tool from toolbar
|
||||
2. Click on canvas to create VideoGen shape
|
||||
3. Enter prompt: "A cat walking through a garden"
|
||||
4. Set duration: 3 seconds
|
||||
5. Click "Generate"
|
||||
6. Wait 30-90 seconds
|
||||
7. Verify video appears and plays
|
||||
8. Check console: Should show `Using RunPod Wan2.1`
|
||||
9. Check cost: Should show `~$0.50`
|
||||
10. Test download button
|
||||
|
||||
### Test 5: Test Voice Transcription
|
||||
|
||||
1. Use Transcription tool from toolbar
|
||||
2. Click to create Transcription shape
|
||||
3. Click "Start Recording"
|
||||
4. Speak into microphone
|
||||
5. Click "Stop Recording"
|
||||
6. Verify transcription appears
|
||||
7. Check if using RunPod or local Whisper
|
||||
|
||||
### Test 6: Monitor Costs and Performance
|
||||
|
||||
**Access monitoring dashboards:**
|
||||
|
||||
```bash
|
||||
# API Documentation
|
||||
http://159.195.32.209:8000/docs
|
||||
|
||||
# Queue Status
|
||||
http://159.195.32.209:8000/queue/status
|
||||
|
||||
# Cost Tracking
|
||||
http://159.195.32.209:3000/api/costs/summary
|
||||
|
||||
# Grafana Dashboard
|
||||
http://159.195.32.209:3001
|
||||
# Default login: admin / admin (change this!)
|
||||
```
|
||||
|
||||
**Check daily costs:**
|
||||
|
||||
```bash
|
||||
curl http://159.195.32.209:3000/api/costs/summary
|
||||
```
|
||||
|
||||
Expected response:
|
||||
```json
|
||||
{
|
||||
"today": {
|
||||
"local": 0.00,
|
||||
"runpod": 2.45,
|
||||
"total": 2.45
|
||||
},
|
||||
"this_month": {
|
||||
"local": 0.00,
|
||||
"runpod": 45.20,
|
||||
"total": 45.20
|
||||
},
|
||||
"breakdown": {
|
||||
"text": 0.00,
|
||||
"image": 12.50,
|
||||
"video": 32.70,
|
||||
"code": 0.00
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Troubleshooting
|
||||
|
||||
### Issue: AI Orchestrator not available
|
||||
|
||||
**Symptoms:**
|
||||
- Console shows: `⚠️ AI Orchestrator configured but not responding`
|
||||
- Health check fails
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# 1. Check if services are running
|
||||
ssh netcup "cd /opt/ai-orchestrator && docker-compose ps"
|
||||
|
||||
# 2. Check logs
|
||||
ssh netcup "cd /opt/ai-orchestrator && docker-compose logs -f router"
|
||||
|
||||
# 3. Restart services
|
||||
ssh netcup "cd /opt/ai-orchestrator && docker-compose restart"
|
||||
|
||||
# 4. Check firewall
|
||||
ssh netcup "sudo ufw status"
|
||||
ssh netcup "sudo ufw allow 8000/tcp"
|
||||
```
|
||||
|
||||
### Issue: Image generation fails with "No output found"
|
||||
|
||||
**Symptoms:**
|
||||
- Job completes but no image URL returned
|
||||
- Error: `Job completed but no output data found`
|
||||
|
||||
**Solutions:**
|
||||
1. Check RunPod endpoint configuration
|
||||
2. Verify endpoint handler returns correct format:
|
||||
```json
|
||||
{"output": {"image": "base64_or_url"}}
|
||||
```
|
||||
3. Check endpoint logs in RunPod console
|
||||
4. Test endpoint directly with curl
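
For step 4, the same direct check can also be scripted. The sketch below calls the RunPod serverless `/run` URL pattern from Step 3.2 with `fetch`; the shape of the `input` payload depends entirely on your endpoint's handler.

```typescript
// Sketch: call a RunPod serverless endpoint directly and inspect the raw response.
async function testRunPodEndpoint(endpointId: string, apiKey: string) {
  const res = await fetch(`https://api.runpod.ai/v2/${endpointId}/run`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${apiKey}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ input: { prompt: "A futuristic city at sunset" } }), // handler-specific payload
  })
  const job = await res.json()
  console.log("Submitted job:", job)
  return job
}
```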
|
||||
|
||||
### Issue: Video generation timeout
|
||||
|
||||
**Symptoms:**
|
||||
- Job stuck in "processing" state
|
||||
- Timeout after 120 attempts
|
||||
|
||||
**Solutions:**
|
||||
1. Video generation normally takes 30-90 seconds; wait out the full window before treating the job as stuck
|
||||
2. Check RunPod GPU availability (might be cold start)
|
||||
3. Increase timeout in VideoGenShapeUtil if needed
|
||||
4. Check RunPod endpoint logs for errors
|
||||
|
||||
### Issue: High costs
|
||||
|
||||
**Symptoms:**
|
||||
- Monthly costs exceed budget
|
||||
- Too many RunPod requests
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# 1. Check cost breakdown
|
||||
curl http://159.195.32.209:3000/api/costs/summary
|
||||
|
||||
# 2. Review routing decisions
|
||||
curl http://159.195.32.209:8000/queue/status
|
||||
|
||||
# 3. Adjust routing thresholds
|
||||
# Edit router configuration to prefer local more
|
||||
ssh netcup "nano /opt/ai-orchestrator/services/router/main.py"
|
||||
|
||||
# 4. Set cost alerts
|
||||
ssh netcup "nano /opt/ai-orchestrator/.env"
|
||||
# COST_ALERT_THRESHOLD=50 # Alert if daily cost > $50
|
||||
```
|
||||
|
||||
### Issue: Local models slow or failing
|
||||
|
||||
**Symptoms:**
|
||||
- Text generation slow (>30s)
|
||||
- Image generation very slow (>2min)
|
||||
- Out of memory errors
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# 1. Check system resources
|
||||
ssh netcup "htop"
|
||||
ssh netcup "free -h"
|
||||
|
||||
# 2. Reduce model size
|
||||
ssh netcup << 'EOF'
|
||||
# Use smaller models
|
||||
docker exec ai-ollama ollama pull llama3:8b # Instead of 70b
|
||||
docker exec ai-ollama ollama pull mistral:7b # Lighter model
|
||||
EOF
|
||||
|
||||
# 3. Limit concurrent workers
|
||||
ssh netcup "nano /opt/ai-orchestrator/docker-compose.yml"
|
||||
# Reduce worker replicas if needed
|
||||
|
||||
# 4. Increase swap (if low RAM)
|
||||
ssh netcup "sudo fallocate -l 8G /swapfile"
|
||||
ssh netcup "sudo chmod 600 /swapfile"
|
||||
ssh netcup "sudo mkswap /swapfile"
|
||||
ssh netcup "sudo swapon /swapfile"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📊 Performance Expectations
|
||||
|
||||
### Text Generation:
|
||||
- **Local (Llama3-70b)**: 2-10 seconds
|
||||
- **Local (Mistral-7b)**: 1-3 seconds
|
||||
- **RunPod (fallback)**: 3-8 seconds
|
||||
- **Cost**: $0.00 (local) or $0.001-0.01 (RunPod)
|
||||
|
||||
### Image Generation:
|
||||
- **Local SD CPU (low priority)**: 30-60 seconds
|
||||
- **RunPod GPU (high priority)**: 3-10 seconds
|
||||
- **Cost**: $0.00 (local) or $0.02 (RunPod)
|
||||
|
||||
### Video Generation:
|
||||
- **RunPod Wan2.1**: 30-90 seconds
|
||||
- **Cost**: ~$0.50 per video
|
||||
|
||||
### Expected Monthly Costs:
|
||||
|
||||
**Light Usage (100 requests/day):**
|
||||
- 70 text (local): $0
|
||||
- 20 images (15 local + 5 RunPod): $0.10
|
||||
- 10 videos: $5.00
|
||||
- **Total: ~$5-10/month**
|
||||
|
||||
**Medium Usage (500 requests/day):**
|
||||
- 350 text (local): $0
|
||||
- 100 images (60 local + 40 RunPod): $0.80
|
||||
- 50 videos: $25.00
|
||||
- **Total: ~$25-35/month**
|
||||
|
||||
**Heavy Usage (2000 requests/day):**
|
||||
- 1400 text (local): $0
|
||||
- 400 images (200 local + 200 RunPod): $4.00
|
||||
- 200 videos: $100.00
|
||||
- **Total: ~$100-120/month**
|
||||
|
||||
Compare to persistent GPU pod: $200-300/month regardless of usage!
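
The line items above follow directly from the per-job prices in this guide ($0.02 per RunPod image, $0.50 per video, $0 for local jobs); a tiny helper makes the arithmetic explicit for your own mix:

```typescript
// GPU spend from the per-job prices in this guide; local jobs cost $0.
const IMAGE_GPU_COST = 0.02 // RunPod SDXL, per image
const VIDEO_GPU_COST = 0.5 // RunPod Wan2.1, per video

function gpuSpend(runpodImages: number, videos: number): number {
  return runpodImages * IMAGE_GPU_COST + videos * VIDEO_GPU_COST
}

// e.g. the "light usage" line items above: gpuSpend(5, 10) ≈ $5.10 ($0.10 + $5.00)
```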
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Next Steps
|
||||
|
||||
1. ✅ Deploy AI Orchestrator on Netcup RS 8000
|
||||
2. ✅ Setup local AI models (Ollama, SD)
|
||||
3. ✅ Configure RunPod endpoints
|
||||
4. ✅ Test all AI services
|
||||
5. 📋 Setup monitoring and alerts
|
||||
6. 📋 Configure DNS for ai-api.jeffemmett.com
|
||||
7. 📋 Setup SSL with Let's Encrypt
|
||||
8. 📋 Migrate canvas-website to Netcup
|
||||
9. 📋 Monitor costs and optimize routing
|
||||
10. 📋 Decommission DigitalOcean droplets
|
||||
|
||||
---
|
||||
|
||||
## 📚 Additional Resources
|
||||
|
||||
- **Migration Plan**: See `NETCUP_MIGRATION_PLAN.md`
|
||||
- **RunPod Setup**: See `RUNPOD_SETUP.md`
|
||||
- **Test Guide**: See `TEST_RUNPOD_AI.md`
|
||||
- **API Documentation**: http://159.195.32.209:8000/docs
|
||||
- **Monitoring**: http://159.195.32.209:3001 (Grafana)
|
||||
|
||||
---
|
||||
|
||||
## 💡 Tips for Cost Optimization
|
||||
|
||||
1. **Prefer low priority for batch jobs**: Use `priority: "low"` for non-urgent tasks
|
||||
2. **Use local models first**: 70-80% of workload can run locally for $0
|
||||
3. **Monitor queue depth**: Auto-scales to RunPod when local is backed up
|
||||
4. **Set cost alerts**: Get notified if daily costs exceed threshold
|
||||
5. **Review cost breakdown weekly**: Identify optimization opportunities
|
||||
6. **Batch similar requests**: Process multiple items together
|
||||
7. **Cache results**: Store and reuse common queries
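
For tip 7, even a tiny in-memory cache keyed on the prompt avoids paying twice for identical requests. A sketch (no eviction or persistence, and `generateImage` in the usage comment is a placeholder for whatever generator you wrap):

```typescript
// Minimal in-memory result cache keyed by task type + prompt.
const cache = new Map<string, unknown>()

async function cached<T>(kind: string, prompt: string, generate: () => Promise<T>): Promise<T> {
  const key = `${kind}:${prompt}`
  if (cache.has(key)) return cache.get(key) as T
  const result = await generate()
  cache.set(key, result)
  return result
}

// usage: const img = await cached("image", "A beautiful sunset", () => generateImage(prompt))
```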
|
||||
|
||||
---
|
||||
|
||||
**Ready to deploy?** Start with Step 1 and follow the guide! 🚀
|
||||
|
|
@ -0,0 +1,372 @@
|
|||
# AI Services Setup - Complete Summary
|
||||
|
||||
## ✅ What We've Built
|
||||
|
||||
You now have a **complete, production-ready AI orchestration system** that intelligently routes between your Netcup RS 8000 (local CPU - FREE) and RunPod (serverless GPU - pay-per-use).
|
||||
|
||||
---
|
||||
|
||||
## 📦 Files Created/Modified
|
||||
|
||||
### New Files:
|
||||
1. **`NETCUP_MIGRATION_PLAN.md`** - Complete migration plan from DigitalOcean to Netcup
|
||||
2. **`AI_SERVICES_DEPLOYMENT_GUIDE.md`** - Step-by-step deployment and testing guide
|
||||
3. **`src/lib/aiOrchestrator.ts`** - AI Orchestrator client library
|
||||
4. **`src/shapes/VideoGenShapeUtil.tsx`** - Video generation shape (Wan2.1)
|
||||
5. **`src/tools/VideoGenTool.ts`** - Video generation tool
|
||||
|
||||
### Modified Files:
|
||||
1. **`src/shapes/ImageGenShapeUtil.tsx`** - Disabled mock mode (line 13: `USE_MOCK_API = false`)
|
||||
2. **`.env.example`** - Added AI Orchestrator and RunPod configuration
|
||||
|
||||
### Existing Files (Already Working):
|
||||
- `src/lib/runpodApi.ts` - RunPod API client for transcription
|
||||
- `src/utils/llmUtils.ts` - Enhanced LLM utilities with RunPod support
|
||||
- `src/hooks/useWhisperTranscriptionSimple.ts` - WhisperX transcription
|
||||
- `RUNPOD_SETUP.md` - RunPod setup documentation
|
||||
- `TEST_RUNPOD_AI.md` - Testing documentation
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Features & Capabilities
|
||||
|
||||
### 1. Text Generation (LLM)
|
||||
- ✅ Smart routing to local Ollama (FREE)
|
||||
- ✅ Fallback to RunPod if needed
|
||||
- ✅ Works with: Prompt shapes, arrow LLM actions, command palette
|
||||
- ✅ Models: Llama3-70b, CodeLlama-34b, Mistral-7b, etc.
|
||||
- 💰 **Cost: $0** (99% of requests use local CPU)
|
||||
|
||||
### 2. Image Generation
|
||||
- ✅ Priority-based routing:
|
||||
- Low priority → Local SD CPU (slow but FREE)
|
||||
- High priority → RunPod GPU (fast, $0.02)
|
||||
- ✅ Auto-scaling based on queue depth
|
||||
- ✅ ImageGenShapeUtil and ImageGenTool
|
||||
- ✅ Mock mode **DISABLED** - ready for production
|
||||
- 💰 **Cost: $0-0.02** per image
|
||||
|
||||
### 3. Video Generation (NEW!)
|
||||
- ✅ Wan2.1 I2V 14B 720p model on RunPod
|
||||
- ✅ VideoGenShapeUtil with video player
|
||||
- ✅ VideoGenTool for canvas
|
||||
- ✅ Download generated videos
|
||||
- ✅ Configurable duration (1-10 seconds)
|
||||
- 💰 **Cost: ~$0.50** per video
|
||||
|
||||
### 4. Voice Transcription
|
||||
- ✅ WhisperX on RunPod (primary)
|
||||
- ✅ Automatic fallback to local Whisper
|
||||
- ✅ TranscriptionShapeUtil
|
||||
- 💰 **Cost: $0.01-0.05** per transcription
|
||||
|
||||
---
|
||||
|
||||
## 🏗️ Architecture
|
||||
|
||||
```
|
||||
User Request
|
||||
│
|
||||
▼
|
||||
AI Orchestrator (RS 8000)
|
||||
│
|
||||
├─── Text/Code ───────▶ Local Ollama (FREE)
|
||||
│
|
||||
├─── Images (low) ────▶ Local SD CPU (FREE, slow)
|
||||
│
|
||||
├─── Images (high) ───▶ RunPod GPU ($0.02, fast)
|
||||
│
|
||||
└─── Video ───────────▶ RunPod GPU ($0.50)
|
||||
```
|
||||
|
||||
### Smart Routing Benefits:
|
||||
- **70-80% of workload runs for FREE** (local CPU)
|
||||
- **No idle GPU costs** (serverless = pay only when generating)
|
||||
- **Auto-scaling** (queue-based, handles spikes)
|
||||
- **Cost tracking** (per job, per user, per day/month)
|
||||
- **Graceful fallback** (local → RunPod → error)
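
That fallback chain is short enough to sketch. The two declared provider functions below are placeholders, not the real client API:

```typescript
// Sketch of the local → RunPod → error chain; the declared functions are placeholders.
declare function generateViaOrchestrator(prompt: string): Promise<string>
declare function generateViaRunPod(prompt: string): Promise<string>

async function generateWithFallback(prompt: string): Promise<string> {
  try {
    return await generateViaOrchestrator(prompt) // local first, FREE
  } catch (localError) {
    console.warn("Local generation failed, falling back to RunPod:", localError)
    try {
      return await generateViaRunPod(prompt) // paid GPU fallback
    } catch (runpodError) {
      throw new Error(`All providers failed: ${runpodError}`)
    }
  }
}
```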
|
||||
|
||||
---
|
||||
|
||||
## 💰 Cost Analysis
|
||||
|
||||
### Before (DigitalOcean + Persistent GPU):
|
||||
- Main Droplet: $18-36/mo
|
||||
- AI Droplet: $36/mo
|
||||
- RunPod persistent pods: $100-200/mo
|
||||
- **Total: $154-272/mo**
|
||||
|
||||
### After (Netcup RS 8000 + Serverless GPU):
|
||||
- RS 8000 G12 Pro: €55.57/mo (~$60/mo)
|
||||
- RunPod serverless: $30-60/mo (70% reduction)
|
||||
- **Total: $90-120/mo**
|
||||
|
||||
### Savings:
|
||||
- **Monthly: $64-152**
|
||||
- **Annual: $768-1,824**
|
||||
|
||||
### Plus You Get:
|
||||
- 10x CPU cores (20 vs 2)
|
||||
- 32x RAM (64GB vs 2GB)
|
||||
- 25x storage (3TB vs 120GB)
|
||||
- Better EU latency (Germany)
|
||||
|
||||
---
|
||||
|
||||
## 📋 Quick Start Checklist
|
||||
|
||||
### Phase 1: Deploy AI Orchestrator (1-2 hours)
|
||||
- [ ] SSH into Netcup RS 8000: `ssh netcup`
|
||||
- [ ] Create directory: `/opt/ai-orchestrator`
|
||||
- [ ] Deploy docker-compose stack (see NETCUP_MIGRATION_PLAN.md Phase 2)
|
||||
- [ ] Configure environment variables (.env)
|
||||
- [ ] Start services: `docker-compose up -d`
|
||||
- [ ] Verify: `curl http://localhost:8000/health`
|
||||
|
||||
### Phase 2: Setup Local AI Models (2-4 hours)
|
||||
- [ ] Download Ollama models (Llama3-70b, CodeLlama-34b)
|
||||
- [ ] Download Stable Diffusion 2.1 weights
|
||||
- [ ] Download Wan2.1 model weights (optional, runs on RunPod)
|
||||
- [ ] Test Ollama: `docker exec ai-ollama ollama run llama3:70b "Hello"`
|
||||
|
||||
### Phase 3: Configure RunPod Endpoints (30 min)
|
||||
- [ ] Create text generation endpoint (optional)
|
||||
- [ ] Create image generation endpoint (SDXL)
|
||||
- [ ] Create video generation endpoint (Wan2.1)
|
||||
- [ ] Copy endpoint IDs
|
||||
- [ ] Update .env with endpoint IDs
|
||||
- [ ] Restart services: `docker-compose restart`
|
||||
|
||||
### Phase 4: Configure canvas-website (15 min)
|
||||
- [ ] Create `.env.local` with AI Orchestrator URL
|
||||
- [ ] Add RunPod API keys (fallback)
|
||||
- [ ] Install dependencies: `npm install`
|
||||
- [ ] Register VideoGenShapeUtil and VideoGenTool (see deployment guide)
|
||||
- [ ] Build: `npm run build`
|
||||
- [ ] Start: `npm run dev`
|
||||
|
||||
### Phase 5: Test Everything (1 hour)
|
||||
- [ ] Test AI Orchestrator health check
|
||||
- [ ] Test text generation (local Ollama)
|
||||
- [ ] Test image generation (low priority - local)
|
||||
- [ ] Test image generation (high priority - RunPod)
|
||||
- [ ] Test video generation (RunPod Wan2.1)
|
||||
- [ ] Test voice transcription (WhisperX)
|
||||
- [ ] Check cost tracking dashboard
|
||||
- [ ] Monitor queue status
|
||||
|
||||
### Phase 6: Production Deployment (2-4 hours)
|
||||
- [ ] Setup nginx reverse proxy
|
||||
- [ ] Configure DNS: ai-api.jeffemmett.com → 159.195.32.209
|
||||
- [ ] Setup SSL with Let's Encrypt
|
||||
- [ ] Deploy canvas-website to RS 8000
|
||||
- [ ] Setup monitoring dashboards (Grafana)
|
||||
- [ ] Configure cost alerts
|
||||
- [ ] Test from production domain
|
||||
|
||||
---
|
||||
|
||||
## 🧪 Testing Commands
|
||||
|
||||
### Test AI Orchestrator:
|
||||
```bash
|
||||
# Health check
|
||||
curl http://159.195.32.209:8000/health
|
||||
|
||||
# Text generation
|
||||
curl -X POST http://159.195.32.209:8000/generate/text \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"prompt":"Hello world in Python","priority":"normal"}'
|
||||
|
||||
# Image generation (low priority)
|
||||
curl -X POST http://159.195.32.209:8000/generate/image \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"prompt":"A beautiful sunset","priority":"low"}'
|
||||
|
||||
# Video generation
|
||||
curl -X POST http://159.195.32.209:8000/generate/video \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"prompt":"A cat walking","duration":3}'
|
||||
|
||||
# Queue status
|
||||
curl http://159.195.32.209:8000/queue/status
|
||||
|
||||
# Costs
|
||||
curl http://159.195.32.209:3000/api/costs/summary
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📊 Monitoring Dashboards
|
||||
|
||||
Access your monitoring at:
|
||||
|
||||
- **API Docs**: http://159.195.32.209:8000/docs
|
||||
- **Queue Status**: http://159.195.32.209:8000/queue/status
|
||||
- **Cost Tracking**: http://159.195.32.209:3000/api/costs/summary
|
||||
- **Grafana**: http://159.195.32.209:3001 (login: admin/admin)
|
||||
- **Prometheus**: http://159.195.32.209:9090
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Configuration Files
|
||||
|
||||
### Environment Variables (.env.local):
|
||||
```bash
|
||||
# AI Orchestrator (Primary)
|
||||
VITE_AI_ORCHESTRATOR_URL=http://159.195.32.209:8000
|
||||
|
||||
# RunPod (Fallback)
|
||||
VITE_RUNPOD_API_KEY=your_api_key
|
||||
VITE_RUNPOD_TEXT_ENDPOINT_ID=xxx
|
||||
VITE_RUNPOD_IMAGE_ENDPOINT_ID=xxx
|
||||
VITE_RUNPOD_VIDEO_ENDPOINT_ID=xxx
|
||||
```
|
||||
|
||||
### AI Orchestrator (.env on RS 8000):
|
||||
```bash
|
||||
# PostgreSQL
|
||||
POSTGRES_PASSWORD=generated_password
|
||||
|
||||
# RunPod
|
||||
RUNPOD_API_KEY=your_api_key
|
||||
RUNPOD_TEXT_ENDPOINT_ID=xxx
|
||||
RUNPOD_IMAGE_ENDPOINT_ID=xxx
|
||||
RUNPOD_VIDEO_ENDPOINT_ID=xxx
|
||||
|
||||
# Monitoring
|
||||
GRAFANA_PASSWORD=generated_password
|
||||
COST_ALERT_THRESHOLD=100
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Common Issues & Solutions
|
||||
|
||||
### 1. "AI Orchestrator not available"
|
||||
```bash
|
||||
# Check if running
|
||||
ssh netcup "cd /opt/ai-orchestrator && docker-compose ps"
|
||||
|
||||
# Restart
|
||||
ssh netcup "cd /opt/ai-orchestrator && docker-compose restart"
|
||||
|
||||
# Check logs
|
||||
ssh netcup "cd /opt/ai-orchestrator && docker-compose logs -f router"
|
||||
```
|
||||
|
||||
### 2. "Image generation fails"
|
||||
- Check RunPod endpoint configuration
|
||||
- Verify endpoint returns: `{"output": {"image": "url"}}`
|
||||
- Test endpoint directly in RunPod console
|
||||
|
||||
### 3. "Video generation timeout"
|
||||
- Normal processing time: 30-90 seconds
|
||||
- Check RunPod GPU availability (cold start can add 30s)
|
||||
- Verify Wan2.1 endpoint is deployed correctly
|
||||
|
||||
### 4. "High costs"
|
||||
```bash
|
||||
# Check cost breakdown
|
||||
curl http://159.195.32.209:3000/api/costs/summary
|
||||
|
||||
# Adjust routing to prefer local more
|
||||
# Edit /opt/ai-orchestrator/services/router/main.py
|
||||
# Increase queue_depth threshold from 10 to 20+
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📚 Documentation Index
|
||||
|
||||
1. **NETCUP_MIGRATION_PLAN.md** - Complete migration guide (8 phases)
|
||||
2. **AI_SERVICES_DEPLOYMENT_GUIDE.md** - Deployment and testing guide
|
||||
3. **AI_SERVICES_SUMMARY.md** - This file (quick reference)
|
||||
4. **RUNPOD_SETUP.md** - RunPod WhisperX setup
|
||||
5. **TEST_RUNPOD_AI.md** - Testing guide for RunPod integration
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Next Actions
|
||||
|
||||
**Immediate (Today):**
|
||||
1. Review the migration plan (NETCUP_MIGRATION_PLAN.md)
|
||||
2. Verify SSH access to Netcup RS 8000
|
||||
3. Get RunPod API keys and endpoint IDs
|
||||
|
||||
**This Week:**
|
||||
1. Deploy AI Orchestrator on Netcup (Phase 2)
|
||||
2. Download local AI models (Phase 3)
|
||||
3. Configure RunPod endpoints
|
||||
4. Test basic functionality
|
||||
|
||||
**Next Week:**
|
||||
1. Full testing of all AI services
|
||||
2. Deploy canvas-website to Netcup
|
||||
3. Setup monitoring and alerts
|
||||
4. Configure DNS and SSL
|
||||
|
||||
**Future:**
|
||||
1. Migrate remaining services from DigitalOcean
|
||||
2. Decommission DigitalOcean droplets
|
||||
3. Optimize costs based on usage patterns
|
||||
4. Scale workers based on demand
|
||||
|
||||
---
|
||||
|
||||
## 💡 Pro Tips
|
||||
|
||||
1. **Start small**: Deploy text generation first, then images, then video
|
||||
2. **Monitor costs daily**: Use the cost dashboard to track spending
|
||||
3. **Use low priority for batch jobs**: Save 100% on images that aren't urgent
|
||||
4. **Cache common results**: Store and reuse frequent queries
|
||||
5. **Set cost alerts**: Get email when daily costs exceed threshold
|
||||
6. **Test locally first**: Use the mock API during development (see the sketch below)
|
||||
7. **Review queue depths**: Optimize routing thresholds based on your usage
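
Tip 6 refers to the `USE_MOCK_API` flag that `ImageGenShapeUtil.tsx` currently hard-codes to `false`. One way to avoid editing the file by hand is to drive it from an env var; this is only a suggestion, and `VITE_USE_MOCK_AI` is a hypothetical variable, not one the project defines today:

```typescript
// Suggestion: derive the mock flag from an env var instead of hard-coding it.
const USE_MOCK_API = import.meta.env.VITE_USE_MOCK_AI === "true" // hypothetical variable
```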
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Expected Performance
|
||||
|
||||
### Text Generation:
|
||||
- **Latency**: 2-10s (local), 3-8s (RunPod)
|
||||
- **Throughput**: 10-20 requests/min (local)
|
||||
- **Cost**: $0 (local), $0.001-0.01 (RunPod)
|
||||
|
||||
### Image Generation:
|
||||
- **Latency**: 30-60s (local low), 3-10s (RunPod high)
|
||||
- **Throughput**: 1-2 images/min (local), 6-10 images/min (RunPod)
|
||||
- **Cost**: $0 (local), $0.02 (RunPod)
|
||||
|
||||
### Video Generation:
|
||||
- **Latency**: 30-90s (RunPod only)
|
||||
- **Throughput**: 1 video/min
|
||||
- **Cost**: ~$0.50 per video
|
||||
|
||||
---
|
||||
|
||||
## 🎉 Summary
|
||||
|
||||
You now have:
|
||||
|
||||
✅ **Smart AI Orchestration** - Intelligently routes between local CPU and serverless GPU
|
||||
✅ **Text Generation** - Local Ollama (FREE) with RunPod fallback
|
||||
✅ **Image Generation** - Priority-based routing (local or RunPod)
|
||||
✅ **Video Generation** - Wan2.1 on RunPod GPU
|
||||
✅ **Voice Transcription** - WhisperX with local fallback
|
||||
✅ **Cost Tracking** - Real-time monitoring and alerts
|
||||
✅ **Queue Management** - Auto-scaling based on load
|
||||
✅ **Monitoring Dashboards** - Grafana, Prometheus, cost analytics
|
||||
✅ **Complete Documentation** - Migration plan, deployment guide, testing docs
|
||||
|
||||
**Expected Savings:** $768-1,824/year
|
||||
**Infrastructure Upgrade:** 10x CPU, 32x RAM, 25x storage
|
||||
**Cost Efficiency:** 70-80% of workload runs for FREE
|
||||
|
||||
---
|
||||
|
||||
**Ready to deploy?** 🚀
|
||||
|
||||
Start with the deployment guide: `AI_SERVICES_DEPLOYMENT_GUIDE.md`
|
||||
|
||||
Questions? Check the troubleshooting section or review the migration plan!
|
||||
|
|
|
|||
## 🔧 AUTO-APPROVED OPERATIONS
|
||||
|
||||
The following operations are auto-approved and do not require user confirmation:
|
||||
- **Read**: All file read operations (`Read(*)`)
|
||||
- **Glob**: All file pattern matching (`Glob(*)`)
|
||||
- **Grep**: All content searching (`Grep(*)`)
|
||||
|
||||
These permissions are configured in `~/.claude/settings.json`.
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ SAFETY GUIDELINES
|
||||
|
||||
**ALWAYS WARN THE USER before performing any action that could:**
|
||||
- Overwrite existing files (use `ls` or `cat` to check first)
|
||||
- Overwrite credentials, API keys, or secrets
|
||||
- Delete data or files
|
||||
- Modify production configurations
|
||||
- Run destructive git commands (force push, hard reset, etc.)
|
||||
- Drop databases or truncate tables
|
||||
|
||||
**Best practices:**
|
||||
- Before writing to a file, check if it exists and show its contents
|
||||
- Use `>>` (append) instead of `>` (overwrite) for credential files
|
||||
- Create backups before modifying critical configs (e.g., `cp file file.backup`)
|
||||
- Ask for confirmation before irreversible actions
|
||||
|
||||
**Sudo commands:**
|
||||
- **NEVER run sudo commands directly** - the Bash tool doesn't support interactive input
|
||||
- Instead, **provide the user with the exact sudo command** they need to run in their terminal
|
||||
- Format the command clearly in a code block for easy copy-paste (see the example after this list)
|
||||
- After user runs the sudo command, continue with the workflow
|
||||
- Alternative: if the user has run sudo recently (within ~15 min), subsequent sudo commands may not require a password
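
For example, rather than running it directly, hand the user something like:

```bash
# Example only - the user runs this in their own terminal:
sudo systemctl restart docker
```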
|
||||
|
||||
---
|
||||
|
||||
## 🔑 ACCESS & CREDENTIALS
|
||||
|
||||
### Version Control & Code Hosting
|
||||
- **Gitea**: Self-hosted at `gitea.jeffemmett.com` - PRIMARY repository
|
||||
- Push here FIRST, then mirror to GitHub
|
||||
- Private repos and source of truth
|
||||
- SSH Key: `~/.ssh/gitea_ed25519` (private), `~/.ssh/gitea_ed25519.pub` (public)
|
||||
- Public Key: `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIE2+2UZElEYptgZ9GFs2CXW0PIA57BfQcU9vlyV6fz4 gitea@jeffemmett.com`
|
||||
- **Gitea CLI (tea)**: ✅ Installed at `~/bin/tea` (added to PATH)
|
||||
|
||||
- **GitHub**: Public mirror and collaboration
|
||||
- Receives pushes from Gitea via mirror sync
|
||||
- Token: `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`
|
||||
- SSH Key: `~/.ssh/github_deploy_key` (private), `~/.ssh/github_deploy_key.pub` (public)
|
||||
- **GitHub CLI (gh)**: ✅ Installed and available for PR/issue management
|
||||
|
||||
### Git Workflow
|
||||
**Two-way sync between Gitea and GitHub:**
|
||||
|
||||
**Gitea-Primary Repos (Default):**
|
||||
1. Develop locally in `/home/jeffe/Github/`
|
||||
2. Commit and push to Gitea first
|
||||
3. Gitea automatically mirrors TO GitHub (built-in push mirror)
|
||||
4. GitHub used for public collaboration and visibility
|
||||
|
||||
**GitHub-Primary Repos (Mirror Repos):**
|
||||
For repos where GitHub is source of truth (v0.dev exports, client collabs):
|
||||
1. Push to GitHub
|
||||
2. Deploy webhook pulls from GitHub and deploys
|
||||
3. Webhook triggers Gitea to sync FROM GitHub
|
||||
|
||||
### 🔀 DEV BRANCH WORKFLOW (MANDATORY)
|
||||
|
||||
**CRITICAL: All development work on canvas-website (and other active projects) MUST use a dev branch.**
|
||||
|
||||
#### Branch Strategy
|
||||
```
|
||||
main (production)
|
||||
└── dev (integration/staging)
|
||||
└── feature/* (optional feature branches)
|
||||
```
|
||||
|
||||
#### Development Rules
|
||||
|
||||
1. **ALWAYS work on the `dev` branch** for new features and changes:
|
||||
```bash
|
||||
cd /home/jeffe/Github/canvas-website
|
||||
git checkout dev
|
||||
git pull origin dev
|
||||
```
|
||||
|
||||
2. **After completing a feature**, push to dev:
|
||||
```bash
|
||||
git add .
|
||||
git commit -m "feat: description of changes"
|
||||
git push origin dev
|
||||
```
|
||||
|
||||
3. **Update backlog task** immediately after pushing:
|
||||
```bash
|
||||
backlog task edit <task-id> --status "Done" --append-notes "Pushed to dev branch"
|
||||
```
|
||||
|
||||
4. **NEVER push directly to main** - main is for tested, verified features only
|
||||
|
||||
5. **Merge dev → main manually** when features are verified working:
|
||||
```bash
|
||||
git checkout main
|
||||
git pull origin main
|
||||
git merge dev
|
||||
git push origin main
|
||||
git checkout dev # Return to dev for continued work
|
||||
```
|
||||
|
||||
#### Complete Feature Deployment Checklist
|
||||
|
||||
- [ ] Work on `dev` branch (not main)
|
||||
- [ ] Test locally before committing
|
||||
- [ ] Commit with descriptive message
|
||||
- [ ] Push to `dev` branch on Gitea
|
||||
- [ ] Update backlog task status to "Done"
|
||||
- [ ] Add notes to backlog task about what was implemented
|
||||
- [ ] (Later) When verified working: merge dev → main manually
|
||||
|
||||
#### Why This Matters
|
||||
- **Protects production**: main branch always has known-working code
|
||||
- **Enables testing**: dev branch can be deployed to staging for verification
|
||||
- **Clean history**: main only gets complete, tested features
|
||||
- **Easy rollback**: if dev breaks, main is still stable
|
||||
|
||||
### Server Infrastructure
|
||||
- **Netcup RS 8000 G12 Pro**: Primary application & AI server
|
||||
- IP: `159.195.32.209`
|
||||
- 20 cores, 64GB RAM, 3TB storage
|
||||
- Hosts local AI models (Ollama, Stable Diffusion)
|
||||
- All websites and apps deployed here in Docker containers
|
||||
- Location: Germany (low latency EU)
|
||||
- SSH Key (local): `~/.ssh/netcup_ed25519` (private), `~/.ssh/netcup_ed25519.pub` (public)
|
||||
- Public Key: `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKmp4A2klKv/YIB1C6JAsb2UzvlzzE+0EcJ0jtkyFuhO netcup-rs8000@jeffemmett.com`
|
||||
- SSH Access: `ssh netcup`
|
||||
- **SSH Keys ON the server** (for git operations):
|
||||
- Gitea: `~/.ssh/gitea_ed25519` → `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIE2+2UZElEYptgZ9GFs2CXW0PIA57BfQcU9vlyV6fz4 gitea@jeffemmett.com`
|
||||
- GitHub: `~/.ssh/github_ed25519` → `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC6xXNICy0HXnqHO+U7+y7ui+pZBGe0bm0iRMS23pR1E github-deploy@netcup-rs8000`
|
||||
|
||||
- **RunPod**: GPU burst capacity for AI workloads
|
||||
- Host: `ssh.runpod.io`
|
||||
- Serverless GPU pods (pay-per-use)
|
||||
- Used for: SDXL/SD3, video generation, training
|
||||
- Smart routing from RS 8000 orchestrator
|
||||
- SSH Key: `~/.ssh/runpod_ed25519` (private), `~/.ssh/runpod_ed25519.pub` (public)
|
||||
- Public Key: `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAC7NYjI0U/2ChGaZBBWP7gKt/V12Ts6FgatinJOQ8JG runpod@jeffemmett.com`
|
||||
- SSH Access: `ssh runpod`
|
||||
- **API Key**: `rpa_YYOARL5MEBTTKKWGABRKTW2CVHQYRBTOBZNSGIL3lwwfdz`
|
||||
- **CLI Config**: `~/.runpod/config.toml`
|
||||
- **Serverless Endpoints**:
|
||||
- Image (SD): `tzf1j3sc3zufsy` (Automatic1111)
|
||||
- Video (Wan2.2): `4jql4l7l0yw0f3`
|
||||
- Text (vLLM): `03g5hz3hlo8gr2`
|
||||
- Whisper: `lrtisuv8ixbtub`
|
||||
- ComfyUI: `5zurj845tbf8he`
|
||||
|
||||
### API Keys & Services
|
||||
|
||||
**IMPORTANT**: All API keys and tokens are stored securely on the Netcup server. Never store credentials locally.
|
||||
- Access credentials via: `ssh netcup "cat ~/.cloudflare-credentials.env"` or `ssh netcup "cat ~/.porkbun_credentials"`
|
||||
- All API operations should be performed FROM the Netcup server, not locally
|
||||
|
||||
#### Credential Files on Netcup (`/root/`)
|
||||
| File | Contents |
|
||||
|------|----------|
|
||||
| `~/.cloudflare-credentials.env` | Cloudflare API tokens, account ID, tunnel token |
|
||||
| `~/.cloudflare_credentials` | Legacy/DNS token |
|
||||
| `~/.porkbun_credentials` | Porkbun API key and secret |
|
||||
| `~/.v0_credentials` | V0.dev API key |
|
||||
|
||||
#### Cloudflare
|
||||
- **Account ID**: `0e7b3338d5278ed1b148e6456b940913`
|
||||
- **Tokens stored on Netcup** - source `~/.cloudflare-credentials.env`:
|
||||
- `CLOUDFLARE_API_TOKEN` - Zone read, Worker:read/edit, R2:read/edit
|
||||
- `CLOUDFLARE_TUNNEL_TOKEN` - Tunnel management
|
||||
- `CLOUDFLARE_ZONE_TOKEN` - Zone:Edit, DNS:Edit (for adding domains)
|
||||
|
||||
#### Porkbun (Domain Registrar)
|
||||
- **Credentials stored on Netcup** - source `~/.porkbun_credentials`:
|
||||
- `PORKBUN_API_KEY` and `PORKBUN_SECRET_KEY`
|
||||
- **API Endpoint**: `https://api-ipv4.porkbun.com/api/json/v3/`
|
||||
- **API Docs**: https://porkbun.com/api/json/v3/documentation
|
||||
- **Important**: JSON request bodies must list `secretapikey` before `apikey` (see the example after this list)
|
||||
- **Capabilities**: Update nameservers, get auth codes for transfers, manage DNS
|
||||
- **Note**: Each domain must have "API Access" enabled individually in Porkbun dashboard
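
A minimal authentication check (hedged sketch) against the endpoint above, showing the key ordering. It assumes the variable names match `~/.porkbun_credentials` and uses Porkbun's `ping` route, which echoes your IP when the keys are valid.

```bash
# Run from Netcup; keys sourced from ~/.porkbun_credentials (variable names assumed)
source ~/.porkbun_credentials
curl -s -X POST "https://api-ipv4.porkbun.com/api/json/v3/ping" \
  -H "Content-Type: application/json" \
  -d "{\"secretapikey\": \"${PORKBUN_SECRET_KEY}\", \"apikey\": \"${PORKBUN_API_KEY}\"}"
# Note: secretapikey deliberately precedes apikey in the JSON body
```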
|
||||
|
||||
#### Domain Onboarding Workflow (Porkbun → Cloudflare)
|
||||
Run these commands FROM Netcup (`ssh netcup`); a hedged sketch of steps 1–3 follows the list:
|
||||
1. Add domain to Cloudflare (creates zone, returns nameservers)
|
||||
2. Update nameservers at Porkbun to point to Cloudflare
|
||||
3. Add CNAME record pointing to Cloudflare tunnel
|
||||
4. Add hostname to tunnel config and restart cloudflared
|
||||
5. Domain is live through the tunnel!
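
A hedged sketch of steps 1–3 using the tokens described above (variable names and exact payloads are assumptions; Cloudflare's step 1 response contains the zone ID and assigned nameservers). Step 4 is the tunnel-config edit shown in the Cloudflare Tunnel section.

```bash
# Run from Netcup. DOMAIN, ZONE_ID and nameservers are placeholders.
DOMAIN="example.com"
source ~/.cloudflare-credentials.env
source ~/.porkbun_credentials

# 1. Create the zone in Cloudflare (response includes zone ID and nameservers)
curl -s -X POST "https://api.cloudflare.com/client/v4/zones" \
  -H "Authorization: Bearer ${CLOUDFLARE_ZONE_TOKEN}" \
  -H "Content-Type: application/json" \
  -d "{\"name\": \"${DOMAIN}\", \"account\": {\"id\": \"0e7b3338d5278ed1b148e6456b940913\"}}"

# 2. Point the Porkbun nameservers at Cloudflare (use the NS names from step 1)
curl -s -X POST "https://api-ipv4.porkbun.com/api/json/v3/domain/updateNs/${DOMAIN}" \
  -H "Content-Type: application/json" \
  -d "{\"secretapikey\": \"${PORKBUN_SECRET_KEY}\", \"apikey\": \"${PORKBUN_API_KEY}\", \"ns\": [\"ns1.example\", \"ns2.example\"]}"

# 3. Add a proxied CNAME for the apex pointing at the tunnel
ZONE_ID="zone-id-from-step-1"
curl -s -X POST "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/dns_records" \
  -H "Authorization: Bearer ${CLOUDFLARE_ZONE_TOKEN}" \
  -H "Content-Type: application/json" \
  -d "{\"type\": \"CNAME\", \"name\": \"${DOMAIN}\", \"content\": \"a838e9dc-0af5-4212-8af2-6864eb15e1b5.cfargotunnel.com\", \"proxied\": true}"
```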
|
||||
|
||||
#### V0.dev (AI UI Generation)
|
||||
- **Credentials stored on Netcup** - source `~/.v0_credentials`:
|
||||
- `V0_API_KEY` - Platform API access
|
||||
- **API Key**: `v1:5AwJbit4j9rhGcAKPU4XlVWs:05vyCcJLiWRVQW7Xu4u5E03G`
|
||||
- **SDK**: `npm install v0-sdk` (use `v0` CLI for adding components)
|
||||
- **Docs**: https://v0.app/docs/v0-platform-api
|
||||
- **Capabilities**:
|
||||
- List/create/update/delete projects
|
||||
- Manage chats and versions
|
||||
- Download generated code
|
||||
- Create deployments
|
||||
- Manage environment variables
|
||||
- **Limitations**: GitHub-only for git integration (no Gitea/GitLab support)
|
||||
- **Usage**:
|
||||
```javascript
|
||||
import { v0 } from 'v0-sdk'; // ESM import so top-level await below works
|
||||
// Uses V0_API_KEY env var automatically
|
||||
const projects = await v0.projects.find();
|
||||
const chats = await v0.chats.find();
|
||||
```
|
||||
|
||||
#### Other Services
|
||||
- **HuggingFace**: CLI access available for model downloads
|
||||
- **RunPod**: API access for serverless GPU orchestration (see Server Infrastructure above)
|
||||
|
||||
### Dev Ops Stack & Principles
|
||||
- **Platform**: Linux WSL2 (Ubuntu on Windows) for development
|
||||
- **Working Directory**: `/home/jeffe/Github`
|
||||
- **Container Strategy**:
|
||||
- ALL repos should be Dockerized
|
||||
- Optimized containers for production deployment
|
||||
- Docker Compose for multi-service orchestration
|
||||
- **Process Management**: PM2 available for Node.js services
|
||||
- **Version Control**: Git configured with GitHub + Gitea mirrors
|
||||
- **Package Managers**: npm/pnpm/yarn available
|
||||
|
||||
### 🚀 Traefik Reverse Proxy (Central Routing)
|
||||
All HTTP services on Netcup RS 8000 route through Traefik for automatic service discovery.
|
||||
|
||||
**Architecture:**
|
||||
```
|
||||
Internet → Cloudflare Tunnel → Traefik (:80/:443) → Docker Services
|
||||
│
|
||||
├── gitea.jeffemmett.com → gitea:3000
|
||||
├── mycofi.earth → mycofi:3000
|
||||
├── games.jeffemmett.com → games:80
|
||||
└── [auto-discovered via Docker labels]
|
||||
```
|
||||
|
||||
**Location:** `/root/traefik/` on Netcup RS 8000
|
||||
|
||||
**Adding a New Service:**
|
||||
```yaml
|
||||
# In your docker-compose.yml, add these labels:
|
||||
services:
|
||||
myapp:
|
||||
image: myapp:latest
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.myapp.rule=Host(`myapp.jeffemmett.com`)"
|
||||
- "traefik.http.services.myapp.loadbalancer.server.port=3000"
|
||||
networks:
|
||||
- traefik-public
|
||||
networks:
|
||||
traefik-public:
|
||||
external: true
|
||||
```
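
After `docker compose up -d`, you can confirm Traefik discovered the router. This assumes the shared network already exists and that the Traefik API is reachable on the dashboard port noted below (both are deployment details worth double-checking).

```bash
# Create the shared network once (safe to re-run)
docker network create traefik-public 2>/dev/null || true

# Start the service, then check Traefik's router list for it
docker compose up -d
curl -s http://localhost:8888/api/http/routers | grep -o '"myapp@docker"'
```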
|
||||
|
||||
**Traefik Dashboard:** `http://159.195.32.209:8888` (internal only)
|
||||
|
||||
**SSH Git Access:**
|
||||
- SSH goes direct (not through Traefik): `git.jeffemmett.com:223` → `159.195.32.209:223`
|
||||
- Web UI goes through Traefik: `gitea.jeffemmett.com` → Traefik → gitea:3000
|
||||
|
||||
### ☁️ Cloudflare Tunnel Configuration
|
||||
**Location:** `/root/cloudflared/` on Netcup RS 8000
|
||||
|
||||
The tunnel uses a token-based configuration managed via Cloudflare Zero Trust Dashboard.
|
||||
All public hostnames should point to `http://localhost:80` (Traefik), which routes based on Host header.
|
||||
|
||||
**Managed hostnames:**
|
||||
- `gitea.jeffemmett.com` → Traefik → Gitea
|
||||
- `photos.jeffemmett.com` → Traefik → Immich
|
||||
- `movies.jeffemmett.com` → Traefik → Jellyfin
|
||||
- `search.jeffemmett.com` → Traefik → Semantic Search
|
||||
- `mycofi.earth` → Traefik → MycoFi
|
||||
- `games.jeffemmett.com` → Traefik → Games Platform
|
||||
- `decolonizeti.me` → Traefik → Decolonize Time
|
||||
|
||||
**Tunnel ID:** `a838e9dc-0af5-4212-8af2-6864eb15e1b5`
|
||||
**Tunnel CNAME Target:** `a838e9dc-0af5-4212-8af2-6864eb15e1b5.cfargotunnel.com`
|
||||
|
||||
**To deploy a new website/service:**
|
||||
|
||||
1. **Dockerize the project** with Traefik labels in `docker-compose.yml`:
|
||||
```yaml
|
||||
services:
|
||||
myapp:
|
||||
build: .
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.myapp.rule=Host(`mydomain.com`) || Host(`www.mydomain.com`)"
|
||||
- "traefik.http.services.myapp.loadbalancer.server.port=3000"
|
||||
networks:
|
||||
- traefik-public
|
||||
networks:
|
||||
traefik-public:
|
||||
external: true
|
||||
```
|
||||
|
||||
2. **Deploy to Netcup:**
|
||||
```bash
|
||||
ssh netcup "cd /opt/websites && git clone <repo-url>"
|
||||
ssh netcup "cd /opt/websites/<project> && docker compose up -d --build"
|
||||
```
|
||||
|
||||
3. **Add hostname to tunnel config** (`/root/cloudflared/config.yml`):
|
||||
```yaml
|
||||
- hostname: mydomain.com
|
||||
service: http://localhost:80
|
||||
- hostname: www.mydomain.com
|
||||
service: http://localhost:80
|
||||
```
|
||||
Then restart: `ssh netcup "docker restart cloudflared"`
|
||||
|
||||
4. **Configure DNS in Cloudflare dashboard** (CRITICAL - prevents 525 SSL errors):
|
||||
- Go to Cloudflare Dashboard → select domain → DNS → Records
|
||||
- Delete any existing A/AAAA records for `@` and `www`
|
||||
- Add CNAME records:
|
||||
| Type | Name | Target | Proxy |
|
||||
|------|------|--------|-------|
|
||||
| CNAME | `@` | `a838e9dc-0af5-4212-8af2-6864eb15e1b5.cfargotunnel.com` | Proxied ✓ |
|
||||
| CNAME | `www` | `a838e9dc-0af5-4212-8af2-6864eb15e1b5.cfargotunnel.com` | Proxied ✓ |
|
||||
|
||||
**API Credentials** (on Netcup at `~/.cloudflare*`):
|
||||
- `CLOUDFLARE_API_TOKEN` - Zone read access only
|
||||
- `CLOUDFLARE_TUNNEL_TOKEN` - Tunnel management only
|
||||
- See **API Keys & Services** section above for Domain Management Token (required for DNS automation)
|
||||
|
||||
### 🔄 Auto-Deploy Webhook System
|
||||
**Location:** `/opt/deploy-webhook/` on Netcup RS 8000
|
||||
**Endpoint:** `https://deploy.jeffemmett.com/deploy/<repo-name>`
|
||||
**Secret:** `gitea-deploy-secret-2025`
|
||||
|
||||
Pushes to Gitea automatically trigger rebuilds. The webhook receiver:
|
||||
1. Validates the HMAC signature from Gitea (a manual test sketch follows this list)
|
||||
2. Runs `git pull && docker compose up -d --build`
|
||||
3. Returns build status
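
To exercise the receiver without pushing, you can replay a payload by hand. This is a sketch only: the payload shape and whether `webhook.py` reads Gitea's `X-Gitea-Signature` header exactly like this are assumptions.

```bash
SECRET="gitea-deploy-secret-2025"
PAYLOAD='{"repository":{"name":"mycofi-earth-website"},"ref":"refs/heads/main"}'
# Gitea signs the request body with HMAC-SHA256 of the webhook secret
SIG=$(printf '%s' "$PAYLOAD" | openssl dgst -sha256 -hmac "$SECRET" | awk '{print $2}')
curl -s -X POST "https://deploy.jeffemmett.com/deploy/mycofi-earth-website" \
  -H "Content-Type: application/json" \
  -H "X-Gitea-Signature: ${SIG}" \
  -d "$PAYLOAD"
```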
|
||||
|
||||
**Adding a new repo to auto-deploy:**
|
||||
1. Add entry to `/opt/deploy-webhook/webhook.py` REPOS dict
|
||||
2. Restart: `ssh netcup "cd /opt/deploy-webhook && docker compose up -d --build"`
|
||||
3. Add Gitea webhook:
|
||||
```bash
|
||||
curl -X POST "https://gitea.jeffemmett.com/api/v1/repos/jeffemmett/<repo>/hooks" \
|
||||
-H "Authorization: token <gitea-token>" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"type":"gitea","active":true,"events":["push"],"config":{"url":"https://deploy.jeffemmett.com/deploy/<repo>","content_type":"json","secret":"gitea-deploy-secret-2025"}}'
|
||||
```
|
||||
|
||||
**Currently auto-deploying:**
|
||||
- `decolonize-time-website` → /opt/websites/decolonize-time-website
|
||||
- `mycofi-earth-website` → /opt/websites/mycofi-earth-website
|
||||
- `games-platform` → /opt/apps/games-platform
|
||||
|
||||
### 🔐 SSH Keys Quick Reference
|
||||
|
||||
**Local keys** (in `~/.ssh/` on your laptop):
|
||||
|
||||
| Service | Private Key | Public Key | Purpose |
|
||||
|---------|-------------|------------|---------|
|
||||
| **Gitea** | `gitea_ed25519` | `gitea_ed25519.pub` | Primary git repository |
|
||||
| **GitHub** | `github_deploy_key` | `github_deploy_key.pub` | Public mirror sync |
|
||||
| **Netcup RS 8000** | `netcup_ed25519` | `netcup_ed25519.pub` | Primary server SSH |
|
||||
| **RunPod** | `runpod_ed25519` | `runpod_ed25519.pub` | GPU pods SSH |
|
||||
| **Default** | `id_ed25519` | `id_ed25519.pub` | General purpose/legacy |
|
||||
|
||||
**Server-side keys** (in `/root/.ssh/` on Netcup RS 8000):
|
||||
|
||||
| Service | Key File | Purpose |
|
||||
|---------|----------|---------|
|
||||
| **Gitea** | `gitea_ed25519` | Server pulls from Gitea repos |
|
||||
| **GitHub** | `github_ed25519` | Server pulls from GitHub (mirror repos) |
|
||||
|
||||
**SSH Config**: `~/.ssh/config` contains all host configurations
|
||||
**Quick Access**:
|
||||
- `ssh netcup` - Connect to Netcup RS 8000
|
||||
- `ssh runpod` - Connect to RunPod
|
||||
- `ssh gitea.jeffemmett.com` - Git operations
|
||||
|
||||
---
|
||||
|
||||
## 🤖 AI ORCHESTRATION ARCHITECTURE
|
||||
|
||||
### Smart Routing Strategy
|
||||
All AI requests go through the intelligent orchestration layer on RS 8000 (an example call follows the routing list below):
|
||||
|
||||
**Routing Logic:**
|
||||
- **Text/Code (70-80% of workload)**: Always local RS 8000 CPU (Ollama) → FREE
|
||||
- **Images - Low Priority**: RS 8000 CPU (SD 1.5/2.1) → FREE but slow (~60s)
|
||||
- **Images - High Priority**: RunPod GPU (SDXL/SD3) → $0.02/image, fast
|
||||
- **Video Generation**: Always RunPod GPU → $0.50/video (only option)
|
||||
- **Training/Fine-tuning**: RunPod GPU on-demand
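
The same routing can be exercised from the shell against the router's HTTP API; the path and payload below are assumptions (the Python client shown further down is the canonical interface), but they illustrate how priority drives the local-vs-GPU decision.

```bash
# Low priority -> expected to stay on the free local CPU path
curl -s -X POST http://159.195.32.209:8000/api/generate/image \
  -H "Content-Type: application/json" \
  -d '{"prompt": "a misty forest", "priority": "low"}'

# High priority -> expected to route to RunPod GPU (~$0.02/image)
curl -s -X POST http://159.195.32.209:8000/api/generate/image \
  -H "Content-Type: application/json" \
  -d '{"prompt": "a misty forest", "priority": "high"}'
```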
|
||||
|
||||
**Queue System:**
|
||||
- Redis-based queues: text, image, code, video (see the depth check after this list)
|
||||
- Priority-based routing (low/normal/high)
|
||||
- Worker pools scale based on load
|
||||
- Cost tracking per job, per user
|
||||
|
||||
**Cost Optimization:**
|
||||
- Target: $90-120/mo (vs $136-236/mo current)
|
||||
- Savings: $552-1,392/year
|
||||
- 70-80% of workload FREE (local CPU)
|
||||
- GPU only when needed (serverless = no idle costs)
|
||||
|
||||
### Deployment Architecture
|
||||
```
|
||||
RS 8000 G12 Pro (Netcup)
|
||||
├── Cloudflare Tunnel (secure ingress)
|
||||
├── Traefik Reverse Proxy (auto-discovery)
|
||||
│ └── Routes to all services via Docker labels
|
||||
├── Core Services
|
||||
│ ├── Gitea (git hosting) - gitea.jeffemmett.com
|
||||
│ └── Other internal tools
|
||||
├── AI Services
|
||||
│ ├── Ollama (text/code models)
|
||||
│ ├── Stable Diffusion (CPU fallback)
|
||||
│ └── Smart Router API (FastAPI)
|
||||
├── Queue Infrastructure
|
||||
│ ├── Redis (job queues)
|
||||
│ └── PostgreSQL (job history/analytics)
|
||||
├── Monitoring
|
||||
│ ├── Prometheus (metrics)
|
||||
│ ├── Grafana (dashboards)
|
||||
│ └── Cost tracking API
|
||||
└── Application Hosting
|
||||
├── All websites (Dockerized + Traefik labels)
|
||||
├── All apps (Dockerized + Traefik labels)
|
||||
└── Backend services (Dockerized)
|
||||
|
||||
RunPod Serverless (GPU Burst)
|
||||
├── SDXL/SD3 endpoints
|
||||
├── Video generation (Wan2.1)
|
||||
└── Training/fine-tuning jobs
|
||||
```
|
||||
|
||||
### Integration Pattern for Projects
|
||||
All projects use unified AI client SDK:
|
||||
```python
|
||||
from orchestrator_client import AIOrchestrator
|
||||
ai = AIOrchestrator("http://rs8000-ip:8000")
|
||||
|
||||
# Automatically routes based on priority & model
|
||||
result = await ai.generate_text(prompt, priority="low") # → FREE CPU
|
||||
result = await ai.generate_image(prompt, priority="high") # → RunPod GPU
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 💰 GPU COST ANALYSIS & MIGRATION PLAN
|
||||
|
||||
### Current Infrastructure Costs (Monthly)
|
||||
|
||||
| Service | Type | Cost | Notes |
|
||||
|---------|------|------|-------|
|
||||
| Netcup RS 8000 G12 Pro | Fixed | ~€45 | 20 cores, 64GB RAM, 3TB (CPU-only) |
|
||||
| RunPod Serverless | Variable | $50-100 | Pay-per-use GPU (images, video) |
|
||||
| DigitalOcean Droplets | Fixed | ~$48 | ⚠️ DEPRECATED - migrate ASAP |
|
||||
| **Current Total** | | **~$140-190/mo** | |
|
||||
|
||||
### GPU Provider Comparison
|
||||
|
||||
#### Netcup vGPU (NEW - Early Access, Ends July 7, 2025)
|
||||
|
||||
| Plan | GPU | VRAM | vCores | RAM | Storage | Price/mo | Price/hr equiv |
|
||||
|------|-----|------|--------|-----|---------|----------|----------------|
|
||||
| RS 2000 vGPU 7 | H200 | 7 GB dedicated | 8 | 16 GB DDR5 | 512 GB NVMe | €137.31 (~$150) | $0.21/hr |
|
||||
| RS 4000 vGPU 14 | H200 | 14 GB dedicated | 12 | 32 GB DDR5 | 1 TB NVMe | €261.39 (~$285) | $0.40/hr |
|
||||
|
||||
**Pros:**
|
||||
- NVIDIA H200 (latest gen, better than H100 for inference)
|
||||
- Dedicated VRAM (no noisy neighbors)
|
||||
- Germany location (EU data sovereignty, low latency to RS 8000)
|
||||
- Fixed monthly cost = predictable budgeting
|
||||
- 24/7 availability, no cold starts
|
||||
|
||||
**Cons:**
|
||||
- Pay even when idle
|
||||
- Limited to 7GB or 14GB VRAM options
|
||||
- Early access = limited availability
|
||||
|
||||
#### RunPod Serverless (Current)
|
||||
|
||||
| GPU | VRAM | Price/hr | Typical Use |
|
||||
|-----|------|----------|-------------|
|
||||
| RTX 4090 | 24 GB | ~$0.44/hr | SDXL, medium models |
|
||||
| A100 40GB | 40 GB | ~$1.14/hr | Large models, training |
|
||||
| H100 80GB | 80 GB | ~$2.49/hr | Largest models |
|
||||
|
||||
**Current Endpoint Costs:**
|
||||
- Image (SD/SDXL): ~$0.02/image (~2s compute)
|
||||
- Video (Wan2.2): ~$0.50/video (~60s compute)
|
||||
- Text (vLLM): ~$0.001/request
|
||||
- Whisper: ~$0.01/minute audio
|
||||
|
||||
**Pros:**
|
||||
- Zero idle costs
|
||||
- Unlimited burst capacity
|
||||
- Wide GPU selection (up to 80GB VRAM)
|
||||
- Pay only for actual compute
|
||||
|
||||
**Cons:**
|
||||
- Cold start delays (10-30s first request)
|
||||
- Variable availability during peak times
|
||||
- Per-request costs add up at scale
|
||||
|
||||
### Break-even Analysis
|
||||
|
||||
**When does Netcup vGPU become cheaper than RunPod?**
|
||||
|
||||
| Scenario | RunPod Cost | Netcup RS 2000 vGPU 7 | Netcup RS 4000 vGPU 14 |
|
||||
|----------|-------------|----------------------|------------------------|
|
||||
| 1,000 images/mo | $20 | $150 ❌ | $285 ❌ |
|
||||
| 5,000 images/mo | $100 | $150 ❌ | $285 ❌ |
|
||||
| **7,500 images/mo** | **$150** | **$150 ✅** | $285 ❌ |
|
||||
| 10,000 images/mo | $200 | $150 ✅ | $285 ❌ |
|
||||
| **14,250 images/mo** | **$285** | $150 ✅ | **$285 ✅** |
|
||||
| 100 videos/mo | $50 | $150 ❌ | $285 ❌ |
|
||||
| **300 videos/mo** | **$150** | **$150 ✅** | $285 ❌ |
|
||||
| 500 videos/mo | $250 | $150 ✅ | $285 ❌ |
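
The break-even rows follow directly from the per-unit RunPod costs above; a quick shell check reproduces them.

```bash
# Fixed monthly cost / RunPod per-unit cost = units per month to break even
echo "RS 2000 vGPU 7  (~\$150/mo): $(echo '150 / 0.02' | bc) images or $(echo '150 / 0.50' | bc) videos"
echo "RS 4000 vGPU 14 (~\$285/mo): $(echo '285 / 0.02' | bc) images"
```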
|
||||
|
||||
**Recommendation by Usage Pattern:**
|
||||
|
||||
| Monthly Usage | Best Option | Est. Cost |
|
||||
|---------------|-------------|-----------|
|
||||
| < 5,000 images OR < 250 videos | RunPod Serverless | $50-100 |
|
||||
| 5,000-10,000 images OR 250-500 videos | **Netcup RS 2000 vGPU 7** | $150 fixed |
|
||||
| > 10,000 images OR > 500 videos + training | **Netcup RS 4000 vGPU 14** | $285 fixed |
|
||||
| Unpredictable/bursty workloads | RunPod Serverless | Variable |
|
||||
|
||||
### Migration Strategy
|
||||
|
||||
#### Phase 1: Immediate (Before July 7, 2025)
|
||||
**Decision Point: Secure Netcup vGPU Early Access?**
|
||||
|
||||
- [ ] Monitor actual GPU usage for 2-4 weeks
|
||||
- [ ] Calculate average monthly image/video generation
|
||||
- [ ] If consistently > 5,000 images/mo → Consider RS 2000 vGPU 7
|
||||
- [ ] If consistently > 10,000 images/mo → Consider RS 4000 vGPU 14
|
||||
- [ ] **ACTION**: Redeem early access code if usage justifies fixed GPU
|
||||
|
||||
#### Phase 2: Hybrid Architecture (If vGPU Acquired)
|
||||
|
||||
```
|
||||
RS 8000 G12 Pro (CPU - Current)
|
||||
├── Ollama (text/code) → FREE
|
||||
├── SD 1.5/2.1 CPU fallback → FREE
|
||||
└── Orchestrator API
|
||||
|
||||
Netcup vGPU Server (NEW - If purchased)
|
||||
├── Primary GPU workloads
|
||||
├── SDXL/SD3 generation
|
||||
├── Video generation (Wan2.1 I2V)
|
||||
├── Model inference (14B params with 14GB VRAM)
|
||||
└── Connected via internal netcup network (low latency)
|
||||
|
||||
RunPod Serverless (Burst Only)
|
||||
├── Overflow capacity
|
||||
├── Models requiring > 14GB VRAM
|
||||
├── Training/fine-tuning jobs
|
||||
└── Geographic distribution needs
|
||||
```
|
||||
|
||||
#### Phase 3: Cost Optimization Targets
|
||||
|
||||
| Scenario | Current | With vGPU Migration | Savings |
|
||||
|----------|---------|---------------------|---------|
|
||||
| Low usage | $140/mo | $95/mo (RS8000 + minimal RunPod) | $540/yr |
|
||||
| Medium usage | $190/mo | $195/mo (RS8000 + vGPU 7) | Break-even |
|
||||
| High usage | $250/mo | $195/mo (RS8000 + vGPU 7) | $660/yr |
|
||||
| Very high usage | $350/mo | $330/mo (RS8000 + vGPU 14) | $240/yr |
|
||||
|
||||
### Model VRAM Requirements Reference
|
||||
|
||||
| Model | VRAM Needed | Fits vGPU 7? | Fits vGPU 14? |
|
||||
|-------|-------------|--------------|---------------|
|
||||
| SD 1.5 | ~4 GB | ✅ | ✅ |
|
||||
| SD 2.1 | ~5 GB | ✅ | ✅ |
|
||||
| SDXL | ~7 GB | ⚠️ Tight | ✅ |
|
||||
| SD3 Medium | ~8 GB | ❌ | ✅ |
|
||||
| Wan2.1 I2V 14B | ~12 GB | ❌ | ✅ |
|
||||
| Wan2.1 T2V 14B | ~14 GB | ❌ | ⚠️ Tight |
|
||||
| Flux.1 Dev | ~12 GB | ❌ | ✅ |
|
||||
| LLaMA 3 8B (Q4) | ~6 GB | ✅ | ✅ |
|
||||
| LLaMA 3 70B (Q4) | ~40 GB | ❌ | ❌ (RunPod) |
|
||||
|
||||
### Decision Framework
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────┐
|
||||
│ GPU WORKLOAD DECISION TREE │
|
||||
├─────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ Is usage predictable and consistent? │
|
||||
│ ├── YES → Is monthly GPU spend > $150? │
|
||||
│ │ ├── YES → Netcup vGPU (fixed cost wins) │
|
||||
│ │ └── NO → RunPod Serverless (no idle cost) │
|
||||
│ └── NO → RunPod Serverless (pay for what you use) │
|
||||
│ │
|
||||
│ Does model require > 14GB VRAM? │
|
||||
│ ├── YES → RunPod (A100/H100 on-demand) │
|
||||
│ └── NO → Netcup vGPU or RS 8000 CPU │
|
||||
│ │
|
||||
│ Is low latency critical? │
|
||||
│ ├── YES → Netcup vGPU (same datacenter as RS 8000) │
|
||||
│ └── NO → RunPod Serverless (acceptable for batch) │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Monitoring & Review Schedule
|
||||
|
||||
- **Weekly**: Review RunPod spend dashboard
|
||||
- **Monthly**: Calculate total GPU costs, compare to vGPU break-even
|
||||
- **Quarterly**: Re-evaluate architecture, consider plan changes
|
||||
- **Annually**: Full infrastructure cost audit
|
||||
|
||||
### Action Items
|
||||
|
||||
- [ ] **URGENT**: Decide on Netcup vGPU early access before July 7, 2025
|
||||
- [ ] Set up GPU usage tracking in orchestrator
|
||||
- [ ] Create Grafana dashboard for cost monitoring
|
||||
- [ ] Test Wan2.1 I2V 14B model on vGPU 14 (if acquired)
|
||||
- [ ] Document migration runbook for vGPU setup
|
||||
- [ ] Complete DigitalOcean deprecation (separate from GPU decision)
|
||||
|
||||
---
|
||||
|
||||
## 📁 PROJECT PORTFOLIO STRUCTURE
|
||||
|
||||
### Repository Organization
|
||||
- **Location**: `/home/jeffe/Github/`
|
||||
- **Primary Flow**: Gitea (source of truth) → GitHub (public mirror)
|
||||
- **Containerization**: ALL repos must be Dockerized with optimized production containers
|
||||
|
||||
### 🎯 MAIN PROJECT: canvas-website
|
||||
**Location**: `/home/jeffe/Github/canvas-website`
|
||||
**Description**: Collaborative canvas deployment - the integration hub where all tools come together
|
||||
- Tldraw-based collaborative canvas platform
|
||||
- Integrates Hyperindex, rSpace, MycoFi, and other tools
|
||||
- Real-time collaboration features
|
||||
- Deployed on RS 8000 in Docker
|
||||
- Uses AI orchestrator for intelligent features
|
||||
|
||||
### Project Categories
|
||||
|
||||
**AI & Infrastructure:**
|
||||
- AI Orchestrator (smart routing between RS 8000 & RunPod)
|
||||
- Model hosting & fine-tuning pipelines
|
||||
- Cost optimization & monitoring dashboards
|
||||
|
||||
**Web Applications & Sites:**
|
||||
- **canvas-website**: Main collaborative canvas (integration hub)
|
||||
- All deployed in Docker containers on RS 8000
|
||||
- Cloudflare Workers for edge functions (Hyperindex)
|
||||
- Static sites + dynamic backends containerized
|
||||
|
||||
**Supporting Projects:**
|
||||
- **Hyperindex**: Tldraw canvas integration (Cloudflare stack) - integrates into canvas-website
|
||||
- **rSpace**: Real-time collaboration platform - integrates into canvas-website
|
||||
- **MycoFi**: DeFi/Web3 project - integrates into canvas-website
|
||||
- **Canvas-related tools**: Knowledge graph & visualization components
|
||||
|
||||
### Deployment Strategy
|
||||
1. **Development**: Local WSL2 environment (`/home/jeffe/Github/`)
|
||||
2. **Version Control**: Push to Gitea FIRST → Auto-mirror to GitHub
|
||||
3. **Containerization**: Build optimized Docker images with Traefik labels
|
||||
4. **Deployment**: Deploy to RS 8000 via Docker Compose (join `traefik-public` network)
|
||||
5. **Routing**: Traefik auto-discovers service via labels, no config changes needed
|
||||
6. **DNS**: Add hostname to Cloudflare tunnel (if new domain) or it just works (existing domains)
|
||||
7. **AI Integration**: Connect to local orchestrator API
|
||||
8. **Monitoring**: Grafana dashboards for all services
|
||||
|
||||
### Infrastructure Philosophy
|
||||
- **Self-hosted first**: Own your infrastructure (RS 8000 + Gitea)
|
||||
- **Cloud for edge cases**: Cloudflare (edge), RunPod (GPU burst)
|
||||
- **Cost-optimized**: Local CPU for 70-80% of workload
|
||||
- **Dockerized everything**: Reproducible, scalable, maintainable
|
||||
- **Smart orchestration**: Right compute for the right job
|
||||
|
||||
---
|
||||
|
||||
- Make sure the Hugging Face download targets a non-deprecated model version, then proceed with Image-to-Video 14B 720p (RECOMMENDED):

```bash
huggingface-cli download Wan-AI/Wan2.1-I2V-14B-720P \
  --include "*.safetensors" \
  --local-dir models/diffusion_models/wan2.1_i2v_14b
```
|
||||
|
||||
## 🕸️ HYPERINDEX PROJECT - TOP PRIORITY
|
||||
|
||||
**Location:** `/home/jeffe/Github/hyperindex-system/`
|
||||
|
||||
When user is ready to work on the hyperindexing system:
|
||||
1. Reference `HYPERINDEX_PROJECT.md` for complete architecture and implementation details
|
||||
2. Follow `HYPERINDEX_TODO.md` for step-by-step checklist
|
||||
3. Start with Phase 1 (Database & Core Types), then proceed sequentially through Phase 5
|
||||
4. This is a tldraw canvas integration project using Cloudflare Workers, D1, R2, and Durable Objects
|
||||
5. Creates a "living, mycelial network" of web discoveries that spawn on the canvas in real-time
|
||||
|
||||
---
|
||||
|
||||
## 📋 BACKLOG.MD - UNIFIED TASK MANAGEMENT
|
||||
|
||||
**All projects use Backlog.md for task tracking.** Tasks are managed as markdown files and can be viewed at `backlog.jeffemmett.com` for a unified cross-project view.
|
||||
|
||||
### MCP Integration
|
||||
Backlog.md is integrated via MCP server. Available tools:
|
||||
- `backlog.task_create` - Create new tasks
|
||||
- `backlog.task_list` - List tasks with filters
|
||||
- `backlog.task_update` - Update task status/details
|
||||
- `backlog.task_view` - View task details
|
||||
- `backlog.search` - Search across tasks, docs, decisions
|
||||
|
||||
### Task Lifecycle Workflow
|
||||
|
||||
**CRITICAL: Claude agents MUST follow this workflow for ALL development tasks:**
|
||||
|
||||
#### 1. Task Discovery (Before Starting Work)
|
||||
```bash
|
||||
# Check if task already exists
|
||||
backlog search "<task description>" --plain
|
||||
|
||||
# List current tasks
|
||||
backlog task list --plain
|
||||
```
|
||||
|
||||
#### 2. Task Creation (If Not Exists)
|
||||
```bash
|
||||
# Create task with full details
|
||||
backlog task create "Task Title" \
|
||||
--desc "Detailed description" \
|
||||
--priority high \
|
||||
--status "To Do"
|
||||
```
|
||||
|
||||
#### 3. Starting Work (Move to In Progress)
|
||||
```bash
|
||||
# Update status when starting
|
||||
backlog task edit <task-id> --status "In Progress"
|
||||
```
|
||||
|
||||
#### 4. During Development (Update Notes)
|
||||
```bash
|
||||
# Append progress notes
|
||||
backlog task edit <task-id> --append-notes "Completed X, working on Y"
|
||||
|
||||
# Update acceptance criteria
|
||||
backlog task edit <task-id> --check-ac 1
|
||||
```
|
||||
|
||||
#### 5. Completion (Move to Done)
|
||||
```bash
|
||||
# Mark complete when finished
|
||||
backlog task edit <task-id> --status "Done"
|
||||
```
|
||||
|
||||
### Project Initialization
|
||||
|
||||
When starting work in a new repository that doesn't have backlog:
|
||||
```bash
|
||||
cd /path/to/repo
|
||||
backlog init "Project Name" --integration-mode mcp --defaults
|
||||
```
|
||||
|
||||
This creates the `backlog/` directory structure:
|
||||
```
|
||||
backlog/
|
||||
├── config.yml # Project configuration
|
||||
├── tasks/ # Active tasks
|
||||
├── completed/ # Finished tasks
|
||||
├── drafts/ # Draft tasks
|
||||
├── docs/ # Project documentation
|
||||
├── decisions/ # Architecture decision records
|
||||
└── archive/ # Archived tasks
|
||||
```
|
||||
|
||||
### Task File Format
|
||||
Tasks are markdown files with YAML frontmatter:
|
||||
```yaml
|
||||
---
|
||||
id: task-001
|
||||
title: Feature implementation
|
||||
status: In Progress
|
||||
assignee: [@claude]
|
||||
created_date: '2025-12-03 14:30'
|
||||
labels: [feature, backend]
|
||||
priority: high
|
||||
dependencies: [task-002]
|
||||
---
|
||||
|
||||
## Description
|
||||
What needs to be done...
|
||||
|
||||
## Plan
|
||||
1. Step one
|
||||
2. Step two
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Criterion 1
|
||||
- [x] Criterion 2 (completed)
|
||||
|
||||
## Notes
|
||||
Progress updates go here...
|
||||
```
|
||||
|
||||
### Cross-Project Aggregation (backlog.jeffemmett.com)
|
||||
|
||||
**Architecture:**
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ backlog.jeffemmett.com │
|
||||
│ (Unified Kanban Dashboard) │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
|
||||
│ │ canvas-web │ │ hyperindex │ │ mycofi │ ... │
|
||||
│ │ (purple) │ │ (green) │ │ (blue) │ │
|
||||
│ └──────┬──────┘ └──────┬──────┘ └──────┬──────┘ │
|
||||
│ │ │ │ │
|
||||
│ └────────────────┴────────────────┘ │
|
||||
│ │ │
|
||||
│ ┌───────────┴───────────┐ │
|
||||
│ │ Aggregation API │ │
|
||||
│ │ (polls all projects) │ │
|
||||
│ └───────────────────────┘ │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
|
||||
Data Sources:
|
||||
├── Local: /home/jeffe/Github/*/backlog/
|
||||
└── Remote: ssh netcup "ls /opt/*/backlog/"
|
||||
```
|
||||
|
||||
**Color Coding by Project:**
|
||||
| Project | Color | Location |
|
||||
|---------|-------|----------|
|
||||
| canvas-website | Purple | Local + Netcup |
|
||||
| hyperindex-system | Green | Local |
|
||||
| mycofi-earth | Blue | Local + Netcup |
|
||||
| decolonize-time | Orange | Local + Netcup |
|
||||
| ai-orchestrator | Red | Netcup |
|
||||
|
||||
**Aggregation Service** (to be deployed on Netcup):
|
||||
- Polls all project `backlog/tasks/` directories
|
||||
- Serves unified JSON API at `api.backlog.jeffemmett.com`
|
||||
- Web UI at `backlog.jeffemmett.com` shows combined Kanban
|
||||
- Real-time updates via WebSocket
|
||||
- Filter by project, status, priority, assignee
|
||||
|
||||
### Agent Behavior Requirements
|
||||
|
||||
**When Claude starts working on ANY task:**
|
||||
|
||||
1. **Check for existing backlog** in the repo:
|
||||
```bash
|
||||
ls backlog/config.yml 2>/dev/null || echo "Backlog not initialized"
|
||||
```
|
||||
|
||||
2. **If backlog exists**, search for related tasks:
|
||||
```bash
|
||||
backlog search "<relevant keywords>" --plain
|
||||
```
|
||||
|
||||
3. **Create or update task** before writing code:
|
||||
```bash
|
||||
# If new task needed:
|
||||
backlog task create "Task title" --status "In Progress"
|
||||
|
||||
# If task exists:
|
||||
backlog task edit <id> --status "In Progress"
|
||||
```
|
||||
|
||||
4. **Update task on completion**:
|
||||
```bash
|
||||
backlog task edit <id> --status "Done" --append-notes "Implementation complete"
|
||||
```
|
||||
|
||||
5. **Never leave tasks in "In Progress"** when stopping work - either complete them or add notes explaining blockers.
|
||||
|
||||
### Viewing Tasks
|
||||
|
||||
**Terminal Kanban Board:**
|
||||
```bash
|
||||
backlog board
|
||||
```
|
||||
|
||||
**Web Interface (single project):**
|
||||
```bash
|
||||
backlog browser --port 6420
|
||||
```
|
||||
|
||||
**Unified View (all projects):**
|
||||
Visit `backlog.jeffemmett.com` (served from Netcup)
|
||||
|
||||
### Backlog CLI Quick Reference
|
||||
|
||||
#### Task Operations
|
||||
| Action | Command |
|
||||
|--------|---------|
|
||||
| View task | `backlog task 42 --plain` |
|
||||
| List tasks | `backlog task list --plain` |
|
||||
| Search tasks | `backlog search "topic" --plain` |
|
||||
| Filter by status | `backlog task list -s "In Progress" --plain` |
|
||||
| Create task | `backlog task create "Title" -d "Description" --ac "Criterion 1"` |
|
||||
| Edit task | `backlog task edit 42 -t "New Title" -s "In Progress"` |
|
||||
| Assign task | `backlog task edit 42 -a @claude` |
|
||||
|
||||
#### Acceptance Criteria Management
|
||||
| Action | Command |
|
||||
|--------|---------|
|
||||
| Add AC | `backlog task edit 42 --ac "New criterion"` |
|
||||
| Check AC #1 | `backlog task edit 42 --check-ac 1` |
|
||||
| Check multiple | `backlog task edit 42 --check-ac 1 --check-ac 2` |
|
||||
| Uncheck AC | `backlog task edit 42 --uncheck-ac 1` |
|
||||
| Remove AC | `backlog task edit 42 --remove-ac 2` |
|
||||
|
||||
#### Multi-line Input (Description/Plan/Notes)
|
||||
The CLI preserves input literally. Use shell-specific syntax for real newlines:
|
||||
|
||||
```bash
|
||||
# Bash/Zsh (ANSI-C quoting)
|
||||
backlog task edit 42 --notes $'Line1\nLine2\nLine3'
|
||||
backlog task edit 42 --plan $'1. Step one\n2. Step two'
|
||||
|
||||
# POSIX portable
|
||||
backlog task edit 42 --notes "$(printf 'Line1\nLine2')"
|
||||
|
||||
# Append notes progressively
|
||||
backlog task edit 42 --append-notes $'- Completed X\n- Working on Y'
|
||||
```
|
||||
|
||||
#### Definition of Done (DoD)
|
||||
A task is **Done** only when ALL of these are complete:
|
||||
|
||||
**Via CLI:**
|
||||
1. All acceptance criteria checked: `--check-ac <index>` for each
|
||||
2. Implementation notes added: `--notes "..."` or `--append-notes "..."`
|
||||
3. Status set to Done: `-s Done`
|
||||
|
||||
**Via Code/Testing:**
|
||||
4. Tests pass (run test suite and linting)
|
||||
5. Documentation updated if needed
|
||||
6. Code self-reviewed
|
||||
7. No regressions
|
||||
|
||||
**NEVER mark a task as Done without completing ALL items above.**
|
||||
|
||||
### Configuration Reference

Default `backlog/config.yml`:
```yaml
project_name: "Project Name"
default_status: "To Do"
statuses: ["To Do", "In Progress", "Done"]
labels: []
milestones: []
date_format: yyyy-mm-dd
max_column_width: 20
auto_open_browser: true
default_port: 6420
remote_operations: true
auto_commit: true
zero_padded_ids: 3
bypass_git_hooks: false
check_active_branches: true
active_branch_days: 60
```

---

## 🔧 TROUBLESHOOTING

### tmux "server exited unexpectedly"
This error occurs when a stale socket file exists from a crashed tmux server.

**Fix:**
```bash
rm -f /tmp/tmux-$(id -u)/default
```

Then start a new session normally with `tmux` or `tmux new -s <name>`.

---
|
||||
|
|
|
|||
# Migrating from Vercel to Cloudflare Pages
|
||||
|
||||
This guide will help you migrate your site from Vercel to Cloudflare Pages.
|
||||
|
||||
## Overview
|
||||
|
||||
**Current Setup:**
|
||||
- ✅ Frontend: Vercel (static site)
|
||||
- ✅ Backend: Cloudflare Worker (`jeffemmett-canvas.jeffemmett.workers.dev`)
|
||||
|
||||
**Target Setup:**
|
||||
- ✅ Frontend: Cloudflare Pages (`canvas-website.pages.dev`)
|
||||
- ✅ Backend: Cloudflare Worker (unchanged)
|
||||
|
||||
## Step 1: Configure Cloudflare Pages
|
||||
|
||||
### In Cloudflare Dashboard:
|
||||
|
||||
1. Go to [Cloudflare Dashboard](https://dash.cloudflare.com/)
|
||||
2. Navigate to **Pages** → **Create a project**
|
||||
3. Connect your GitHub repository: `Jeff-Emmett/canvas-website`
|
||||
4. Configure build settings:
|
||||
- **Project name**: `canvas-website` (or your preferred name)
|
||||
- **Production branch**: `main`
|
||||
- **Build command**: `npm run build`
|
||||
- **Build output directory**: `dist`
|
||||
- **Root directory**: `/` (leave empty)
|
||||
|
||||
5. Click **Save and Deploy**
|
||||
|
||||
## Step 2: Configure Environment Variables
|
||||
|
||||
### In Cloudflare Pages Dashboard:
|
||||
|
||||
1. Go to your Pages project → **Settings** → **Environment variables**
|
||||
2. Add all your `VITE_*` environment variables from Vercel:
|
||||
|
||||
**Required variables** (if you use them):
|
||||
```
|
||||
VITE_WORKER_ENV=production
|
||||
VITE_GITHUB_TOKEN=...
|
||||
VITE_QUARTZ_REPO=...
|
||||
VITE_QUARTZ_BRANCH=...
|
||||
VITE_CLOUDFLARE_API_KEY=...
|
||||
VITE_CLOUDFLARE_ACCOUNT_ID=...
|
||||
VITE_QUARTZ_API_URL=...
|
||||
VITE_QUARTZ_API_KEY=...
|
||||
VITE_DAILY_API_KEY=...
|
||||
```
|
||||
|
||||
**Note**: Only add variables that start with `VITE_` (these are exposed to the browser)
|
||||
|
||||
3. Set different values for **Production** and **Preview** environments if needed
|
||||
|
||||
## Step 3: Configure Custom Domain (Optional)
|
||||
|
||||
If you have a custom domain:
|
||||
|
||||
1. Go to **Pages** → Your project → **Custom domains**
|
||||
2. Click **Set up a custom domain**
|
||||
3. Add your domain (e.g., `jeffemmett.com`)
|
||||
4. Follow DNS instructions to add the CNAME record
|
||||
|
||||
## Step 4: Verify Routing
|
||||
|
||||
The `_redirects` file has been created to handle SPA routing. This replaces the `rewrites` from `vercel.json`.
|
||||
|
||||
**Routes configured** (a sketch of the file follows this list):
|
||||
- `/board/*` → serves `index.html`
|
||||
- `/inbox` → serves `index.html`
|
||||
- `/contact` → serves `index.html`
|
||||
- `/presentations` → serves `index.html`
|
||||
- `/dashboard` → serves `index.html`
|
||||
- All other routes → serves `index.html` (SPA fallback)
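
The generated `_redirects` likely looks something like the following (Cloudflare Pages format: source path, destination, status); check the file in the repo for the authoritative contents.

```
/board/*        /index.html   200
/inbox          /index.html   200
/contact        /index.html   200
/presentations  /index.html   200
/dashboard      /index.html   200
/*              /index.html   200
```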
|
||||
|
||||
## Step 5: Update Worker URL for Production
|
||||
|
||||
Make sure your production environment uses the production worker:
|
||||
|
||||
1. In Cloudflare Pages → **Settings** → **Environment variables**
|
||||
2. Set `VITE_WORKER_ENV=production` for **Production** environment
|
||||
3. This will make the frontend connect to: `https://jeffemmett-canvas.jeffemmett.workers.dev`
|
||||
|
||||
## Step 6: Test the Deployment
|
||||
|
||||
1. After the first deployment completes, visit your Pages URL
|
||||
2. Test all routes:
|
||||
- `/board`
|
||||
- `/inbox`
|
||||
- `/contact`
|
||||
- `/presentations`
|
||||
- `/dashboard`
|
||||
3. Verify the canvas app connects to the Worker
|
||||
4. Test real-time collaboration features
|
||||
|
||||
## Step 7: Update DNS (If Using Custom Domain)
|
||||
|
||||
If you're using a custom domain:
|
||||
|
||||
1. Update your DNS records to point to Cloudflare Pages
|
||||
2. Remove Vercel DNS records
|
||||
3. Wait for DNS propagation (can take up to 48 hours)
|
||||
|
||||
## Step 8: Disable Vercel Deployment (Optional)
|
||||
|
||||
Once everything is working on Cloudflare Pages:
|
||||
|
||||
1. Go to Vercel Dashboard
|
||||
2. Navigate to your project → **Settings** → **Git**
|
||||
3. Disconnect the repository or delete the project
|
||||
|
||||
## Differences from Vercel
|
||||
|
||||
### Headers
|
||||
- **Vercel**: Configured in `vercel.json`
|
||||
- **Cloudflare Pages**: Configured in `_headers` file (if needed) or via Cloudflare dashboard
|
||||
|
||||
### Redirects/Rewrites
|
||||
- **Vercel**: Configured in `vercel.json` → `rewrites`
|
||||
- **Cloudflare Pages**: Configured in `_redirects` file ✅ (already created)
|
||||
|
||||
### Environment Variables
|
||||
- **Vercel**: Set in Vercel dashboard
|
||||
- **Cloudflare Pages**: Set in Cloudflare Pages dashboard (same process)
|
||||
|
||||
### Build Settings
|
||||
- **Vercel**: Auto-detected from `vercel.json`
|
||||
- **Cloudflare Pages**: Configured in dashboard (already set above)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Issue: Routes return 404
|
||||
**Solution**: Make sure `_redirects` file is in the `dist` folder after build, or configure it in Cloudflare Pages dashboard
|
||||
|
||||
### Issue: Environment variables not working
|
||||
**Solution**:
|
||||
- Make sure variables start with `VITE_`
|
||||
- Rebuild after adding variables
|
||||
- Check browser console for errors
|
||||
|
||||
### Issue: Worker connection fails
|
||||
**Solution**:
|
||||
- Verify `VITE_WORKER_ENV=production` is set
|
||||
- Check Worker is deployed and accessible
|
||||
- Check CORS settings in Worker
|
||||
|
||||
## Files Changed
|
||||
|
||||
- ✅ Created `_redirects` file (replaces `vercel.json` rewrites)
|
||||
- ✅ Created this migration guide
|
||||
- ⚠️ `vercel.json` can be kept for reference or removed
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. ✅ Configure Cloudflare Pages project
|
||||
2. ✅ Add environment variables
|
||||
3. ✅ Test deployment
|
||||
4. ⏳ Update DNS (if using custom domain)
|
||||
5. ⏳ Disable Vercel (once confirmed working)
|
||||
|
||||
## Support
|
||||
|
||||
If you encounter issues:
|
||||
- Check Cloudflare Pages build logs
|
||||
- Check browser console for errors
|
||||
- Verify Worker is accessible
|
||||
- Check environment variables are set correctly
|
||||
|
||||
|
|
|
|||
# Cloudflare Pages Configuration
|
||||
|
||||
## Issue
|
||||
Cloudflare Pages cannot use the same `wrangler.toml` file as Workers because:
|
||||
- `wrangler.toml` contains Worker-specific configuration (main, account_id, triggers, etc.)
|
||||
- Pages projects have different configuration requirements
|
||||
- Pages cannot have both `main` and `pages_build_output_dir` in the same file
|
||||
|
||||
## Solution: Configure in Cloudflare Dashboard
|
||||
|
||||
Since `wrangler.toml` is for Workers only, configure Pages settings in the Cloudflare Dashboard:
|
||||
|
||||
### Steps:
|
||||
1. Go to [Cloudflare Dashboard](https://dash.cloudflare.com/)
|
||||
2. Navigate to **Pages** → Your Project
|
||||
3. Go to **Settings** → **Builds & deployments**
|
||||
4. Configure:
|
||||
- **Build command**: `npm run build`
|
||||
- **Build output directory**: `dist`
|
||||
- **Root directory**: `/` (or leave empty)
|
||||
5. Save settings
|
||||
|
||||
### Alternative: Use Environment Variables
|
||||
If you need to configure Pages via code, you can set environment variables in the Cloudflare Pages dashboard under **Settings** → **Environment variables**.
|
||||
|
||||
## Worker Deployment
|
||||
Workers are deployed separately using:
|
||||
```bash
|
||||
npm run deploy:worker
|
||||
```
|
||||
or
|
||||
```bash
|
||||
wrangler deploy
|
||||
```
|
||||
|
||||
The `wrangler.toml` file is used only for Worker deployments, not Pages.
|
||||
|
||||
|
|
|
|||
# Cloudflare Worker Native Deployment Setup
|
||||
|
||||
This guide explains how to set up Cloudflare's native Git integration for automatic worker deployments.
|
||||
|
||||
## Quick Setup Steps
|
||||
|
||||
### 1. Enable Git Integration in Cloudflare Dashboard
|
||||
|
||||
1. Go to [Cloudflare Dashboard](https://dash.cloudflare.com/)
|
||||
2. Navigate to **Workers & Pages** → **jeffemmett-canvas**
|
||||
3. Go to **Settings** → **Builds & Deployments**
|
||||
4. Click **"Connect to Git"** or **"Set up Git integration"**
|
||||
5. Authorize Cloudflare to access your GitHub repository
|
||||
6. Select your repository: `Jeff-Emmett/canvas-website`
|
||||
7. Configure:
|
||||
- **Production branch**: `main`
|
||||
- **Build command**: Leave empty (wrangler automatically detects and builds from `wrangler.toml`)
|
||||
- **Root directory**: `/` (or leave empty)
|
||||
|
||||
### 2. Configure Build Settings
|
||||
|
||||
Cloudflare will automatically:
|
||||
- Detect `wrangler.toml` in the root directory
|
||||
- Build and deploy the worker on every push to `main`
|
||||
- Show build status in GitHub (commit statuses, PR comments)
|
||||
|
||||
### 3. Environment Variables
|
||||
|
||||
Set environment variables in Cloudflare Dashboard:
|
||||
1. Go to **Workers & Pages** → **jeffemmett-canvas** → **Settings** → **Variables**
|
||||
2. Add any required environment variables
|
||||
3. These are separate from `wrangler.toml` (which should only have non-sensitive config)
|
||||
|
||||
### 4. Verify Deployment
|
||||
|
||||
After setup (a quick smoke test is sketched after this list):
|
||||
1. Push a commit to `main` branch
|
||||
2. Check Cloudflare Dashboard → **Workers & Pages** → **jeffemmett-canvas** → **Deployments**
|
||||
3. You should see a new deployment triggered by the Git push
|
||||
4. Check GitHub commit status - you should see Cloudflare build status
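
A quick way to confirm the Git-triggered deploy actually went out; `wrangler deployments list` requires a locally authenticated wrangler, and the worker URL is the one configured in `wrangler.toml`.

```bash
# Confirm the worker responds
curl -sI https://jeffemmett-canvas.jeffemmett.workers.dev | head -n 1

# List recent deployments (requires wrangler login locally)
wrangler deployments list
```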
|
||||
|
||||
## How It Works
|
||||
|
||||
- **On push to `main`**: Automatically deploys to production using `wrangler.toml`
|
||||
- **On pull request**: Can optionally deploy to preview environment
|
||||
- **Build status**: Appears in GitHub as commit status and PR comments
|
||||
- **Deployments**: All visible in Cloudflare Dashboard
|
||||
|
||||
## Environment Configuration
|
||||
|
||||
### Production (main branch)
|
||||
- Uses `wrangler.toml` from root directory
|
||||
- Worker name: `jeffemmett-canvas`
|
||||
- R2 buckets: `jeffemmett-canvas`, `board-backups`
|
||||
|
||||
### Development/Preview
|
||||
- For dev environment, you can:
|
||||
- Use a separate worker with `wrangler.dev.toml` (requires manual deployment)
|
||||
- Or configure preview deployments in Cloudflare dashboard
|
||||
- Or use the deprecated GitHub Action (see `.github/workflows/deploy-worker.yml.disabled`)
|
||||
|
||||
## Manual Deployment (if needed)
|
||||
|
||||
If you need to deploy manually:
|
||||
|
||||
```bash
|
||||
# Production
|
||||
npm run deploy:worker
|
||||
# or
|
||||
wrangler deploy
|
||||
|
||||
# Development
|
||||
npm run deploy:worker:dev
|
||||
# or
|
||||
wrangler deploy --config wrangler.dev.toml
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Build fails
|
||||
- Check Cloudflare Dashboard → Deployments → View logs
|
||||
- Ensure `wrangler.toml` is in root directory
|
||||
- Verify all required environment variables are set in Cloudflare dashboard
|
||||
|
||||
### Not deploying automatically
|
||||
- Verify Git integration is connected in Cloudflare dashboard
|
||||
- Check that "Automatically deploy from Git" is enabled
|
||||
- Ensure you're pushing to the configured branch (`main`)
|
||||
|
||||
### Need to revert to GitHub Actions
|
||||
- Rename `.github/workflows/deploy-worker.yml.disabled` back to `deploy-worker.yml`
|
||||
- Disable Git integration in Cloudflare dashboard
|
||||
|
||||
## Benefits of Native Deployment
|
||||
|
||||
✅ **Simpler**: No workflow files to maintain
|
||||
✅ **Integrated**: Build status in GitHub
|
||||
✅ **Automatic**: Resource provisioning (KV, R2, Durable Objects)
|
||||
✅ **Free**: No GitHub Actions minutes usage
|
||||
✅ **Visible**: All deployments in Cloudflare dashboard
|
||||
|
||||
|
|
|
|||
# Data Conversion Guide: TLDraw Sync to Automerge Sync
|
||||
|
||||
This guide explains the data conversion process from the old TLDraw sync format to the new Automerge sync format, and how to verify the conversion is working correctly.
|
||||
|
||||
## Data Format Changes
|
||||
|
||||
### Old Format (TLDraw Sync)
|
||||
```json
|
||||
{
|
||||
"documents": [
|
||||
{ "state": { "id": "shape:abc123", "typeName": "shape", ... } },
|
||||
{ "state": { "id": "page:page", "typeName": "page", ... } }
|
||||
],
|
||||
"schema": { ... }
|
||||
}
|
||||
```
|
||||
|
||||
### New Format (Automerge Sync)
|
||||
```json
|
||||
{
|
||||
"store": {
|
||||
"shape:abc123": { "id": "shape:abc123", "typeName": "shape", ... },
|
||||
"page:page": { "id": "page:page", "typeName": "page", ... }
|
||||
},
|
||||
"schema": { ... }
|
||||
}
|
||||
```
|
||||
|
||||
## Conversion Process
|
||||
|
||||
The conversion happens automatically when a document is loaded from R2. The `AutomergeDurableObject.getDocument()` method detects the format and converts it:
|
||||
|
||||
1. **Automerge Array Format**: Detected by `Array.isArray(rawDoc)`
|
||||
- Converts via `convertAutomergeToStore()`
|
||||
- Extracts `record.state` and uses it as the store record
|
||||
|
||||
2. **Store Format**: Detected by `rawDoc.store` existing
|
||||
- Already in correct format, uses as-is
|
||||
- No conversion needed
|
||||
|
||||
3. **Old Documents Format**: Detected by `rawDoc.documents` existing but no `store`
|
||||
- Converts via `migrateDocumentsToStore()`
|
||||
- Maps `doc.state.id` to `store[doc.state.id] = doc.state`
|
||||
|
||||
4. **Shape Property Migration**: After format conversion, all shapes are migrated via `migrateShapeProperties()`
|
||||
- Ensures required properties exist (x, y, rotation, isLocked, opacity, meta, index)
|
||||
- Moves `w`/`h` from top-level to `props` for geo shapes
|
||||
- Fixes richText structure
|
||||
- Preserves custom shape properties
|
||||
|
||||
## Validation & Error Handling
|
||||
|
||||
The conversion functions now include comprehensive validation:
|
||||
|
||||
- **Missing state.id**: Skipped with warning
|
||||
- **Missing state.typeName**: Skipped with warning
|
||||
- **Null/undefined records**: Skipped with warning
|
||||
- **Invalid ID types**: Skipped with warning
|
||||
- **Malformed shapes**: Fixed during shape migration
|
||||
|
||||
All validation errors are logged with detailed statistics.
|
||||
|
||||
## Custom Records
|
||||
|
||||
Custom record types (like `obsidian_vault:`) are preserved during conversion:
|
||||
- Tracked during conversion
|
||||
- Verified in logs
|
||||
- Preserved in the final store
|
||||
|
||||
## Custom Shapes
|
||||
|
||||
Custom shape types are preserved:
|
||||
- ObsNote
|
||||
- Holon
|
||||
- FathomMeetingsBrowser
|
||||
- HolonBrowser
|
||||
- LocationShare
|
||||
- ObsidianBrowser
|
||||
|
||||
All custom shape properties are preserved during migration.
|
||||
|
||||
## Logging
|
||||
|
||||
The conversion process logs comprehensive statistics:
|
||||
|
||||
```
|
||||
📊 Automerge to Store conversion statistics:
|
||||
- total: Number of records processed
|
||||
- converted: Number successfully converted
|
||||
- skipped: Number skipped (invalid)
|
||||
- errors: Number of errors
|
||||
- customRecordCount: Number of custom records
|
||||
- errorCount: Number of error details
|
||||
```
|
||||
|
||||
Similar statistics are logged for:
|
||||
- Documents to Store migration
|
||||
- Shape property migration
|
||||
|
||||
## Testing
|
||||
|
||||
### Test Edge Cases
|
||||
|
||||
Run the test script to verify edge case handling:
|
||||
|
||||
```bash
|
||||
npx tsx test-data-conversion.ts
|
||||
```
|
||||
|
||||
This tests:
|
||||
- Missing state.id
|
||||
- Missing state.typeName
|
||||
- Null/undefined records
|
||||
- Missing state property
|
||||
- Invalid ID types
|
||||
- Custom records
|
||||
- Malformed shapes
|
||||
- Empty documents
|
||||
- Mixed valid/invalid records
|
||||
|
||||
### Test with Real R2 Data
|
||||
|
||||
To test with actual R2 data:
|
||||
|
||||
1. **Check Worker Logs**: When a document is loaded, check the Cloudflare Worker logs for conversion statistics
|
||||
2. **Verify Data Integrity**: After conversion, verify:
|
||||
- All shapes appear correctly
|
||||
- All properties are preserved
|
||||
- No validation errors in TLDraw
|
||||
- Custom records are present
|
||||
- Custom shapes work correctly
|
||||
|
||||
3. **Monitor Conversion**: Watch for:
|
||||
- High skip counts (may indicate data issues)
|
||||
- Errors during conversion
|
||||
- Missing custom records
|
||||
- Shape migration issues
|
||||
|
||||
## Migration Checklist
|
||||
|
||||
- [x] Format detection (Automerge array, store format, old documents format)
|
||||
- [x] Validation for malformed records
|
||||
- [x] Error handling and logging
|
||||
- [x] Custom record preservation
|
||||
- [x] Custom shape preservation
|
||||
- [x] Shape property migration
|
||||
- [x] Comprehensive logging
|
||||
- [x] Edge case testing
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### High Skip Counts
|
||||
If many records are being skipped:
|
||||
1. Check error details in logs
|
||||
2. Verify data format in R2
|
||||
3. Check for missing required fields
|
||||
|
||||
### Missing Custom Records
|
||||
If custom records are missing:
|
||||
1. Check logs for custom record count
|
||||
2. Verify records start with expected prefix (e.g., `obsidian_vault:`)
|
||||
3. Check if records were filtered during conversion
|
||||
|
||||
### Shape Validation Errors
|
||||
If shapes have validation errors:
|
||||
1. Check shape migration logs
|
||||
2. Verify required properties are present
|
||||
3. Check for w/h in wrong location (should be in props for geo shapes)
|
||||
|
||||
## Backward Compatibility
|
||||
|
||||
The conversion is backward compatible:
|
||||
- Old format documents are automatically converted
|
||||
- New format documents are used as-is
|
||||
- No data loss during conversion
|
||||
- All properties are preserved
|
||||
|
||||
## Future Improvements
|
||||
|
||||
Potential improvements:
|
||||
1. Add migration flag to track converted documents
|
||||
2. Add backup before conversion
|
||||
3. Add rollback mechanism
|
||||
4. Add conversion progress tracking for large documents
|
||||
|
||||
|
|

|
|||
# Data Conversion Summary
|
||||
|
||||
## Overview
|
||||
|
||||
This document summarizes the data conversion implementation from the old tldraw sync format to the new automerge sync format.
|
||||
|
||||
## Conversion Paths
|
||||
|
||||
The system handles three data formats automatically:
|
||||
|
||||
### 1. Automerge Array Format
|
||||
- **Format**: `[{ state: { id: "...", ... } }, ...]`
|
||||
- **Conversion**: `convertAutomergeToStore()`
|
||||
- **Handles**: Raw Automerge document format
|
||||
|
||||
### 2. Store Format (Already Converted)
|
||||
- **Format**: `{ store: { "recordId": {...}, ... }, schema: {...} }`
|
||||
- **Conversion**: None needed - already in correct format
|
||||
- **Handles**: Previously converted documents
|
||||
|
||||
### 3. Old Documents Format (Legacy)
|
||||
- **Format**: `{ documents: [{ state: {...} }, ...] }`
|
||||
- **Conversion**: `migrateDocumentsToStore()`
|
||||
- **Handles**: Old tldraw sync format
|
||||
|
||||
## Validation & Error Handling
|
||||
|
||||
### Record Validation
|
||||
- ✅ Validates `state` property exists
|
||||
- ✅ Validates `state.id` exists and is a string
|
||||
- ✅ Validates `state.typeName` exists (for documents format)
|
||||
- ✅ Skips invalid records with detailed logging
|
||||
- ✅ Preserves valid records
|
||||
|
||||
### Shape Migration
|
||||
- ✅ Ensures required properties (x, y, rotation, opacity, isLocked, meta, index)
|
||||
- ✅ Moves `w`/`h` from top-level to `props` for geo shapes
|
||||
- ✅ Fixes richText structure
|
||||
- ✅ Preserves custom shape properties (ObsNote, Holon, etc.)
|
||||
- ✅ Tracks and verifies custom shapes
|
||||
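A minimal sketch of the per-shape fix-ups listed above (the default values, the `index` fallback, and the geo-shape `w`/`h` move are assumptions based on these bullets, not the exact behaviour of `migrateShapeProperties()`):

```typescript
// Illustrative sketch only; defaults and field handling are assumptions based on
// the bullets above, not the exact production migration.
type ShapeRecord = { id: string; typeName: "shape"; type: string; [key: string]: unknown }

function migrateShape(shape: ShapeRecord): ShapeRecord {
  const migrated: ShapeRecord = { ...shape }

  // Ensure required top-level properties exist
  migrated.x = migrated.x ?? 0
  migrated.y = migrated.y ?? 0
  migrated.rotation = migrated.rotation ?? 0
  migrated.opacity = migrated.opacity ?? 1
  migrated.isLocked = migrated.isLocked ?? false
  migrated.meta = migrated.meta ?? {}
  migrated.index = migrated.index ?? "a1" // assumed default fractional index

  // Move stray top-level w/h into props for geo shapes
  if (migrated.type === "geo") {
    const props = { ...((migrated.props as Record<string, unknown>) ?? {}) }
    for (const key of ["w", "h"] as const) {
      if (key in migrated) {
        props[key] = props[key] ?? migrated[key]
        delete migrated[key]
      }
    }
    migrated.props = props
  }

  return migrated
}
```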
|
||||
### Custom Records
|
||||
- ✅ Preserves `obsidian_vault:` records
|
||||
- ✅ Tracks custom record count
|
||||
- ✅ Logs custom record IDs for verification
|
||||
|
||||
## Logging & Statistics
|
||||
|
||||
All conversion functions now provide comprehensive statistics:
|
||||
|
||||
### Conversion Statistics Include:
|
||||
- Total records processed
|
||||
- Successfully converted count
|
||||
- Skipped records (with reasons)
|
||||
- Errors encountered
|
||||
- Custom records preserved
|
||||
- Shape types distribution
|
||||
- Custom shapes preserved
|
||||
|
||||
### Log Levels:
|
||||
- **Info**: Conversion statistics, successful conversions
|
||||
- **Warn**: Skipped records, warnings (first 10 shown)
|
||||
- **Error**: Conversion errors with details
|
||||
|
||||
## Data Preservation Guarantees
|
||||
|
||||
### What is Preserved:
|
||||
- ✅ All valid shape data
|
||||
- ✅ All custom shape properties (ObsNote, Holon, etc.)
|
||||
- ✅ All custom records (obsidian_vault)
|
||||
- ✅ All metadata
|
||||
- ✅ All text content
|
||||
- ✅ All richText content (structure fixed, content preserved)
|
||||
|
||||
### What is Fixed:
|
||||
- 🔧 Missing required properties (defaults added)
|
||||
- 🔧 Invalid property locations (w/h moved to props)
|
||||
- 🔧 Malformed richText structure
|
||||
- 🔧 Missing typeName (inferred where possible)
|
||||
|
||||
### What is Skipped:
|
||||
- ⚠️ Records with missing `state` property
|
||||
- ⚠️ Records with missing `state.id`
|
||||
- ⚠️ Records with invalid `state.id` type
|
||||
- ⚠️ Records with missing `state.typeName` (for documents format)
|
||||
|
||||
## Testing
|
||||
|
||||
### Unit Tests
|
||||
- `test-data-conversion.ts`: Tests edge cases with malformed data
|
||||
- Covers: missing fields, null records, invalid types, custom records
|
||||
|
||||
### Integration Testing
|
||||
- Test with real R2 data (see `test-r2-conversion.md`)
|
||||
- Verify data integrity after conversion
|
||||
- Check logs for warnings/errors
|
||||
|
||||
## Migration Safety
|
||||
|
||||
### Safety Features:
|
||||
1. **Non-destructive**: Original R2 data is not modified until first save
|
||||
2. **Error handling**: Invalid records are skipped, not lost
|
||||
3. **Comprehensive logging**: All actions are logged for debugging
|
||||
4. **Fallback**: Creates empty document if conversion fails completely
|
||||
|
||||
### Rollback:
|
||||
- Original data remains in R2 until overwritten
|
||||
- Can restore from backup if needed
|
||||
- Conversion errors don't corrupt existing data
|
||||
|
||||
## Performance
|
||||
|
||||
- Conversion happens once per room (cached)
|
||||
- Statistics logging is efficient (limited to first 10 errors)
|
||||
- Shape migration only processes shapes (not all records)
|
||||
- Custom record tracking is lightweight
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. ✅ Conversion logic implemented and validated
|
||||
2. ✅ Comprehensive logging added
|
||||
3. ✅ Custom records/shapes preservation verified
|
||||
4. ✅ Edge case handling implemented
|
||||
5. ⏳ Test with real R2 data (manual process)
|
||||
6. ⏳ Monitor production conversions
|
||||
|
||||
## Files Modified
|
||||
|
||||
- `worker/AutomergeDurableObject.ts`: Main conversion logic
|
||||
- `getDocument()`: Format detection and routing
|
||||
- `convertAutomergeToStore()`: Automerge array conversion
|
||||
- `migrateDocumentsToStore()`: Old documents format conversion
|
||||
- `migrateShapeProperties()`: Shape property migration
|
||||
|
||||
## Key Improvements
|
||||
|
||||
1. **Validation**: All records are validated before conversion
|
||||
2. **Logging**: Comprehensive statistics for debugging
|
||||
3. **Error Handling**: Graceful handling of malformed data
|
||||
4. **Preservation**: Custom records and shapes are tracked and verified
|
||||
5. **Safety**: Non-destructive conversion with fallbacks
|
||||
|
|
|
|||
# Data Safety Verification: TldrawDurableObject → AutomergeDurableObject Migration
|
||||
|
||||
## Overview
|
||||
|
||||
This document verifies that the migration from `TldrawDurableObject` to `AutomergeDurableObject` is safe and will not result in data loss.
|
||||
|
||||
## R2 Bucket Configuration ✅
|
||||
|
||||
### Production Environment
|
||||
- **Bucket Binding**: `TLDRAW_BUCKET`
|
||||
- **Bucket Name**: `jeffemmett-canvas`
|
||||
- **Storage Path**: `rooms/${roomId}`
|
||||
- **Configuration**: `wrangler.toml` lines 30-32
|
||||
|
||||
### Development Environment
|
||||
- **Bucket Binding**: `TLDRAW_BUCKET`
|
||||
- **Bucket Name**: `jeffemmett-canvas-preview`
|
||||
- **Storage Path**: `rooms/${roomId}`
|
||||
- **Configuration**: `wrangler.toml` lines 72-74
|
||||
|
||||
## Data Storage Architecture
|
||||
|
||||
### Where Data is Stored
|
||||
|
||||
1. **Document Data (R2 Storage)** ✅
|
||||
- **Location**: R2 bucket at path `rooms/${roomId}`
|
||||
- **Format**: JSON document containing the full board state
|
||||
- **Persistence**: Permanent storage, independent of Durable Object instances
|
||||
- **Access**: Both `TldrawDurableObject` and `AutomergeDurableObject` use the same R2 bucket and path
|
||||
|
||||
2. **Room ID (Durable Object Storage)** ⚠️
|
||||
- **Location**: Durable Object's internal storage (`ctx.storage`)
|
||||
- **Purpose**: Cached room ID for the Durable Object instance
|
||||
- **Recovery**: Can be re-initialized from URL path (`/connect/:roomId`)
|
||||
|
||||
### Data Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ R2 Bucket (TLDRAW_BUCKET) │
|
||||
│ │
|
||||
│ rooms/room-123 ←─── Document Data (PERSISTENT) │
|
||||
│ rooms/room-456 ←─── Document Data (PERSISTENT) │
|
||||
│ rooms/room-789 ←─── Document Data (PERSISTENT) │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
▲ ▲
|
||||
│ │
|
||||
┌─────────────────┘ └─────────────────┐
|
||||
│ │
|
||||
┌───────┴────────┐ ┌─────────────┴────────┐
|
||||
│ TldrawDurable │ │ AutomergeDurable │
|
||||
│ Object │ │ Object │
|
||||
│ (DEPRECATED) │ │ (ACTIVE) │
|
||||
└────────────────┘ └──────────────────────┘
|
||||
│ │
|
||||
└─────────────────── Both read/write ─────────────────────┘
|
||||
to the same R2 location
|
||||
```
|
||||
|
||||
## Migration Safety Guarantees
|
||||
|
||||
### ✅ No Data Loss Risk
|
||||
|
||||
1. **R2 Data is Independent**
|
||||
- Document data is stored in R2, not in Durable Object storage
|
||||
- R2 data persists even when Durable Object instances are deleted
|
||||
- Both classes use the same R2 bucket (`TLDRAW_BUCKET`) and path (`rooms/${roomId}`)
|
||||
|
||||
2. **Stub Class Ensures Compatibility**
|
||||
- `TldrawDurableObject` extends `AutomergeDurableObject`
|
||||
- Uses the same R2 bucket and storage path
|
||||
- Existing instances can access their data during migration
|
||||
|
||||
3. **Room ID Recovery**
|
||||
- `roomId` is passed in the URL path (`/connect/:roomId`)
|
||||
- Can be re-initialized if Durable Object storage is lost
|
||||
- Code handles missing `roomId` by reading from URL (see `AutomergeDurableObject.ts` lines 43-49; a small sketch follows this list)
|
||||
|
||||
4. **Automatic Format Conversion**
|
||||
- `AutomergeDurableObject` handles multiple data formats:
|
||||
- Automerge Array Format: `[{ state: {...} }, ...]`
|
||||
- Store Format: `{ store: { "recordId": {...}, ... }, schema: {...} }`
|
||||
- Old Documents Format: `{ documents: [{ state: {...} }, ...] }`
|
||||
- Conversion preserves all data, including custom shapes and records
|
||||
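A small sketch of the roomId recovery mentioned in point 3, assuming the `/connect/:roomId` URL shape described above (the actual code in `AutomergeDurableObject.ts` may differ in detail):

```typescript
// Sketch only: recover the roomId from a /connect/:roomId request URL.
function roomIdFromRequest(request: Request): string | null {
  const url = new URL(request.url)
  const match = url.pathname.match(/^\/connect\/([^/]+)/)
  return match ? decodeURIComponent(match[1]) : null
}

// Illustrative usage inside the Durable Object: fall back to the URL
// when the cached roomId in storage is missing.
// const roomId = (await this.ctx.storage.get<string>("roomId")) ?? roomIdFromRequest(request)
```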
|
||||
### Migration Process
|
||||
|
||||
1. **Deployment with Stub**
|
||||
- `TldrawDurableObject` stub class is exported
|
||||
- Cloudflare recognizes the class exists
|
||||
- Existing instances can continue operating
|
||||
|
||||
2. **Delete-Class Migration**
|
||||
- Migration tag `v2` with `deleted_classes = ["TldrawDurableObject"]`
|
||||
- Cloudflare will delete Durable Object instances (not R2 data)
|
||||
- R2 data remains untouched
|
||||
|
||||
3. **Data Access After Migration**
|
||||
- New `AutomergeDurableObject` instances can access the same R2 data
|
||||
- Same bucket (`TLDRAW_BUCKET`) and path (`rooms/${roomId}`)
|
||||
- Automatic format conversion ensures compatibility
|
||||
|
||||
## Verification Checklist
|
||||
|
||||
- [x] R2 bucket binding is correctly configured (`TLDRAW_BUCKET`)
|
||||
- [x] Both production and dev environments have R2 buckets configured
|
||||
- [x] `AutomergeDurableObject` uses `env.TLDRAW_BUCKET`
|
||||
- [x] Storage path is consistent (`rooms/${roomId}`)
|
||||
- [x] Stub class extends `AutomergeDurableObject` (same R2 access)
|
||||
- [x] Migration includes `delete-class` for `TldrawDurableObject`
|
||||
- [x] Code handles missing `roomId` by reading from URL
|
||||
- [x] Format conversion logic preserves all data types
|
||||
- [x] Custom shapes and records are preserved during conversion
|
||||
|
||||
## Testing Recommendations
|
||||
|
||||
1. **Before Migration**
|
||||
- Verify R2 bucket contains expected room data
|
||||
- List rooms: `wrangler r2 object list TLDRAW_BUCKET --prefix "rooms/"`
|
||||
- Check a sample room's format
|
||||
|
||||
2. **After Migration**
|
||||
- Verify rooms are still accessible
|
||||
- Check that data format is correctly converted
|
||||
- Verify custom shapes and records are preserved
|
||||
- Monitor worker logs for conversion statistics
|
||||
|
||||
3. **Data Integrity Checks**
|
||||
- Shape count matches before/after
|
||||
- Custom shapes (ObsNote, Holon, etc.) have all properties
|
||||
- Custom records (obsidian_vault, etc.) are present
|
||||
- No validation errors in console
|
||||
|
||||
## Conclusion
|
||||
|
||||
✅ **The migration is safe and will not result in data loss.**
|
||||
|
||||
- All document data is stored in R2, which is independent of Durable Object instances
|
||||
- Both classes use the same R2 bucket and storage path
|
||||
- The stub class ensures compatibility during migration
|
||||
- Format conversion logic preserves all data types
|
||||
- Room IDs can be recovered from URL paths if needed
|
||||
|
||||
The only data that will be lost is the cached `roomId` in Durable Object storage, which can be easily re-initialized from the URL path.
|
||||
|
||||
|
|
|
|||
# Deployment Guide
|
||||
|
||||
## Frontend Deployment (Cloudflare Pages)
|
||||
|
||||
The frontend is deployed to **Cloudflare Pages** (migrated from Vercel).
|
||||
|
||||
### Configuration
|
||||
- **Build command**: `npm run build`
|
||||
- **Build output directory**: `dist`
|
||||
- **SPA routing**: Handled by `_redirects` file
|
||||
|
||||
### Environment Variables
|
||||
Set in Cloudflare Pages dashboard → Settings → Environment variables:
|
||||
- All `VITE_*` variables needed for the frontend
|
||||
- `VITE_WORKER_ENV=production` for production
|
||||
|
||||
See `CLOUDFLARE_PAGES_MIGRATION.md` for detailed migration guide.
|
||||
|
||||
## Worker Deployment Strategy
|
||||
|
||||
**Using Cloudflare's Native Git Integration** for automatic deployments.
|
||||
|
||||
### Current Setup
|
||||
- ✅ **Cloudflare Workers Builds**: Automatic deployment on push to `main` branch
|
||||
- ✅ **Build Status**: Integrated with GitHub (commit statuses, PR comments)
|
||||
- ✅ **Environment Support**: Production and preview environments
|
||||
|
||||
### How to Configure Cloudflare Native Deployment
|
||||
|
||||
1. Go to [Cloudflare Dashboard](https://dash.cloudflare.com/)
|
||||
2. Navigate to **Workers & Pages** → **jeffemmett-canvas**
|
||||
3. Go to **Settings** → **Builds & Deployments**
|
||||
4. Ensure **"Automatically deploy from Git"** is enabled
|
||||
5. Configure build settings:
|
||||
- **Build command**: Leave empty (wrangler handles this automatically)
|
||||
- **Root directory**: `/` (or leave empty)
|
||||
- **Environment variables**: Set in Cloudflare dashboard (not in wrangler.toml)
|
||||
|
||||
### Why Use Cloudflare Native Deployment?
|
||||
|
||||
**Advantages:**
|
||||
- ✅ Simpler setup (no workflow files to maintain)
|
||||
- ✅ Integrated with Cloudflare dashboard
|
||||
- ✅ Automatic resource provisioning (KV, R2, Durable Objects)
|
||||
- ✅ Build status in GitHub (commit statuses, PR comments)
|
||||
- ✅ No GitHub Actions minutes usage
|
||||
- ✅ Fewer moving parts, easier to debug
|
||||
|
||||
**Note:** The GitHub Action workflow has been deprecated (see `.github/workflows/deploy-worker.yml.disabled`) but kept as backup.
|
||||
|
||||
### Migration Fix
|
||||
|
||||
The worker now includes a migration to rename `TldrawDurableObject` → `AutomergeDurableObject`:
|
||||
|
||||
```toml
|
||||
[[migrations]]
|
||||
tag = "v2"
|
||||
renamed_classes = [
|
||||
{ from = "TldrawDurableObject", to = "AutomergeDurableObject" }
|
||||
]
|
||||
```
|
||||
|
||||
This fixes the error: "New version of script does not export class 'TldrawDurableObject'"
|
||||
|
||||
### Manual Deployment (if needed)
|
||||
|
||||
If you need to deploy manually:
|
||||
|
||||
```bash
|
||||
# Production
|
||||
npm run deploy:worker
|
||||
|
||||
# Development
|
||||
npm run deploy:worker:dev
|
||||
```
|
||||
|
||||
Or directly:
|
||||
```bash
|
||||
wrangler deploy # Production (uses wrangler.toml)
|
||||
wrangler deploy --config wrangler.dev.toml # Dev
|
||||
```
|
||||
|
||||
## Pages Deployment
|
||||
|
||||
Pages deployment is separate and should be configured in Cloudflare Pages dashboard:
|
||||
- **Build command**: `npm run build`
|
||||
- **Build output directory**: `dist`
|
||||
- **Root directory**: `/` (or leave empty)
|
||||
|
||||
**Note**: `wrangler.toml` is for Workers only, not Pages.
|
||||
|
||||
|
||||
|
|
|
|||
# Deployment Summary
|
||||
|
||||
## Current Setup
|
||||
|
||||
### ✅ Frontend: Cloudflare Pages
|
||||
- **Deployment**: Automatic on push to `main` branch
|
||||
- **Build**: `npm run build`
|
||||
- **Output**: `dist/`
|
||||
- **Configuration**: Set in Cloudflare Pages dashboard
|
||||
- **Environment Variables**: Set in Cloudflare Pages dashboard (VITE_* variables)
|
||||
|
||||
### ✅ Worker: Cloudflare Native Git Integration
|
||||
- **Production**: Automatic deployment on push to `main` branch → uses `wrangler.toml`
|
||||
- **Preview**: Automatic deployment for pull requests → uses `wrangler.toml` (or can be configured for dev)
|
||||
- **Build Status**: Integrated with GitHub (commit statuses, PR comments)
|
||||
- **Configuration**: Managed in Cloudflare Dashboard → Settings → Builds & Deployments
|
||||
|
||||
### ❌ Vercel: Can be disabled
|
||||
- Frontend is now on Cloudflare Pages
|
||||
- Worker was never on Vercel
|
||||
- You can safely disconnect/delete the Vercel project
|
||||
|
||||
## Why Cloudflare Native Deployment?
|
||||
|
||||
**Cloudflare's native Git integration provides:**
|
||||
|
||||
1. ✅ **Simplicity**: No workflow files to maintain, automatic setup
|
||||
2. ✅ **Integration**: Build status directly in GitHub (commit statuses, PR comments)
|
||||
3. ✅ **Resource Provisioning**: Automatically provisions KV, R2, Durable Objects
|
||||
4. ✅ **Environment Support**: Production and preview environments
|
||||
5. ✅ **Dashboard Integration**: All deployments visible in Cloudflare dashboard
|
||||
6. ✅ **No GitHub Actions Minutes**: Free deployment, no usage limits
|
||||
|
||||
**Note:** GitHub Actions workflow has been deprecated (see `.github/workflows/deploy-worker.yml.disabled`) but kept as backup if needed.
|
||||
|
||||
## Environment Switching
|
||||
|
||||
### For Local Development
|
||||
|
||||
You can switch between dev and prod workers locally using:
|
||||
|
||||
```bash
|
||||
# Switch to production worker
|
||||
./switch-worker-env.sh production
|
||||
|
||||
# Switch to dev worker
|
||||
./switch-worker-env.sh dev
|
||||
|
||||
# Switch to local worker (requires local worker running)
|
||||
./switch-worker-env.sh local
|
||||
```
|
||||
|
||||
This updates `.env.local` with `VITE_WORKER_ENV=production` or `VITE_WORKER_ENV=dev`.
|
||||
|
||||
**Default**: Now set to `production` (changed from `dev`)
|
||||
|
||||
### For Cloudflare Pages
|
||||
|
||||
Set environment variables in Cloudflare Pages dashboard:
|
||||
- **Production**: `VITE_WORKER_ENV=production`
|
||||
- **Preview**: `VITE_WORKER_ENV=dev` (for testing)
|
||||
|
||||
## Deployment Workflow
|
||||
|
||||
### Frontend (Cloudflare Pages)
|
||||
1. Push to `main` → Auto-deploys to production
|
||||
2. Create PR → Auto-deploys to preview environment
|
||||
3. Environment variables set in Cloudflare dashboard
|
||||
|
||||
### Worker (Cloudflare Native)
|
||||
1. **Production**: Push to `main` → Auto-deploys to production worker
|
||||
2. **Preview**: Create PR → Auto-deploys to preview environment (optional)
|
||||
3. **Manual**: Deploy via `wrangler deploy` command or Cloudflare dashboard
|
||||
|
||||
## Testing Both Environments
|
||||
|
||||
### Local Testing
|
||||
```bash
|
||||
# Test with production worker
|
||||
./switch-worker-env.sh production
|
||||
npm run dev
|
||||
|
||||
# Test with dev worker
|
||||
./switch-worker-env.sh dev
|
||||
npm run dev
|
||||
```
|
||||
|
||||
### Remote Testing
|
||||
- **Production**: Visit your production Cloudflare Pages URL
|
||||
- **Dev**: Visit your dev worker URL directly or use preview deployment
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. ✅ **Disable Vercel**: Go to Vercel dashboard → Disconnect repository
|
||||
2. ✅ **Verify Cloudflare Pages**: Ensure it's deploying correctly
|
||||
3. ✅ **Test Worker Deployments**: Push to main and verify production worker updates
|
||||
4. ✅ **Test Dev Worker**: Push to `automerge/test` branch and verify dev worker updates
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
# Canvas Website Dockerfile
|
||||
# Builds Vite frontend and serves with nginx
|
||||
# Backend (sync) still uses Cloudflare Workers
|
||||
|
||||
# Build stage
|
||||
FROM node:20-alpine AS build
|
||||
WORKDIR /app
|
||||
|
||||
# Install dependencies
|
||||
COPY package*.json ./
|
||||
RUN npm ci --legacy-peer-deps
|
||||
|
||||
# Copy source
|
||||
COPY . .
|
||||
|
||||
# Build args for environment
|
||||
ARG VITE_TLDRAW_WORKER_URL=https://jeffemmett-canvas.jeffemmett.workers.dev
|
||||
ARG VITE_DAILY_API_KEY
|
||||
ARG VITE_RUNPOD_API_KEY
|
||||
ARG VITE_RUNPOD_IMAGE_ENDPOINT_ID
|
||||
ARG VITE_RUNPOD_VIDEO_ENDPOINT_ID
|
||||
ARG VITE_RUNPOD_TEXT_ENDPOINT_ID
|
||||
ARG VITE_RUNPOD_WHISPER_ENDPOINT_ID
|
||||
|
||||
# Set environment for build
|
||||
ENV VITE_TLDRAW_WORKER_URL=$VITE_TLDRAW_WORKER_URL
|
||||
ENV VITE_DAILY_API_KEY=$VITE_DAILY_API_KEY
|
||||
ENV VITE_RUNPOD_API_KEY=$VITE_RUNPOD_API_KEY
|
||||
ENV VITE_RUNPOD_IMAGE_ENDPOINT_ID=$VITE_RUNPOD_IMAGE_ENDPOINT_ID
|
||||
ENV VITE_RUNPOD_VIDEO_ENDPOINT_ID=$VITE_RUNPOD_VIDEO_ENDPOINT_ID
|
||||
ENV VITE_RUNPOD_TEXT_ENDPOINT_ID=$VITE_RUNPOD_TEXT_ENDPOINT_ID
|
||||
ENV VITE_RUNPOD_WHISPER_ENDPOINT_ID=$VITE_RUNPOD_WHISPER_ENDPOINT_ID
|
||||
|
||||
# Build the app
|
||||
RUN npm run build
|
||||
|
||||
# Production stage
|
||||
FROM nginx:alpine AS production
|
||||
WORKDIR /usr/share/nginx/html
|
||||
|
||||
# Remove default nginx static assets
|
||||
RUN rm -rf ./*
|
||||
|
||||
# Copy built assets from build stage
|
||||
COPY --from=build /app/dist .
|
||||
|
||||
# Copy nginx config
|
||||
COPY nginx.conf /etc/nginx/conf.d/default.conf
|
||||
|
||||
# Expose port
|
||||
EXPOSE 80
|
||||
|
||||
# Start nginx
|
||||
CMD ["nginx", "-g", "daemon off;"]
|
||||
|
|
|
|||
# Fathom API Integration for tldraw Canvas
|
||||
|
||||
This integration allows you to import Fathom meeting transcripts directly into your tldraw canvas at jeffemmett.com/board/test.
|
||||
|
||||
## Features
|
||||
|
||||
- 🎥 **Import Fathom Meetings**: Browse and import your Fathom meeting recordings
|
||||
- 📝 **Rich Transcript Display**: View full transcripts with speaker identification and timestamps
|
||||
- ✅ **Action Items**: See extracted action items from meetings
|
||||
- 📋 **AI Summaries**: Display AI-generated meeting summaries
|
||||
- 🔗 **Direct Links**: Click to view meetings in Fathom
|
||||
- 🎨 **Customizable Display**: Toggle between compact and expanded views
|
||||
|
||||
## Setup Instructions
|
||||
|
||||
### 1. Get Your Fathom API Key
|
||||
|
||||
1. Go to your [Fathom User Settings](https://app.usefathom.com/settings/integrations)
|
||||
2. Navigate to the "Integrations" section
|
||||
3. Generate an API key
|
||||
4. Copy the API key for use in the canvas
|
||||
|
||||
### 2. Using the Integration
|
||||
|
||||
1. **Open the Canvas**: Navigate to `jeffemmett.com/board/test`
|
||||
2. **Access Fathom Meetings**: Click the "Fathom Meetings" button in the toolbar (calendar icon)
|
||||
3. **Enter API Key**: When prompted, enter your Fathom API key
|
||||
4. **Browse Meetings**: The panel will load your recent Fathom meetings
|
||||
5. **Add to Canvas**: Click "Add to Canvas" on any meeting to create a transcript shape
|
||||
|
||||
### 3. Customizing Transcript Shapes
|
||||
|
||||
Once added to the canvas, you can:
|
||||
|
||||
- **Toggle Transcript View**: Click the "📝 Transcript" button to show/hide the full transcript
|
||||
- **Toggle Action Items**: Click the "✅ Actions" button to show/hide action items
|
||||
- **Expand/Collapse**: Click the "📄 Expanded/Compact" button to change the view
|
||||
- **Resize**: Drag the corners to resize the shape
|
||||
- **Move**: Click and drag to reposition the shape
|
||||
|
||||
## API Endpoints
|
||||
|
||||
The integration includes these backend endpoints:
|
||||
|
||||
- `GET /api/fathom/meetings` - List all meetings
|
||||
- `GET /api/fathom/meetings/:id` - Get specific meeting details
|
||||
- `POST /api/fathom/webhook` - Receive webhook notifications (for future real-time updates)
|
||||
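For example, the meetings list endpoint can be called from the client roughly like this (the response shape and the header used to pass the API key are assumptions; adjust to the actual worker implementation):

```typescript
// Sketch of calling the worker's Fathom endpoint from the client.
// The header name used to pass the API key is illustrative, not the actual contract.
const WORKER_URL = "https://jeffemmett-canvas.jeffemmett.workers.dev"

async function listFathomMeetings(apiKey: string): Promise<unknown> {
  const res = await fetch(`${WORKER_URL}/api/fathom/meetings`, {
    headers: { "X-Fathom-Api-Key": apiKey }, // assumed header name
  })
  if (!res.ok) throw new Error(`Failed to fetch meetings: ${res.status}`)
  return res.json()
}
```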
|
||||
## Webhook Setup (Optional)
|
||||
|
||||
For real-time updates when new meetings are recorded:
|
||||
|
||||
1. **Get Webhook URL**: Your webhook endpoint is `https://jeffemmett-canvas.jeffemmett.workers.dev/api/fathom/webhook`
|
||||
2. **Configure in Fathom**: Add this URL in your Fathom webhook settings
|
||||
3. **Enable Notifications**: Turn on webhook notifications for new meetings
|
||||
|
||||
## Data Structure
|
||||
|
||||
The Fathom transcript shape includes:
|
||||
|
||||
```typescript
|
||||
{
|
||||
meetingId: string
|
||||
meetingTitle: string
|
||||
meetingUrl: string
|
||||
summary: string
|
||||
transcript: Array<{
|
||||
speaker: string
|
||||
text: string
|
||||
timestamp: string
|
||||
}>
|
||||
actionItems: Array<{
|
||||
text: string
|
||||
assignee?: string
|
||||
dueDate?: string
|
||||
}>
|
||||
}
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **"No API key provided"**: Make sure you've entered your Fathom API key correctly
|
||||
2. **"Failed to fetch meetings"**: Check that your API key is valid and has the correct permissions
|
||||
3. **Empty transcript**: Some meetings may not have transcripts if they were recorded without transcription enabled
|
||||
|
||||
### Getting Help
|
||||
|
||||
- Check the browser console for error messages
|
||||
- Verify your Fathom API key is correct
|
||||
- Ensure you have recorded meetings in Fathom
|
||||
- Contact support if issues persist
|
||||
|
||||
## Security Notes
|
||||
|
||||
- API keys are stored locally in your browser
|
||||
- Webhook endpoints are currently not signature-verified (TODO for production)
|
||||
- All data is processed client-side for privacy
|
||||
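If signature verification is added later, a Worker-side check could look roughly like this sketch using the Web Crypto API (the header name and the HMAC-SHA256 scheme are assumptions; check Fathom's webhook documentation for the actual signing scheme):

```typescript
// Sketch of HMAC-SHA256 webhook verification in a Worker.
// Header name and signing scheme are assumptions, not Fathom's documented API.
async function verifyWebhookSignature(request: Request, body: string, secret: string): Promise<boolean> {
  const signature = request.headers.get("X-Webhook-Signature") // assumed header
  if (!signature) return false

  const key = await crypto.subtle.importKey(
    "raw",
    new TextEncoder().encode(secret),
    { name: "HMAC", hash: "SHA-256" },
    false,
    ["sign"],
  )
  const mac = await crypto.subtle.sign("HMAC", key, new TextEncoder().encode(body))
  const expected = [...new Uint8Array(mac)].map((b) => b.toString(16).padStart(2, "0")).join("")

  // Note: use a constant-time comparison in production
  return expected === signature
}
```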
|
||||
## Future Enhancements
|
||||
|
||||
- [ ] Real-time webhook notifications
|
||||
- [ ] Search and filter meetings
|
||||
- [ ] Export transcript data
|
||||
- [ ] Integration with other meeting tools
|
||||
- [ ] Advanced transcript formatting options
|
||||
|
||||
|
|||
# Gesture Recognition Tool
|
||||
|
||||
This document describes all available gestures in the Canvas application. Use the gesture tool (press `g` or select from toolbar) to draw these gestures and trigger their actions.
|
||||
|
||||
## How to Use
|
||||
|
||||
1. **Activate the Gesture Tool**: Press `g` or select the gesture tool from the toolbar
|
||||
2. **Draw a Gesture**: Use your mouse, pen, or finger to draw one of the gestures below
|
||||
3. **Release**: The gesture will be recognized and the corresponding action will be performed
|
||||
|
||||
## Available Gestures
|
||||
|
||||
### Basic Gestures (Default Mode)
|
||||
|
||||
| Gesture | Description | Action |
|
||||
|---------|-------------|---------|
|
||||
| **X** | Draw an "X" shape | Deletes selected shapes |
|
||||
| **Rectangle** | Draw a rectangle outline | Creates a rectangle shape at the gesture location |
|
||||
| **Circle** | Draw a circle/oval | Selects and highlights shapes under the gesture |
|
||||
| **Check** | Draw a checkmark (✓) | Changes color of shapes under the gesture to green |
|
||||
| **Caret** | Draw a caret (^) pointing up | Aligns selected shapes to the top |
|
||||
| **V** | Draw a "V" shape pointing down | Aligns selected shapes to the bottom |
|
||||
| **Delete** | Draw a delete symbol (similar to X) | Deletes selected shapes |
|
||||
| **Pigtail** | Draw a pigtail/spiral shape | Selects shapes under gesture and rotates them 90° counterclockwise |
|
||||
|
||||
### Layout Gestures (Hold Shift + Draw)
|
||||
|
||||
| Gesture | Description | Action |
|
||||
|---------|-------------|---------|
|
||||
| **Circle Layout** | Draw a circle while holding Shift | Arranges selected shapes in a circle around the gesture center |
|
||||
| **Triangle Layout** | Draw a triangle while holding Shift | Arranges selected shapes in a triangle around the gesture center |
|
||||
|
||||
## Gesture Tips
|
||||
|
||||
- **Accuracy**: Draw gestures clearly and completely for best recognition
|
||||
- **Size**: Gestures work at various sizes, but avoid extremely small or large drawings
|
||||
- **Speed**: Draw at a natural pace - not too fast or too slow
|
||||
- **Shift Key**: Hold Shift while drawing to access layout gestures
|
||||
- **Selection**: Most gestures work on selected shapes, so select shapes first if needed
|
||||
|
||||
## Keyboard Shortcut
|
||||
|
||||
- **`g`**: Activate the gesture tool
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- If a gesture isn't recognized, try drawing it more clearly or at a different size
|
||||
- Make sure you're using the gesture tool (cursor should change to a cross)
|
||||
- For layout gestures, remember to hold Shift while drawing
|
||||
- Some gestures require shapes to be selected first
|
||||
|
||||
## Examples
|
||||
|
||||
### Deleting Shapes
|
||||
1. Select the shapes you want to delete
|
||||
2. Press `g` to activate gesture tool
|
||||
3. Draw an "X" over the shapes
|
||||
4. Release - the shapes will be deleted
|
||||
|
||||
### Creating a Rectangle
|
||||
1. Press `g` to activate gesture tool
|
||||
2. Draw a rectangle outline where you want the shape
|
||||
3. Release - a rectangle will be created
|
||||
|
||||
### Arranging Shapes in a Circle
|
||||
1. Select the shapes you want to arrange
|
||||
2. Press `g` to activate gesture tool
|
||||
3. Hold Shift and draw a circle
|
||||
4. Release - the shapes will be arranged in a circle
|
||||
|
||||
### Rotating Shapes
|
||||
1. Select the shapes you want to rotate
|
||||
2. Press `g` to activate gesture tool
|
||||
3. Draw a pigtail/spiral over the shapes
|
||||
4. Release - the shapes will rotate 90° counterclockwise
|
||||
|
|
|
|||
# Vercel → Cloudflare Pages Migration Checklist
|
||||
|
||||
## ✅ Completed Setup
|
||||
|
||||
- [x] Created `_redirects` file for SPA routing (in `src/public/`)
|
||||
- [x] Updated `package.json` to remove Vercel from deploy script
|
||||
- [x] Created migration guide (`CLOUDFLARE_PAGES_MIGRATION.md`)
|
||||
- [x] Updated deployment documentation
|
||||
|
||||
## 📋 Action Items
|
||||
|
||||
### 1. Create Cloudflare Pages Project
|
||||
- [ ] Go to [Cloudflare Dashboard](https://dash.cloudflare.com/)
|
||||
- [ ] Navigate to **Pages** → **Create a project**
|
||||
- [ ] Connect GitHub repository: `Jeff-Emmett/canvas-website`
|
||||
- [ ] Configure:
|
||||
- **Project name**: `canvas-website`
|
||||
- **Production branch**: `main`
|
||||
- **Build command**: `npm run build`
|
||||
- **Build output directory**: `dist`
|
||||
- **Root directory**: `/` (leave empty)
|
||||
|
||||
### 2. Set Environment Variables
|
||||
- [ ] Go to Pages project → **Settings** → **Environment variables**
|
||||
- [ ] Add all `VITE_*` variables from Vercel:
|
||||
- `VITE_WORKER_ENV=production` (for production)
|
||||
- `VITE_WORKER_ENV=dev` (for preview)
|
||||
- Any other `VITE_*` variables you use
|
||||
- [ ] Set different values for **Production** and **Preview** if needed
|
||||
|
||||
### 3. Test First Deployment
|
||||
- [ ] Wait for first deployment to complete
|
||||
- [ ] Visit Pages URL (e.g., `canvas-website.pages.dev`)
|
||||
- [ ] Test routes:
|
||||
- [ ] `/board`
|
||||
- [ ] `/inbox`
|
||||
- [ ] `/contact`
|
||||
- [ ] `/presentations`
|
||||
- [ ] `/dashboard`
|
||||
- [ ] Verify canvas app connects to Worker
|
||||
- [ ] Test real-time collaboration
|
||||
|
||||
### 4. Configure Custom Domain (if applicable)
|
||||
- [ ] Go to Pages project → **Custom domains**
|
||||
- [ ] Add your domain (e.g., `jeffemmett.com`)
|
||||
- [ ] Update DNS records to point to Cloudflare Pages
|
||||
- [ ] Wait for DNS propagation
|
||||
|
||||
### 5. Clean Up Vercel (after confirming Cloudflare works)
|
||||
- [ ] Verify everything works on Cloudflare Pages
|
||||
- [ ] Go to Vercel Dashboard
|
||||
- [ ] Disconnect repository or delete project
|
||||
- [ ] Update DNS records if using custom domain
|
||||
|
||||
## 🔍 Verification Steps
|
||||
|
||||
After migration, verify:
|
||||
- ✅ All routes work (no 404s)
|
||||
- ✅ Canvas app loads and connects to Worker
|
||||
- ✅ Real-time collaboration works
|
||||
- ✅ Environment variables are accessible
|
||||
- ✅ Assets load correctly
|
||||
- ✅ No console errors
|
||||
|
||||
## 📝 Notes
|
||||
|
||||
- The `_redirects` file is in `src/public/` and will be copied to `dist/` during build
|
||||
- Worker deployment is separate and unchanged
|
||||
- Environment variables must start with `VITE_` to be accessible in the browser
|
||||
- Cloudflare Pages automatically deploys on push to `main` branch
|
||||
|
||||
## 🆘 If Something Goes Wrong
|
||||
|
||||
1. Check Cloudflare Pages build logs
|
||||
2. Check browser console for errors
|
||||
3. Verify environment variables are set
|
||||
4. Verify Worker is accessible
|
||||
5. Check `_redirects` file is in `dist/` after build
|
||||
|
||||
|
|
|
|||
# mulTmux Integration
|
||||
|
||||
mulTmux is now integrated into the canvas-website project as a collaborative terminal tool. This allows multiple developers to work together in the same terminal session.
|
||||
|
||||
## Installation
|
||||
|
||||
From the root of the canvas-website project:
|
||||
|
||||
```bash
|
||||
# Install all dependencies including mulTmux packages
|
||||
npm run multmux:install
|
||||
|
||||
# Build mulTmux packages
|
||||
npm run multmux:build
|
||||
```
|
||||
|
||||
## Available Commands
|
||||
|
||||
All commands are run from the **root** of the canvas-website project:
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `npm run multmux:install` | Install mulTmux dependencies |
|
||||
| `npm run multmux:build` | Build server and CLI packages |
|
||||
| `npm run multmux:dev:server` | Run server in development mode |
|
||||
| `npm run multmux:dev:cli` | Run CLI in development mode |
|
||||
| `npm run multmux:start` | Start the production server |
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Build mulTmux
|
||||
|
||||
```bash
|
||||
npm run multmux:build
|
||||
```
|
||||
|
||||
### 2. Start the Server Locally (for testing)
|
||||
|
||||
```bash
|
||||
npm run multmux:start
|
||||
```
|
||||
|
||||
Server will be available at:
|
||||
- HTTP API: `http://localhost:3000`
|
||||
- WebSocket: `ws://localhost:3001`
|
||||
|
||||
### 3. Install CLI Globally
|
||||
|
||||
```bash
|
||||
cd multmux/packages/cli
|
||||
npm link
|
||||
```
|
||||
|
||||
Now you can use the `multmux` command anywhere!
|
||||
|
||||
### 4. Create a Session
|
||||
|
||||
```bash
|
||||
# Local testing
|
||||
multmux create my-session
|
||||
|
||||
# Or specify your AI server (when deployed)
|
||||
multmux create my-session --server http://your-ai-server:3000
|
||||
```
|
||||
|
||||
### 5. Join from Another Terminal
|
||||
|
||||
```bash
|
||||
multmux join <token-from-above> --server ws://your-ai-server:3001
|
||||
```
|
||||
|
||||
## Deploying to AI Server
|
||||
|
||||
### Option 1: Using the Deploy Script
|
||||
|
||||
```bash
|
||||
cd multmux
|
||||
./infrastructure/deploy.sh
|
||||
```
|
||||
|
||||
This will:
|
||||
- Install system dependencies (tmux, Node.js)
|
||||
- Build the project
|
||||
- Set up PM2 for process management
|
||||
- Start the server
|
||||
|
||||
### Option 2: Manual Deployment
|
||||
|
||||
1. **SSH to your AI server**
|
||||
```bash
|
||||
ssh your-ai-server
|
||||
```
|
||||
|
||||
2. **Clone or copy the project**
|
||||
```bash
|
||||
git clone <your-repo>
|
||||
cd canvas-website
|
||||
git checkout mulTmux-webtree
|
||||
```
|
||||
|
||||
3. **Install and build**
|
||||
```bash
|
||||
npm install
|
||||
npm run multmux:build
|
||||
```
|
||||
|
||||
4. **Start with PM2**
|
||||
```bash
|
||||
cd multmux
|
||||
npm install -g pm2
|
||||
pm2 start packages/server/dist/index.js --name multmux-server
|
||||
pm2 save
|
||||
pm2 startup
|
||||
```
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
canvas-website/
|
||||
├── multmux/
|
||||
│ ├── packages/
|
||||
│ │ ├── server/ # Backend (Node.js + tmux)
|
||||
│ │ └── cli/ # Command-line client
|
||||
│ ├── infrastructure/
|
||||
│ │ ├── deploy.sh # Auto-deployment script
|
||||
│ │ └── nginx.conf # Reverse proxy config
|
||||
│ └── README.md # Full documentation
|
||||
├── package.json # Now includes workspace config
|
||||
└── MULTMUX_INTEGRATION.md # This file
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Collaborative Coding Session
|
||||
|
||||
```bash
|
||||
# Developer 1: Create session in project directory
|
||||
cd /path/to/project
|
||||
multmux create coding-session --repo $(pwd)
|
||||
|
||||
# Developer 2: Join and start coding together
|
||||
multmux join <token>
|
||||
|
||||
# Both can now type in the same terminal!
|
||||
```
|
||||
|
||||
### Debugging Together
|
||||
|
||||
```bash
|
||||
# Create a session for debugging
|
||||
multmux create debug-auth-issue
|
||||
|
||||
# Share token with teammate
|
||||
# Both can run commands, check logs, etc.
|
||||
```
|
||||
|
||||
### List Active Sessions
|
||||
|
||||
```bash
|
||||
multmux list
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
You can customize ports by setting environment variables:
|
||||
|
||||
```bash
|
||||
export PORT=3000 # HTTP API port
|
||||
export WS_PORT=3001 # WebSocket port
|
||||
```
|
||||
|
||||
### Token Expiration
|
||||
|
||||
Default: 60 minutes. To change, edit `/home/jeffe/Github/canvas-website/multmux/packages/server/src/managers/TokenManager.ts:11`
|
||||
|
||||
### Session Cleanup
|
||||
|
||||
Sessions auto-cleanup when all users disconnect. To change this behavior, edit `/home/jeffe/Github/canvas-website/multmux/packages/server/src/managers/SessionManager.ts:64`
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Command not found: multmux"
|
||||
|
||||
Run `npm link` from the CLI package:
|
||||
```bash
|
||||
cd multmux/packages/cli
|
||||
npm link
|
||||
```
|
||||
|
||||
### "Connection refused"
|
||||
|
||||
1. Check server is running:
|
||||
```bash
|
||||
pm2 status
|
||||
```
|
||||
|
||||
2. Check ports are available:
|
||||
```bash
|
||||
netstat -tlnp | grep -E '3000|3001'
|
||||
```
|
||||
|
||||
3. Check logs:
|
||||
```bash
|
||||
pm2 logs multmux-server
|
||||
```
|
||||
|
||||
### Token Expired
|
||||
|
||||
Generate a new token:
|
||||
```bash
|
||||
curl -X POST http://localhost:3000/api/sessions/<session-id>/tokens \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"expiresInMinutes": 60}'
|
||||
```
|
||||
|
||||
## Security Notes
|
||||
|
||||
- Tokens expire after 60 minutes
|
||||
- Sessions are isolated per tmux instance
|
||||
- All input is validated on the server
|
||||
- Use nginx + SSL for production deployments
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. **Test locally first**: Run `npm run multmux:start` and try creating/joining sessions
|
||||
2. **Deploy to AI server**: Use `./infrastructure/deploy.sh`
|
||||
3. **Set up nginx**: Copy config from `infrastructure/nginx.conf` for SSL/reverse proxy
|
||||
4. **Share with team**: Send them tokens to collaborate!
|
||||
|
||||
For full documentation, see `multmux/README.md`.
|
||||
|
|||
# Offline Storage Feasibility Assessment
|
||||
|
||||
|
||||
**Summary**

**Difficulty: Medium.** Feasible with Automerge's built-in support.

**Why it's feasible**

- Automerge Repo supports storage adapters, so an IndexedDB adapter can be added alongside the network adapter
- CRDT merging: Automerge handles conflict resolution automatically
- Existing architecture: minimal changes needed, mainly adding a storage adapter

**What needs to be done**

1. **Add an IndexedDB storage adapter** (easy)
   - Install or create an IndexedDB storage adapter
   - Add it to the Repo configuration
2. **Fix document ID consistency** (important)
   - Currently uses `repo.create()`, which generates new IDs each time
   - Need to use `repo.find(roomId)` or store a roomId → documentId mapping
   - This is the main blocker for local storage persistence
3. **Handle online/offline transitions** (medium)
   - Listen for network status changes
   - Trigger sync when coming back online
   - The existing network adapter already handles reconnection
4. **Update initialization logic** (medium)
   - Load from IndexedDB first (instant offline access)
   - Then sync with the server when online
   - Automerge will automatically merge changes

**Estimated effort**

- Research & setup: 1-2 hours
- Implementation: 4-6 hours
- Testing: 2-3 hours
- Total: ~1 day of focused work

**Main challenge**

The document ID consistency issue. The code comment says "We can't use repo.find() with a custom ID because Automerge requires specific document ID formats", and this needs to be resolved. Options:

- Store a mapping of roomId → documentId in IndexedDB
- Use a deterministic algorithm to convert roomId to a valid Automerge document ID
- Use Automerge's document ID generation but persist the mapping

**Benefits**

- Instant offline access: no loading spinners
- Automatic conflict resolution: Automerge handles merging
- Better UX: works seamlessly offline
- Resilience: works even if the server is temporarily unavailable
|
||||
|
||||
|
||||
|
||||
## Summary
|
||||
|
||||
**Difficulty: Medium** - The implementation is straightforward thanks to Automerge's built-in support for storage adapters, but requires careful integration with the existing sync architecture.
|
||||
|
||||
## Current Architecture
|
||||
|
||||
1. **Client-side**: Uses `@automerge/automerge-repo` with `CloudflareNetworkAdapter` for WebSocket sync
|
||||
2. **Server-side**: `AutomergeDurableObject` stores documents in R2 and handles WebSocket connections
|
||||
3. **Persistence flow**:
|
||||
- Client saves to worker via POST `/room/:roomId`
|
||||
- Worker persists to R2 (throttled to every 2 seconds)
|
||||
- Client loads initial data from server via GET `/room/:roomId`
|
||||
|
||||
## What's Needed
|
||||
|
||||
### 1. Add IndexedDB Storage Adapter (Easy)
|
||||
|
||||
Automerge Repo supports storage adapters out of the box. You'll need to:
|
||||
|
||||
- Install `@automerge/automerge-repo-storage-indexeddb` (if available) or create a custom IndexedDB adapter
|
||||
- Add the storage adapter to the Repo configuration alongside the network adapter
|
||||
- The Repo will automatically persist document changes to IndexedDB
|
||||
|
||||
**Code changes needed:**
|
||||
```typescript
|
||||
// In useAutomergeSyncRepo.ts
|
||||
import { IndexedDBStorageAdapter } from "@automerge/automerge-repo-storage-indexeddb"
|
||||
|
||||
const [repo] = useState(() => {
|
||||
const adapter = new CloudflareNetworkAdapter(workerUrl, roomId, applyJsonSyncData)
|
||||
const storageAdapter = new IndexedDBStorageAdapter() // Add this
|
||||
return new Repo({
|
||||
network: [adapter],
|
||||
storage: storageAdapter // Add this (Repo takes a single storage adapter, not an array)
|
||||
})
|
||||
})
|
||||
```
|
||||
|
||||
### 2. Load from Local Storage on Startup (Medium)
|
||||
|
||||
Modify the initialization logic to:
|
||||
- Check IndexedDB for existing document data
|
||||
- Load from IndexedDB first (for instant offline access)
|
||||
- Then sync with server when online
|
||||
- Automerge will automatically merge local and remote changes
|
||||
|
||||
**Code changes needed:**
|
||||
```typescript
|
||||
// In useAutomergeSyncRepo.ts - modify initializeHandle
|
||||
const initializeHandle = async () => {
|
||||
// Check if document exists in IndexedDB first
|
||||
const localDoc = await repo.find(roomId) // This will load from IndexedDB if available
|
||||
|
||||
// Then sync with server (if online)
|
||||
if (navigator.onLine) {
|
||||
// Existing server sync logic
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Handle Online/Offline Transitions (Medium)
|
||||
|
||||
- Detect network status changes
|
||||
- When coming online, ensure sync happens
|
||||
- The existing `CloudflareNetworkAdapter` already handles reconnection, but you may want to add explicit sync triggers
|
||||
|
||||
**Code changes needed:**
|
||||
```typescript
|
||||
// Add network status listener
|
||||
useEffect(() => {
|
||||
const handleOnline = () => {
|
||||
console.log('🌐 Back online - syncing with server')
|
||||
// Trigger sync - Automerge will handle merging automatically
|
||||
if (handle) {
|
||||
// The network adapter will automatically reconnect and sync
|
||||
}
|
||||
}
|
||||
|
||||
window.addEventListener('online', handleOnline)
|
||||
return () => window.removeEventListener('online', handleOnline)
|
||||
}, [handle])
|
||||
```
|
||||
|
||||
### 4. Document ID Consistency (Important)
|
||||
|
||||
Currently, the code creates a new document handle each time (`repo.create()`). For local storage to work properly, you need:
|
||||
- Consistent document IDs per room
|
||||
- The challenge: Automerge requires specific document ID formats (like `automerge:xxxxx`)
|
||||
- **Solution options:**
|
||||
1. Use `repo.find()` with a properly formatted Automerge document ID (derive from roomId)
|
||||
2. Store a mapping of roomId → documentId in IndexedDB
|
||||
3. Use a deterministic way to generate document IDs from roomId
|
||||
|
||||
**Code changes needed:**
|
||||
```typescript
|
||||
// Option 1: Generate deterministic Automerge document ID from roomId
|
||||
const documentId = `automerge:${roomId}` // May need proper formatting
|
||||
const handle = repo.find(documentId) // This will load from IndexedDB or create new
|
||||
|
||||
// Option 2: Store mapping in IndexedDB
|
||||
const storedMapping = await getDocumentIdMapping(roomId)
|
||||
const documentId = storedMapping || generateNewDocumentId()
|
||||
const handle = repo.find(documentId)
|
||||
await saveDocumentIdMapping(roomId, documentId)
|
||||
```
|
||||
|
||||
**Note**: The current code comment says "We can't use repo.find() with a custom ID because Automerge requires specific document ID formats" - this needs to be resolved. You may need to:
|
||||
- Use Automerge's document ID generation but store the mapping
|
||||
- Or use a deterministic algorithm to convert roomId to valid Automerge document ID format
|
||||
|
||||
## Benefits
|
||||
|
||||
1. **Instant Offline Access**: Users can immediately see and edit their data without waiting for server response
|
||||
2. **Automatic Merging**: Automerge's CRDT nature means local and remote changes merge automatically without conflicts
|
||||
3. **Better UX**: No loading spinners when offline - data is instantly available
|
||||
4. **Resilience**: Works even if server is temporarily unavailable
|
||||
|
||||
## Challenges & Considerations
|
||||
|
||||
### 1. Storage Quota Limits
|
||||
- IndexedDB has browser-specific limits (typically 50% of disk space)
|
||||
- Large documents could hit quota limits
|
||||
- **Solution**: Monitor storage usage and implement cleanup for old documents
|
||||
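For monitoring, the standard `navigator.storage.estimate()` API reports usage and quota; a small helper might look like this (the 80% warning threshold is an arbitrary example):

```typescript
// Report how much of the origin's storage quota is in use.
// navigator.storage.estimate() is a standard browser API; the 80% threshold is arbitrary.
async function checkStorageUsage(): Promise<void> {
  if (!navigator.storage?.estimate) return // not supported in this browser

  const { usage = 0, quota = 0 } = await navigator.storage.estimate()
  const fraction = quota > 0 ? usage / quota : 0
  console.log(`💾 Storage: ${(usage / 1e6).toFixed(1)} MB of ${(quota / 1e6).toFixed(1)} MB used`)

  if (fraction > 0.8) {
    console.warn("⚠️ Approaching storage quota - consider cleaning up old documents")
  }
}
```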
|
||||
### 2. Document ID Management
|
||||
- Need to ensure consistent document IDs per room
|
||||
- Current code uses `repo.create()` which generates new IDs
|
||||
- **Solution**: Use `repo.find(roomId)` with a consistent ID format
|
||||
|
||||
### 3. Initial Load Strategy
|
||||
- Should load from IndexedDB first (fast) or server first (fresh)?
|
||||
- **Recommendation**: Load from IndexedDB first for instant UI, then sync with server in background
|
||||
|
||||
### 4. Conflict Resolution
|
||||
- Automerge handles this automatically, but you may want to show users when their offline changes were merged
|
||||
- **Solution**: Use Automerge's change tracking to show merge notifications
|
||||
|
||||
### 5. Storage Adapter Availability
|
||||
- Need to verify if `@automerge/automerge-repo-storage-indexeddb` exists
|
||||
- If not, you'll need to create a custom adapter (still straightforward)
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
1. **Research**: Check if `@automerge/automerge-repo-storage-indexeddb` package exists
|
||||
2. **Install**: Add storage adapter package or create custom adapter
|
||||
3. **Modify Repo Setup**: Add storage adapter to Repo configuration
|
||||
4. **Update Document Loading**: Use `repo.find()` instead of `repo.create()` for consistent IDs
|
||||
5. **Add Network Detection**: Listen for online/offline events
|
||||
6. **Test**: Verify offline editing works and syncs correctly when back online
|
||||
7. **Handle Edge Cases**: Storage quota, document size limits, etc.
|
||||
|
||||
## Estimated Effort
|
||||
|
||||
- **Research & Setup**: 1-2 hours
|
||||
- **Implementation**: 4-6 hours
|
||||
- **Testing**: 2-3 hours
|
||||
- **Total**: ~1 day of focused work
|
||||
|
||||
## Code Locations to Modify
|
||||
|
||||
1. `src/automerge/useAutomergeSyncRepo.ts` - Main sync hook (add storage adapter, modify initialization)
|
||||
2. `src/automerge/CloudflareAdapter.ts` - Network adapter (may need minor changes for offline detection)
|
||||
3. Potentially create: `src/automerge/IndexedDBStorageAdapter.ts` - If custom adapter needed
|
||||
|
||||
## Conclusion
|
||||
|
||||
This is a **medium-complexity** feature that's very feasible. Automerge's architecture is designed for this exact use case, and the main work is:
|
||||
1. Adding the storage adapter (straightforward)
|
||||
2. Ensuring consistent document IDs (important fix)
|
||||
3. Handling online/offline transitions (moderate complexity)
|
||||
|
||||
The biggest benefit is that Automerge's CRDT nature means you don't need to write complex merge logic - it handles conflict resolution automatically.
|
||||
|
||||
---
|
||||
|
||||
## Related: Google Data Sovereignty
|
||||
|
||||
Beyond canvas document storage, we also support importing and securely storing Google Workspace data locally. See **[docs/GOOGLE_DATA_SOVEREIGNTY.md](./docs/GOOGLE_DATA_SOVEREIGNTY.md)** for the complete architecture covering:
|
||||
|
||||
- **Gmail** - Import and encrypt emails locally
|
||||
- **Drive** - Import and encrypt documents locally
|
||||
- **Photos** - Import thumbnails with on-demand full resolution
|
||||
- **Calendar** - Import and encrypt events locally
|
||||
|
||||
Key principles:
|
||||
1. **Local-first**: All data stored in encrypted IndexedDB
|
||||
2. **User-controlled encryption**: Keys derived from WebCrypto auth, never leave browser
|
||||
3. **Selective sharing**: Choose what to share to canvas boards
|
||||
4. **Optional R2 backup**: Encrypted cloud backup (you hold the keys)
|
||||
|
||||
This builds on the same IndexedDB + Automerge foundation described above.
|
||||
|
||||
|
|
|
|||
# Quartz Database Setup Guide
|
||||
|
||||
This guide explains how to set up a Quartz database with read/write permissions for your canvas website. Based on the [Quartz static site generator](https://quartz.jzhao.xyz/) architecture, there are several approaches available.
|
||||
|
||||
## Overview
|
||||
|
||||
Quartz is a static site generator that transforms Markdown content into websites. To enable read/write functionality, we've implemented multiple sync approaches that work with Quartz's architecture.
|
||||
|
||||
## Setup Options
|
||||
|
||||
### 1. GitHub Integration (Recommended)
|
||||
|
||||
This is the most natural approach since Quartz is designed to work with GitHub repositories.
|
||||
|
||||
#### Prerequisites
|
||||
- A GitHub repository containing your Quartz site
|
||||
- A GitHub Personal Access Token with repository write permissions
|
||||
|
||||
#### Setup Steps
|
||||
|
||||
1. **Create a GitHub Personal Access Token:**
|
||||
- Go to GitHub Settings → Developer settings → Personal access tokens
|
||||
- Generate a new token with `repo` permissions for the Jeff-Emmett/quartz repository
|
||||
- Copy the token
|
||||
|
||||
2. **Configure Environment Variables:**
|
||||
Create a `.env.local` file in your project root with:
|
||||
```bash
|
||||
# GitHub Integration for Jeff-Emmett/quartz
|
||||
NEXT_PUBLIC_GITHUB_TOKEN=your_github_token_here
|
||||
NEXT_PUBLIC_QUARTZ_REPO=Jeff-Emmett/quartz
|
||||
```
|
||||
|
||||
**Important:** Replace `your_github_token_here` with your actual GitHub Personal Access Token.
|
||||
|
||||
3. **Set up GitHub Actions (Optional):**
|
||||
- The included `.github/workflows/quartz-sync.yml` will automatically rebuild your Quartz site when content changes
|
||||
- Make sure your repository has GitHub Pages enabled
|
||||
|
||||
#### How It Works
|
||||
- When you sync a note, it creates/updates a Markdown file in your GitHub repository
|
||||
- The file is placed in the `content/` directory with proper frontmatter
|
||||
- GitHub Actions automatically rebuilds and deploys your Quartz site
|
||||
- Your changes appear on your live Quartz site within minutes
|
||||
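Under the hood this uses the GitHub Contents API (`PUT /repos/{owner}/{repo}/contents/{path}`); a simplified sketch of the create/update call (the frontmatter fields and commit message are illustrative):

```typescript
// Simplified sketch of creating or updating a note via the GitHub Contents API.
// Frontmatter fields and the commit message are illustrative.
async function syncNoteToQuartz(token: string, repo: string, note: { title: string; content: string }): Promise<void> {
  const path = `content/${note.title}.md`
  const markdown = `---\ntitle: ${note.title}\n---\n\n${note.content}\n`
  const url = `https://api.github.com/repos/${repo}/contents/${path}`

  // If the file already exists, its SHA is required to update it
  const existing = await fetch(url, { headers: { Authorization: `Bearer ${token}` } })
  const sha = existing.ok ? (await existing.json()).sha : undefined

  const res = await fetch(url, {
    method: "PUT",
    headers: { Authorization: `Bearer ${token}`, "Content-Type": "application/json" },
    body: JSON.stringify({
      message: `Sync note: ${note.title}`,
      content: btoa(unescape(encodeURIComponent(markdown))), // base64-encode UTF-8 content
      ...(sha ? { sha } : {}),
    }),
  })
  if (!res.ok) throw new Error(`GitHub sync failed: ${res.status}`)
}
```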
|
||||
### 2. Cloudflare Integration
|
||||
|
||||
Uses your existing Cloudflare infrastructure for persistent storage.
|
||||
|
||||
#### Prerequisites
|
||||
- Cloudflare account with R2 and Durable Objects enabled
|
||||
- API token with appropriate permissions
|
||||
|
||||
#### Setup Steps
|
||||
|
||||
1. **Create Cloudflare API Token:**
|
||||
- Go to Cloudflare Dashboard → My Profile → API Tokens
|
||||
- Create a token with `Cloudflare R2:Edit` and `Durable Objects:Edit` permissions
|
||||
- Note your Account ID
|
||||
|
||||
2. **Configure Environment Variables:**
|
||||
```bash
|
||||
# Add to your .env.local file
|
||||
NEXT_PUBLIC_CLOUDFLARE_API_KEY=your_api_key_here
|
||||
NEXT_PUBLIC_CLOUDFLARE_ACCOUNT_ID=your_account_id_here
|
||||
NEXT_PUBLIC_CLOUDFLARE_R2_BUCKET=your-bucket-name
|
||||
```
|
||||
|
||||
3. **Deploy the API Endpoint:**
|
||||
- The `src/pages/api/quartz/sync.ts` endpoint handles Cloudflare storage
|
||||
- Deploy this to your Cloudflare Workers or Vercel
|
||||
|
||||
#### How It Works
|
||||
- Notes are stored in Cloudflare R2 for persistence
|
||||
- Durable Objects handle real-time sync across devices
|
||||
- The API endpoint manages note storage and retrieval
|
||||
- Changes are immediately available to all connected clients
|
||||
|
||||
### 3. Direct Quartz API
|
||||
|
||||
If your Quartz site exposes an API for content updates.
|
||||
|
||||
#### Setup Steps
|
||||
|
||||
1. **Configure Environment Variables:**
|
||||
```bash
|
||||
# Add to your .env.local file
|
||||
NEXT_PUBLIC_QUARTZ_API_URL=https://your-quartz-site.com/api
|
||||
NEXT_PUBLIC_QUARTZ_API_KEY=your_api_key_here
|
||||
```
|
||||
|
||||
2. **Implement API Endpoints:**
|
||||
- Your Quartz site needs to expose `/api/notes` endpoints
|
||||
- See the example implementation in the sync code
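
A hedged sketch of what a client call to such an endpoint could look like, using the env vars above. The request and response shapes are assumptions — match them to whatever your Quartz site actually implements:

```typescript
// Push a note to a /api/notes endpoint with bearer-token auth.
export async function pushNoteToQuartzApi(slug: string, markdown: string) {
  const res = await fetch(`${process.env.NEXT_PUBLIC_QUARTZ_API_URL}/notes`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.NEXT_PUBLIC_QUARTZ_API_KEY}`,
    },
    body: JSON.stringify({ slug, markdown }),
  })
  if (!res.ok) throw new Error(`Quartz API error: ${res.status}`)
  return res.json()
}
```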
|
||||
|
||||
### 4. Webhook Integration
|
||||
|
||||
Send updates to a webhook that processes and syncs to Quartz.
|
||||
|
||||
#### Setup Steps
|
||||
|
||||
1. **Configure Environment Variables:**
|
||||
```bash
|
||||
# Add to your .env.local file
|
||||
NEXT_PUBLIC_QUARTZ_WEBHOOK_URL=https://your-webhook-endpoint.com/quartz-sync
|
||||
NEXT_PUBLIC_QUARTZ_WEBHOOK_SECRET=your_webhook_secret_here
|
||||
```
|
||||
|
||||
2. **Set up Webhook Handler:**
|
||||
- Create an endpoint that receives note updates
|
||||
- Process the updates and sync to your Quartz site
|
||||
- Implement proper authentication using the webhook secret
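
One way to implement that authentication is an HMAC signature check. This is a sketch for a Node-based handler; the `x-quartz-signature` header name is an assumption:

```typescript
import { createHmac, timingSafeEqual } from "node:crypto"

// Verify that the webhook payload was signed with the shared secret.
export function verifyWebhook(rawBody: string, signatureHeader: string, secret: string): boolean {
  const expected = createHmac("sha256", secret).update(rawBody).digest("hex")
  const a = Buffer.from(expected)
  const b = Buffer.from(signatureHeader)
  // Constant-time comparison avoids leaking the signature through timing differences.
  return a.length === b.length && timingSafeEqual(a, b)
}
```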
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
Create a `.env.local` file with the following variables:
|
||||
|
||||
```bash
|
||||
# GitHub Integration
|
||||
NEXT_PUBLIC_GITHUB_TOKEN=your_github_token
|
||||
NEXT_PUBLIC_QUARTZ_REPO=username/repo-name
|
||||
|
||||
# Cloudflare Integration
|
||||
NEXT_PUBLIC_CLOUDFLARE_API_KEY=your_api_key
|
||||
NEXT_PUBLIC_CLOUDFLARE_ACCOUNT_ID=your_account_id
|
||||
NEXT_PUBLIC_CLOUDFLARE_R2_BUCKET=your-bucket-name
|
||||
|
||||
# Quartz API Integration
|
||||
NEXT_PUBLIC_QUARTZ_API_URL=https://your-site.com/api
|
||||
NEXT_PUBLIC_QUARTZ_API_KEY=your_api_key
|
||||
|
||||
# Webhook Integration
|
||||
NEXT_PUBLIC_QUARTZ_WEBHOOK_URL=https://your-webhook.com/sync
|
||||
NEXT_PUBLIC_QUARTZ_WEBHOOK_SECRET=your_secret
|
||||
```
|
||||
|
||||
### Runtime Configuration
|
||||
|
||||
You can also configure sync settings at runtime:
|
||||
|
||||
```typescript
|
||||
import { saveQuartzSyncSettings } from '@/config/quartzSync'
|
||||
|
||||
// Enable/disable specific sync methods
|
||||
saveQuartzSyncSettings({
|
||||
github: { enabled: true },
|
||||
cloudflare: { enabled: false },
|
||||
webhook: { enabled: true }
|
||||
})
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Sync
|
||||
|
||||
The sync functionality is automatically integrated into your ObsNote shapes. When you edit a note and click "Sync Updates", it will:
|
||||
|
||||
1. Try the configured sync methods in order of preference
|
||||
2. Fall back to local storage if all methods fail
|
||||
3. Provide feedback on the sync status
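
Conceptually, that flow looks like this. It is an illustrative sketch, not the actual `smartSync` implementation, and the `QuartzNote` shape here is a minimal stand-in:

```typescript
type QuartzNote = { slug: string; markdown: string } // minimal stand-in for the lib's note type
type SyncMethod = { name: string; enabled: boolean; sync: (note: QuartzNote) => Promise<void> }

// Try each enabled method in order; if everything fails, keep the note locally.
async function syncWithFallback(note: QuartzNote, methods: SyncMethod[]): Promise<string> {
  for (const method of methods.filter((m) => m.enabled)) {
    try {
      await method.sync(note)
      return `synced via ${method.name}`
    } catch (err) {
      console.warn(`${method.name} sync failed, trying next method`, err)
    }
  }
  localStorage.setItem(`quartz-note-${note.slug}`, JSON.stringify(note))
  return "saved to local storage only"
}
```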
|
||||
|
||||
### Advanced Sync
|
||||
|
||||
For more control, you can use the QuartzSync class directly:
|
||||
|
||||
```typescript
|
||||
import { QuartzSync, createQuartzNoteFromShape } from '@/lib/quartzSync'
|
||||
|
||||
const sync = new QuartzSync({
|
||||
githubToken: 'your_token',
|
||||
githubRepo: 'username/repo'
|
||||
})
|
||||
|
||||
const note = createQuartzNoteFromShape(shape)
|
||||
await sync.smartSync(note)
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **"No vault configured for sync"**
|
||||
- Make sure you've selected a vault in the Obsidian Vault Browser
|
||||
- Check that the vault path is properly saved in your session
|
||||
|
||||
2. **GitHub API errors**
|
||||
- Verify your GitHub token has the correct permissions
|
||||
- Check that the repository name is correct (username/repo-name format)
|
||||
|
||||
3. **Cloudflare sync failures**
|
||||
- Ensure your API key has the necessary permissions
|
||||
- Verify the account ID and bucket name are correct
|
||||
|
||||
4. **Environment variables not loading**
|
||||
- Make sure your `.env.local` file is in the project root
|
||||
- Restart your development server after adding new variables
|
||||
|
||||
### Debug Mode
|
||||
|
||||
Debug logs are written to the browser console; open it (F12) to see detailed output from each sync attempt when troubleshooting.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. **API Keys**: Never commit API keys to version control
|
||||
2. **GitHub Tokens**: Use fine-grained tokens with minimal required permissions
|
||||
3. **Webhook Secrets**: Always use strong, unique secrets for webhook authentication
|
||||
4. **CORS**: Configure CORS properly for API endpoints
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Start with GitHub Integration**: It's the most reliable and well-supported approach
|
||||
2. **Use Fallbacks**: Always have local storage as a fallback option
|
||||
3. **Monitor Sync Status**: Check the console logs for sync success/failure
|
||||
4. **Test Thoroughly**: Verify sync works with different types of content
|
||||
5. **Backup Important Data**: Don't rely solely on sync for critical content
|
||||
|
||||
## Support
|
||||
|
||||
For issues or questions:
|
||||
1. Check the console logs for detailed error messages
|
||||
2. Verify your environment variables are set correctly
|
||||
3. Test with a simple note first
|
||||
4. Check the GitHub repository for updates and issues
|
||||
|
||||
## References
|
||||
|
||||
- [Quartz Documentation](https://quartz.jzhao.xyz/)
|
||||
- [Quartz GitHub Repository](https://github.com/jackyzha0/quartz)
|
||||
- [GitHub API Documentation](https://docs.github.com/en/rest)
|
||||
- [Cloudflare R2 Documentation](https://developers.cloudflare.com/r2/)
|
||||
|
|
@ -0,0 +1,267 @@
|
|||
# Quick Start Guide - AI Services Setup
|
||||
|
||||
**Get your AI orchestration running in under 30 minutes!**
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Goal
|
||||
|
||||
Deploy a smart AI orchestration layer that saves you $768-1,824/year by routing 70-80% of your workload to your Netcup RS 8000 (FREE) and only using RunPod GPUs when needed.
|
||||
|
||||
---
|
||||
|
||||
## ⚡ 30-Minute Quick Start
|
||||
|
||||
### Step 1: Verify Access (2 min)
|
||||
|
||||
```bash
|
||||
# Test SSH to Netcup RS 8000
|
||||
ssh netcup "hostname && docker --version"
|
||||
|
||||
# Expected output:
|
||||
# vXXXXXX.netcup.net
|
||||
# Docker version 24.0.x
|
||||
```
|
||||
|
||||
✅ **Success?** Continue to Step 2
|
||||
❌ **Failed?** Set up an SSH key or contact Netcup support
|
||||
|
||||
### Step 2: Deploy AI Orchestrator (10 min)
|
||||
|
||||
```bash
|
||||
# Create directory structure
|
||||
ssh netcup << 'EOF'
|
||||
mkdir -p /opt/ai-orchestrator/{services/{router,workers,monitor},configs,data}
|
||||
cd /opt/ai-orchestrator
|
||||
EOF
|
||||
|
||||
# Deploy minimal stack (text generation only for quick start)
|
||||
ssh netcup "cat > /opt/ai-orchestrator/docker-compose.yml" << 'EOF'
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
ports: ["6379:6379"]
|
||||
volumes: ["./data/redis:/data"]
|
||||
command: redis-server --appendonly yes
|
||||
|
||||
ollama:
|
||||
image: ollama/ollama:latest
|
||||
ports: ["11434:11434"]
|
||||
volumes: ["/data/models/ollama:/root/.ollama"]
|
||||
EOF
|
||||
|
||||
# Start services
|
||||
ssh netcup "cd /opt/ai-orchestrator && docker-compose up -d"
|
||||
|
||||
# Verify
|
||||
ssh netcup "docker ps"
|
||||
```
|
||||
|
||||
### Step 3: Download AI Model (5 min)
|
||||
|
||||
```bash
|
||||
# Pull Llama 3 8B (smaller, faster for testing)
|
||||
ssh netcup "docker exec ollama ollama pull llama3:8b"
|
||||
|
||||
# Test it
|
||||
ssh netcup "docker exec ollama ollama run llama3:8b 'Hello, world!'"
|
||||
```
|
||||
|
||||
Expected output: A friendly AI response!
|
||||
|
||||
### Step 4: Test from Your Machine (3 min)
|
||||
|
||||
```bash
|
||||
# Get Netcup IP
|
||||
NETCUP_IP="159.195.32.209"
|
||||
|
||||
# Test Ollama directly
|
||||
curl -X POST http://$NETCUP_IP:11434/api/generate \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "llama3:8b",
|
||||
"prompt": "Write hello world in Python",
|
||||
"stream": false
|
||||
}'
|
||||
```
|
||||
|
||||
Expected: Python code response!
|
||||
|
||||
### Step 5: Configure canvas-website (5 min)
|
||||
|
||||
```bash
|
||||
cd /home/jeffe/Github/canvas-website-branch-worktrees/add-runpod-AI-API
|
||||
|
||||
# Create minimal .env.local
|
||||
cat > .env.local << 'EOF'
|
||||
# Ollama direct access (for quick testing)
|
||||
VITE_OLLAMA_URL=http://159.195.32.209:11434
|
||||
|
||||
# Your existing vars...
|
||||
VITE_GOOGLE_CLIENT_ID=your_google_client_id
|
||||
VITE_TLDRAW_WORKER_URL=your_worker_url
|
||||
EOF
|
||||
|
||||
# Install and start
|
||||
npm install
|
||||
npm run dev
|
||||
```
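
For reference, calling Ollama from the app is a single fetch against the `VITE_OLLAMA_URL` you just set. This is a sketch, not the app's actual provider code:

```typescript
// Ask the local Ollama instance for a completion; non-streaming responses
// carry the generated text in the `response` field.
async function askOllama(prompt: string): Promise<string> {
  const res = await fetch(`${import.meta.env.VITE_OLLAMA_URL}/api/generate`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ model: "llama3:8b", prompt, stream: false }),
  })
  if (!res.ok) throw new Error(`Ollama error: ${res.status}`)
  const data = await res.json()
  return data.response
}
```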
|
||||
|
||||
### Step 6: Test in Browser (5 min)
|
||||
|
||||
1. Open http://localhost:5173 (or your dev port)
|
||||
2. Create a Prompt shape or use LLM command
|
||||
3. Type: "Write a hello world program"
|
||||
4. Submit
|
||||
5. Verify: Response appears using your local Ollama!
|
||||
|
||||
**🎉 Success!** You're now running AI locally for FREE!
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Next: Full Setup (Optional)
|
||||
|
||||
Once quick start works, deploy the full stack:
|
||||
|
||||
### Option A: Full AI Orchestrator (1 hour)
|
||||
|
||||
Follow: `AI_SERVICES_DEPLOYMENT_GUIDE.md` Phase 2-3
|
||||
|
||||
Adds:
|
||||
- Smart routing layer
|
||||
- Image generation (local SD + RunPod)
|
||||
- Video generation (RunPod Wan2.1)
|
||||
- Cost tracking
|
||||
- Monitoring dashboards
|
||||
|
||||
### Option B: Just Add Image Generation (30 min)
|
||||
|
||||
```bash
|
||||
# Add Stable Diffusion CPU to docker-compose.yml
|
||||
ssh netcup "cat >> /opt/ai-orchestrator/docker-compose.yml" << 'EOF'
|
||||
|
||||
stable-diffusion:
|
||||
image: ghcr.io/stablecog/sc-worker:latest
|
||||
ports: ["7860:7860"]
|
||||
volumes: ["/data/models/stable-diffusion:/models"]
|
||||
environment:
|
||||
USE_CPU: "true"
|
||||
EOF
|
||||
|
||||
ssh netcup "cd /opt/ai-orchestrator && docker-compose up -d"
|
||||
```
|
||||
|
||||
### Option C: Full Migration (4-5 weeks)
|
||||
|
||||
Follow: `NETCUP_MIGRATION_PLAN.md` for complete DigitalOcean → Netcup migration
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Quick Troubleshooting
|
||||
|
||||
### "Connection refused to 159.195.32.209:11434"
|
||||
|
||||
```bash
|
||||
# Check if firewall blocking
|
||||
ssh netcup "sudo ufw status"
|
||||
ssh netcup "sudo ufw allow 11434/tcp"
|
||||
ssh netcup "sudo ufw allow 8000/tcp" # For AI orchestrator later
|
||||
```
|
||||
|
||||
### "docker: command not found"
|
||||
|
||||
```bash
|
||||
# Install Docker
|
||||
ssh netcup << 'EOF'
|
||||
curl -fsSL https://get.docker.com -o get-docker.sh
|
||||
sudo sh get-docker.sh
|
||||
sudo usermod -aG docker $USER
|
||||
EOF
|
||||
|
||||
# Reconnect and retry
|
||||
ssh netcup "docker --version"
|
||||
```
|
||||
|
||||
### "Ollama model not found"
|
||||
|
||||
```bash
|
||||
# List installed models
|
||||
ssh netcup "docker exec ollama ollama list"
|
||||
|
||||
# If empty, pull model
|
||||
ssh netcup "docker exec ollama ollama pull llama3:8b"
|
||||
```
|
||||
|
||||
### "AI response very slow (>30s)"
|
||||
|
||||
```bash
|
||||
# Check if downloading model for first time
|
||||
ssh netcup "docker exec ollama ollama list"
|
||||
|
||||
# Use smaller model for testing
|
||||
ssh netcup "docker exec ollama ollama pull mistral:7b"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 💡 Quick Tips
|
||||
|
||||
1. **Start with 8B model**: Faster responses, good for testing
|
||||
2. **Use localhost for dev**: Point directly to Ollama URL
|
||||
3. **Deploy orchestrator later**: Once basic setup works
|
||||
4. **Monitor resources**: `ssh -t netcup htop` to check CPU/RAM (htop needs a TTY over SSH)
|
||||
5. **Test locally first**: Verify before adding RunPod costs
|
||||
|
||||
---
|
||||
|
||||
## 📋 Checklist
|
||||
|
||||
- [ ] SSH access to Netcup works
|
||||
- [ ] Docker installed and running
|
||||
- [ ] Redis and Ollama containers running
|
||||
- [ ] Llama3 model downloaded
|
||||
- [ ] Test curl request works
|
||||
- [ ] canvas-website .env.local configured
|
||||
- [ ] Browser test successful
|
||||
|
||||
**All checked?** You're ready! 🎉
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Next Steps
|
||||
|
||||
Choose your path:
|
||||
|
||||
**Path 1: Keep it Simple**
|
||||
- Use Ollama directly for text generation
|
||||
- Add user API keys in canvas settings for images
|
||||
- Deploy full orchestrator later
|
||||
|
||||
**Path 2: Deploy Full Stack**
|
||||
- Follow `AI_SERVICES_DEPLOYMENT_GUIDE.md`
|
||||
- Setup image + video generation
|
||||
- Enable cost tracking and monitoring
|
||||
|
||||
**Path 3: Full Migration**
|
||||
- Follow `NETCUP_MIGRATION_PLAN.md`
|
||||
- Migrate all services from DigitalOcean
|
||||
- Setup production infrastructure
|
||||
|
||||
---
|
||||
|
||||
## 📚 Reference Docs
|
||||
|
||||
- **This Guide**: Quick 30-min setup
|
||||
- **AI_SERVICES_SUMMARY.md**: Complete feature overview
|
||||
- **AI_SERVICES_DEPLOYMENT_GUIDE.md**: Full deployment (all services)
|
||||
- **NETCUP_MIGRATION_PLAN.md**: Complete migration plan (8 phases)
|
||||
- **RUNPOD_SETUP.md**: RunPod WhisperX setup
|
||||
- **TEST_RUNPOD_AI.md**: Testing guide
|
||||
|
||||
---
|
||||
|
||||
**Questions?** Check `AI_SERVICES_SUMMARY.md` or deployment guide!
|
||||
|
||||
**Ready for full setup?** Continue to `AI_SERVICES_DEPLOYMENT_GUIDE.md`! 🚀
|
||||
|
|
@ -0,0 +1,255 @@
|
|||
# RunPod WhisperX Integration Setup
|
||||
|
||||
This guide explains how to set up and use the RunPod WhisperX endpoint for transcription in the canvas website.
|
||||
|
||||
## Overview
|
||||
|
||||
The transcription system can now use a hosted WhisperX endpoint on RunPod instead of running the Whisper model locally in the browser. This provides:
|
||||
- Better accuracy with WhisperX's advanced features
|
||||
- Faster processing (no model download needed)
|
||||
- Reduced client-side resource usage
|
||||
- Support for longer audio files
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. A RunPod account with an active WhisperX endpoint
|
||||
2. Your RunPod API key
|
||||
3. Your RunPod endpoint ID
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
Add the following environment variables to your `.env.local` file (or your deployment environment):
|
||||
|
||||
```bash
|
||||
# RunPod Configuration
|
||||
VITE_RUNPOD_API_KEY=your_runpod_api_key_here
|
||||
VITE_RUNPOD_ENDPOINT_ID=your_endpoint_id_here
|
||||
```
|
||||
|
||||
Or if using Next.js:
|
||||
|
||||
```bash
|
||||
NEXT_PUBLIC_RUNPOD_API_KEY=your_runpod_api_key_here
|
||||
NEXT_PUBLIC_RUNPOD_ENDPOINT_ID=your_endpoint_id_here
|
||||
```
|
||||
|
||||
### Getting Your RunPod Credentials
|
||||
|
||||
1. **API Key**:
|
||||
- Go to [RunPod Settings](https://www.runpod.io/console/user/settings)
|
||||
- Navigate to API Keys section
|
||||
- Create a new API key or copy an existing one
|
||||
|
||||
2. **Endpoint ID**:
|
||||
- Go to [RunPod Serverless Endpoints](https://www.runpod.io/console/serverless)
|
||||
- Find your WhisperX endpoint
|
||||
- Copy the endpoint ID from the URL or endpoint details
|
||||
- Example: If your endpoint URL is `https://api.runpod.ai/v2/lrtisuv8ixbtub/run`, then `lrtisuv8ixbtub` is your endpoint ID
|
||||
|
||||
## Usage
|
||||
|
||||
### Automatic Detection
|
||||
|
||||
The transcription hook automatically detects if RunPod is configured and uses it instead of the local Whisper model. No code changes are needed!
|
||||
|
||||
### Manual Override
|
||||
|
||||
If you want to explicitly control which transcription method to use:
|
||||
|
||||
```typescript
|
||||
import { useWhisperTranscription } from '@/hooks/useWhisperTranscriptionSimple'
|
||||
|
||||
const {
|
||||
isRecording,
|
||||
transcript,
|
||||
startRecording,
|
||||
stopRecording
|
||||
} = useWhisperTranscription({
|
||||
useRunPod: true, // Force RunPod usage
|
||||
language: 'en',
|
||||
onTranscriptUpdate: (text) => {
|
||||
console.log('New transcript:', text)
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
Or to force local model:
|
||||
|
||||
```typescript
|
||||
useWhisperTranscription({
|
||||
useRunPod: false, // Force local Whisper model
|
||||
// ... other options
|
||||
})
|
||||
```
|
||||
|
||||
## API Format
|
||||
|
||||
The integration sends audio data to your RunPod endpoint in the following format:
|
||||
|
||||
```json
|
||||
{
|
||||
"input": {
|
||||
"audio": "base64_encoded_audio_data",
|
||||
"audio_format": "audio/wav",
|
||||
"language": "en",
|
||||
"task": "transcribe"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Expected Response Format
|
||||
|
||||
The endpoint should return one of these formats:
|
||||
|
||||
**Direct Response:**
|
||||
```json
|
||||
{
|
||||
"output": {
|
||||
"text": "Transcribed text here"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Or with segments:**
|
||||
```json
|
||||
{
|
||||
"output": {
|
||||
"segments": [
|
||||
{
|
||||
"start": 0.0,
|
||||
"end": 2.5,
|
||||
"text": "Transcribed text here"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Async Job Pattern:**
|
||||
```json
|
||||
{
|
||||
"id": "job-id-123",
|
||||
"status": "IN_QUEUE"
|
||||
}
|
||||
```
|
||||
|
||||
The integration automatically handles async jobs by polling the status endpoint until completion.
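
The polling loop looks roughly like this (a sketch assuming RunPod's standard `/v2/{endpointId}/status/{jobId}` endpoint; interval and timeout are arbitrary choices):

```typescript
// Poll a RunPod job until it completes, then return its output ({ text } or { segments }).
async function waitForRunPodJob(endpointId: string, apiKey: string, jobId: string) {
  const statusUrl = `https://api.runpod.ai/v2/${endpointId}/status/${jobId}`
  for (let attempt = 0; attempt < 60; attempt++) {
    const res = await fetch(statusUrl, { headers: { Authorization: `Bearer ${apiKey}` } })
    const job = await res.json()
    if (job.status === "COMPLETED") return job.output
    if (job.status === "FAILED") throw new Error(`RunPod job failed: ${JSON.stringify(job)}`)
    await new Promise((resolve) => setTimeout(resolve, 2000)) // poll every 2 seconds
  }
  throw new Error("Timed out waiting for RunPod job")
}
```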
|
||||
|
||||
## Customizing the API Request
|
||||
|
||||
If your WhisperX endpoint expects a different request format, you can modify `src/lib/runpodApi.ts`:
|
||||
|
||||
```typescript
|
||||
// In transcribeWithRunPod function
|
||||
const requestBody = {
|
||||
input: {
|
||||
// Adjust these fields based on your endpoint
|
||||
audio: audioBase64,
|
||||
// Add or modify fields as needed
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "RunPod API key or endpoint ID not configured"
|
||||
|
||||
- Ensure environment variables are set correctly
|
||||
- Restart your development server after adding environment variables
|
||||
- Check that variable names match exactly (case-sensitive)
|
||||
|
||||
### "RunPod API error: 401"
|
||||
|
||||
- Verify your API key is correct
|
||||
- Check that your API key has not expired
|
||||
- Ensure you're using the correct API key format
|
||||
|
||||
### "RunPod API error: 404"
|
||||
|
||||
- Verify your endpoint ID is correct
|
||||
- Check that your endpoint is active in the RunPod console
|
||||
- Ensure the endpoint URL format matches: `https://api.runpod.ai/v2/{ENDPOINT_ID}/run`
|
||||
|
||||
### "No transcription text found in RunPod response"
|
||||
|
||||
- Check your endpoint's response format matches the expected format
|
||||
- Verify your WhisperX endpoint is configured correctly
|
||||
- Check the browser console for detailed error messages
|
||||
|
||||
### "Failed to return job results" (400 Bad Request)
|
||||
|
||||
This error occurs on the **server side** when your WhisperX endpoint tries to return results. This typically means:
|
||||
|
||||
1. **Response format mismatch**: Your endpoint's response doesn't match RunPod's expected format
|
||||
- Ensure your endpoint returns: `{"output": {"text": "..."}}` or `{"output": {"segments": [...]}}`
|
||||
- The response must be valid JSON
|
||||
- Check your endpoint handler code to ensure it's returning the correct structure
|
||||
|
||||
2. **Response size limits**: The response might be too large
|
||||
- Try with shorter audio files first
|
||||
- Check RunPod's response size limits
|
||||
|
||||
3. **Timeout issues**: The endpoint might be taking too long to process
|
||||
- Check your endpoint logs for processing time
|
||||
- Consider optimizing your WhisperX model configuration
|
||||
|
||||
4. **Check endpoint handler**: Review your WhisperX endpoint's `handler.py` or equivalent:
|
||||
```python
|
||||
# Example correct format
|
||||
def handler(event):
|
||||
# ... process audio ...
|
||||
return {
|
||||
"output": {
|
||||
"text": transcription_text
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Transcription not working
|
||||
|
||||
- Check browser console for errors
|
||||
- Verify your endpoint is active and responding
|
||||
- Test your endpoint directly using curl or Postman
|
||||
- Ensure audio format is supported (WAV format is recommended)
|
||||
- Check RunPod endpoint logs for server-side errors
|
||||
|
||||
## Testing Your Endpoint
|
||||
|
||||
You can test your RunPod endpoint directly:
|
||||
|
||||
```bash
|
||||
curl -X POST https://api.runpod.ai/v2/YOUR_ENDPOINT_ID/run \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer YOUR_API_KEY" \
|
||||
-d '{
|
||||
"input": {
|
||||
"audio": "base64_audio_data_here",
|
||||
"audio_format": "audio/wav",
|
||||
"language": "en"
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
## Fallback Behavior
|
||||
|
||||
If RunPod is not configured or fails, the system will:
|
||||
1. Try to use RunPod if configured
|
||||
2. Fall back to local Whisper model if RunPod fails or is not configured
|
||||
3. Show error messages if both methods fail
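
Expressed as code, the fallback chain is roughly the following. It is illustrative only: `runPodConfigured` and `transcribeWithLocalWhisper` are hypothetical helper names, while `transcribeWithRunPod` is the function mentioned later in this guide:

```typescript
// Prefer the hosted WhisperX endpoint; fall back to the in-browser model on failure.
async function transcribe(audio: Blob): Promise<string> {
  if (runPodConfigured()) {
    try {
      return await transcribeWithRunPod(audio)
    } catch (err) {
      console.warn("RunPod transcription failed, falling back to local model", err)
    }
  }
  return transcribeWithLocalWhisper(audio)
}
```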
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
- **RunPod**: Better for longer audio files and higher accuracy, but requires network connection
|
||||
- **Local Model**: Works offline, but requires model download and uses more client resources
|
||||
|
||||
## Support
|
||||
|
||||
For issues specific to:
|
||||
- **RunPod API**: Check [RunPod Documentation](https://docs.runpod.io)
|
||||
- **WhisperX**: Check your WhisperX endpoint configuration
|
||||
- **Integration**: Check browser console for detailed error messages
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,91 @@
|
|||
# Sanitization Explanation
|
||||
|
||||
## Why Sanitization Exists
|
||||
|
||||
Sanitization is **necessary** because TLDraw has strict schema requirements that must be met for shapes to render correctly. Without sanitization, we get validation errors and broken shapes.
|
||||
|
||||
## Critical Fixes (MUST KEEP)
|
||||
|
||||
These fixes are **required** for TLDraw to work:
|
||||
|
||||
1. **Move w/h/geo from top-level to props for geo shapes**
|
||||
- TLDraw schema requires `w`, `h`, and `geo` to be in `props`, not at the top level
|
||||
- Without this, TLDraw throws validation errors
|
||||
|
||||
2. **Remove w/h from group shapes**
|
||||
- Group shapes don't have `w`/`h` properties
|
||||
- Having them causes validation errors
|
||||
|
||||
3. **Remove w/h from line shapes**
|
||||
- Line shapes use `points`, not `w`/`h`
|
||||
- Having them causes validation errors
|
||||
|
||||
4. **Fix richText structure**
|
||||
- TLDraw requires `richText` to be `{ content: [...], type: 'doc' }`
|
||||
- Old data might have it as an array or missing structure
|
||||
- We preserve all content, just fix the structure
|
||||
|
||||
5. **Fix crop structure for image/video**
|
||||
- TLDraw requires `crop` to be `{ topLeft: {x,y}, bottomRight: {x,y} }` or `null`
|
||||
- Old data might have `{ x, y, w, h }` format
|
||||
- We convert the format, preserving the crop area
|
||||
|
||||
6. **Remove h/geo from text shapes**
|
||||
- Text shapes don't have `h` or `geo` properties
|
||||
- Having them causes validation errors
|
||||
|
||||
7. **Ensure required properties exist**
|
||||
- Some shapes require certain properties (e.g., `points` for line shapes)
|
||||
- We only add defaults if truly missing
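
As an illustration of fixes 4 and 5 above, the conversions look roughly like this. These are sketches of the idea, not the exact code in the sync utilities:

```typescript
// Fix 4: TLDraw expects richText as { type: 'doc', content: [...] };
// older data may store a bare array of nodes.
function fixRichText(richText: unknown) {
  if (Array.isArray(richText)) return { type: "doc", content: richText }
  return richText
}

// Fix 5: convert the legacy { x, y, w, h } crop into TLDraw's corner-based
// crop, preserving the same cropped region.
type LegacyCrop = { x: number; y: number; w: number; h: number }

function fixCrop(crop: LegacyCrop | null) {
  if (!crop) return null
  return {
    topLeft: { x: crop.x, y: crop.y },
    bottomRight: { x: crop.x + crop.w, y: crop.y + crop.h },
  }
}
```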
|
||||
|
||||
## What We Preserve
|
||||
|
||||
We **preserve all user data**:
|
||||
- ✅ `richText` content (we only fix structure, never delete content)
|
||||
- ✅ `text` property on arrows
|
||||
- ✅ All metadata (`meta` object)
|
||||
- ✅ All valid shape properties
|
||||
- ✅ Custom shape properties
|
||||
|
||||
## What We Remove (Only When Necessary)
|
||||
|
||||
We only remove properties that:
|
||||
1. **Cause validation errors** (e.g., `w`/`h` on groups/lines)
|
||||
2. **Are invalid for the shape type** (e.g., `geo` on text shapes)
|
||||
|
||||
We **never** remove:
|
||||
- User-created content (text, richText)
|
||||
- Valid metadata
|
||||
- Properties that don't cause errors
|
||||
|
||||
## Current Sanitization Locations
|
||||
|
||||
1. **TLStoreToAutomerge.ts** - When saving from TLDraw to Automerge
|
||||
- Minimal fixes only
|
||||
- Preserves all data
|
||||
|
||||
2. **AutomergeToTLStore.ts** - When loading from Automerge to TLDraw
|
||||
- Minimal fixes only
|
||||
- Preserves all data
|
||||
|
||||
3. **useAutomergeStoreV2.ts** - Initial load processing
|
||||
- More extensive (handles migration from old formats)
|
||||
- Still preserves all user data
|
||||
|
||||
## Can We Simplify?
|
||||
|
||||
**Yes, but carefully:**
|
||||
|
||||
1. ✅ We can remove property deletions that don't cause validation errors
|
||||
2. ✅ We can consolidate duplicate logic
|
||||
3. ❌ We **cannot** remove schema fixes (w/h/geo movement, richText structure)
|
||||
4. ❌ We **cannot** remove property deletions that cause validation errors
|
||||
|
||||
## Recommendation
|
||||
|
||||
Keep sanitization but:
|
||||
1. Only delete properties that **actually cause validation errors**
|
||||
2. Preserve all user data (text, richText, metadata)
|
||||
3. Consolidate duplicate logic between files
|
||||
4. Add comments explaining why each fix is necessary
|
||||
|
||||
|
|
@ -0,0 +1,139 @@
|
|||
# Testing RunPod AI Integration
|
||||
|
||||
This guide explains how to test the RunPod AI API integration in development.
|
||||
|
||||
## Quick Setup
|
||||
|
||||
1. **Add RunPod environment variables to `.env.local`:**
|
||||
|
||||
```bash
|
||||
# Add these lines to your .env.local file
|
||||
VITE_RUNPOD_API_KEY=your_runpod_api_key_here
|
||||
VITE_RUNPOD_ENDPOINT_ID=your_endpoint_id_here
|
||||
```
|
||||
|
||||
**Important:** Replace `your_runpod_api_key_here` and `your_endpoint_id_here` with your actual RunPod credentials.
|
||||
|
||||
2. **Get your RunPod credentials:**
|
||||
- **API Key**: Go to [RunPod Settings](https://www.runpod.io/console/user/settings) → API Keys section
|
||||
- **Endpoint ID**: Go to [RunPod Serverless Endpoints](https://www.runpod.io/console/serverless) → Find your endpoint → Copy the ID from the URL
|
||||
- Example: If URL is `https://api.runpod.ai/v2/jqd16o7stu29vq/run`, then `jqd16o7stu29vq` is your endpoint ID
|
||||
|
||||
3. **Restart the dev server:**
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
|
||||
## Testing the Integration
|
||||
|
||||
### Method 1: Using Prompt Shapes
|
||||
1. Open the canvas website in your browser
|
||||
2. Select the **Prompt** tool from the toolbar (or press the keyboard shortcut)
|
||||
3. Click on the canvas to create a prompt shape
|
||||
4. Type a prompt like "Write a hello world program in Python"
|
||||
5. Press Enter or click the send button
|
||||
6. The AI response should appear in the prompt shape
|
||||
|
||||
### Method 2: Using Arrow LLM Action
|
||||
1. Create an arrow shape pointing from one shape to another
|
||||
2. Add text to the arrow (this becomes the prompt)
|
||||
3. Select the arrow
|
||||
4. Press **Alt+G** (or use the action menu)
|
||||
5. The AI will process the prompt and fill the target shape with the response
|
||||
|
||||
### Method 3: Using Command Palette
|
||||
1. Press **Cmd+J** (Mac) or **Ctrl+J** (Windows/Linux) to open the LLM view
|
||||
2. Type your prompt
|
||||
3. Press Enter
|
||||
4. The response should appear
|
||||
|
||||
## Verifying RunPod is Being Used
|
||||
|
||||
1. **Open browser console** (F12 or Cmd+Option+I)
|
||||
2. Look for these log messages:
|
||||
- `🔑 Found RunPod configuration from environment variables - using as primary AI provider`
|
||||
- `🔍 Found X available AI providers: runpod (default)`
|
||||
- `🔄 Attempting to use runpod API (default)...`
|
||||
|
||||
3. **Check Network tab:**
|
||||
- Look for requests to `https://api.runpod.ai/v2/{endpointId}/run`
|
||||
- The request should have `Authorization: Bearer {your_api_key}` header
|
||||
|
||||
## Expected Behavior
|
||||
|
||||
- **With RunPod configured**: RunPod will be used FIRST (priority over user API keys)
|
||||
- **Without RunPod**: System will fall back to user-configured API keys (OpenAI, Anthropic, etc.)
|
||||
- **If both fail**: You'll see an error message
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "No valid API key found for any provider"
|
||||
- Check that `.env.local` has the correct variable names (`VITE_RUNPOD_API_KEY` and `VITE_RUNPOD_ENDPOINT_ID`)
|
||||
- Restart the dev server after adding environment variables
|
||||
- Check browser console for detailed error messages
|
||||
|
||||
### "RunPod API error: 401"
|
||||
- Verify your API key is correct
|
||||
- Check that your API key hasn't expired
|
||||
- Ensure you're using the correct API key format
|
||||
|
||||
### "RunPod API error: 404"
|
||||
- Verify your endpoint ID is correct
|
||||
- Check that your endpoint is active in RunPod console
|
||||
- Ensure the endpoint URL format matches: `https://api.runpod.ai/v2/{ENDPOINT_ID}/run`
|
||||
|
||||
### RunPod not being used
|
||||
- Check browser console for `🔑 Found RunPod configuration` message
|
||||
- Verify environment variables are loaded (check `import.meta.env.VITE_RUNPOD_API_KEY` in console)
|
||||
- Make sure you restarted the dev server after adding environment variables
|
||||
|
||||
## Testing Different Scenarios
|
||||
|
||||
### Test 1: RunPod Only (No User Keys)
|
||||
1. Remove or clear any user API keys from localStorage
|
||||
2. Set RunPod environment variables
|
||||
3. Run an AI command
|
||||
4. Should use RunPod automatically
|
||||
|
||||
### Test 2: RunPod Priority (With User Keys)
|
||||
1. Set RunPod environment variables
|
||||
2. Also configure user API keys in settings
|
||||
3. Run an AI command
|
||||
4. Should use RunPod FIRST, then fall back to user keys if RunPod fails
|
||||
|
||||
### Test 3: Fallback Behavior
|
||||
1. Set RunPod environment variables with invalid credentials
|
||||
2. Configure valid user API keys
|
||||
3. Run an AI command
|
||||
4. Should try RunPod first, fail, then use user keys
|
||||
|
||||
## API Request Format
|
||||
|
||||
The integration sends requests in this format:
|
||||
|
||||
```json
|
||||
{
|
||||
"input": {
|
||||
"prompt": "Your prompt text here"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The system prompt and user prompt are combined into a single prompt string.
|
||||
|
||||
## Response Handling
|
||||
|
||||
The integration handles multiple response formats:
|
||||
- Direct text response: `{ "output": "text" }`
|
||||
- Object with text: `{ "output": { "text": "..." } }`
|
||||
- Object with response: `{ "output": { "response": "..." } }`
|
||||
- Async jobs: Polls until completion
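
Put together, the request construction and tolerant response parsing look roughly like this (a sketch; the field names mirror the formats listed above and nothing more):

```typescript
// Combine system and user prompts into the single prompt string RunPod receives.
function buildRunPodRequest(systemPrompt: string, userPrompt: string) {
  return { input: { prompt: `${systemPrompt}\n\n${userPrompt}` } }
}

// Accept any of the documented output shapes and pull out the text.
function extractText(output: unknown): string | undefined {
  if (typeof output === "string") return output // { "output": "text" }
  if (output && typeof output === "object") {
    const o = output as { text?: string; response?: string }
    return o.text ?? o.response // { "output": { "text" } } or { "output": { "response" } }
  }
  return undefined
}
```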
|
||||
|
||||
## Next Steps
|
||||
|
||||
Once testing is successful:
|
||||
1. Verify RunPod responses are working correctly
|
||||
2. Test with different prompt types
|
||||
3. Monitor RunPod usage and costs
|
||||
4. Consider adding rate limiting if needed
|
||||
|
||||
|
|
@ -0,0 +1,84 @@
|
|||
# TLDraw Interactive Elements - Z-Index Requirements
|
||||
|
||||
## Important Note for Developers
|
||||
|
||||
When creating tldraw shapes that contain interactive elements (buttons, inputs, links, etc.), you **MUST** set appropriate z-index values to ensure these elements are clickable and accessible.
|
||||
|
||||
## The Problem
|
||||
|
||||
TLDraw's canvas has its own event handling and layering system. Interactive elements within custom shapes can be blocked by the canvas's event listeners, making them unclickable or unresponsive.
|
||||
|
||||
## The Solution
|
||||
|
||||
Always add the following CSS properties to interactive elements:
|
||||
|
||||
```css
|
||||
.interactive-element {
|
||||
position: relative;
|
||||
z-index: 1000; /* or higher if needed */
|
||||
}
|
||||
```
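
If a shape renders its controls with inline styles rather than a stylesheet, the same two properties apply directly (an illustrative sketch, not an existing component in this repo):

```tsx
// Button rendered inside a custom shape, lifted above the canvas layer.
export function ShapeActionButton(props: { label: string; onClick: () => void }) {
  return (
    <button onClick={props.onClick} style={{ position: "relative", zIndex: 1000 }}>
      {props.label}
    </button>
  )
}
```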
|
||||
|
||||
## Examples
|
||||
|
||||
### Buttons
|
||||
```css
|
||||
.custom-button {
|
||||
/* ... other styles ... */
|
||||
position: relative;
|
||||
z-index: 1000;
|
||||
}
|
||||
```
|
||||
|
||||
### Input Fields
|
||||
```css
|
||||
.custom-input {
|
||||
/* ... other styles ... */
|
||||
position: relative;
|
||||
z-index: 1000;
|
||||
}
|
||||
```
|
||||
|
||||
### Links
|
||||
```css
|
||||
.custom-link {
|
||||
/* ... other styles ... */
|
||||
position: relative;
|
||||
z-index: 1000;
|
||||
}
|
||||
```
|
||||
|
||||
## Z-Index Guidelines
|
||||
|
||||
- **1000**: Standard interactive elements (buttons, inputs, links)
|
||||
- **1001-1999**: Dropdowns, modals, tooltips
|
||||
- **2000+**: Critical overlays, error messages
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
Before deploying any tldraw shape with interactive elements:
|
||||
|
||||
- [ ] Test clicking all buttons/links
|
||||
- [ ] Test input field focus and typing
|
||||
- [ ] Test hover states
|
||||
- [ ] Test on different screen sizes
|
||||
- [ ] Verify elements work when shape is selected/deselected
|
||||
- [ ] Verify elements work when shape is moved/resized
|
||||
|
||||
## Common Issues
|
||||
|
||||
1. **Elements appear clickable but don't respond** → Add z-index
|
||||
2. **Hover states don't work** → Add z-index
|
||||
3. **Elements work sometimes but not others** → Check z-index conflicts
|
||||
4. **Mobile touch events don't work** → Ensure z-index is high enough
|
||||
|
||||
## Files to Remember
|
||||
|
||||
This note should be updated whenever new interactive elements are added to tldraw shapes. Current shapes with interactive elements:
|
||||
|
||||
- `src/components/TranscribeComponent.tsx` - Copy button (z-index: 1000)
|
||||
|
||||
## Last Updated
|
||||
|
||||
Created: [Current Date]
|
||||
Last Updated: [Current Date]
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
# Transcription Setup Guide
|
||||
|
||||
## Why the Start Button Doesn't Work
|
||||
|
||||
The transcription start button is likely disabled because the **OpenAI API key is not configured**. The button will be disabled and show a tooltip "OpenAI API key not configured - Please set your API key in settings" when this is the case.
|
||||
|
||||
## How to Fix It
|
||||
|
||||
### Step 1: Get an OpenAI API Key
|
||||
1. Go to [OpenAI API Keys](https://platform.openai.com/api-keys)
|
||||
2. Sign in to your OpenAI account
|
||||
3. Click "Create new secret key"
|
||||
4. Copy the API key (it starts with `sk-`)
|
||||
|
||||
### Step 2: Configure the API Key in Canvas
|
||||
1. In your Canvas application, look for the **Settings** button (usually a gear icon)
|
||||
2. Open the settings dialog
|
||||
3. Find the **OpenAI API Key** field
|
||||
4. Paste your API key
|
||||
5. Save the settings
|
||||
|
||||
### Step 3: Test the Transcription
|
||||
1. Create a transcription shape on the canvas
|
||||
2. Click the "Start" button
|
||||
3. Allow microphone access when prompted
|
||||
4. Start speaking - you should see the transcription appear in real-time
|
||||
|
||||
## Debugging Information
|
||||
|
||||
The application now includes debug logging to help identify issues:
|
||||
|
||||
- **Console Logs**: Check the browser console for messages starting with `🔧 OpenAI Config Debug:`
|
||||
- **Visual Indicators**: The transcription window will show "(API Key Required)" if not configured
|
||||
- **Button State**: The start button will be disabled and grayed out if the API key is missing
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Button Still Disabled After Adding API Key
|
||||
1. Refresh the page to reload the configuration
|
||||
2. Check the browser console for any error messages
|
||||
3. Verify the API key is correctly saved in settings
|
||||
|
||||
### Microphone Permission Issues
|
||||
1. Make sure you've granted microphone access to the browser
|
||||
2. Check that your microphone is working in other applications
|
||||
3. Try refreshing the page and granting permission again
|
||||
|
||||
### No Audio Being Recorded
|
||||
1. Check the browser console for audio-related error messages
|
||||
2. Verify your microphone is not being used by another application
|
||||
3. Try using a different browser if issues persist
|
||||
|
||||
## Technical Details
|
||||
|
||||
The transcription system:
|
||||
- Uses the device microphone directly (not Daily room audio)
|
||||
- Records audio in WebM format
|
||||
- Sends audio chunks to OpenAI's Whisper API
|
||||
- Updates the transcription shape in real-time
|
||||
- Requires a valid OpenAI API key to function
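
A rough sketch of that pipeline, recording one complete WebM clip and sending it to OpenAI's transcription endpoint. The real hook sends repeated segments to update the shape in real time; this shows a single round trip:

```typescript
// Record a short WebM clip from the microphone and transcribe it with Whisper.
async function recordAndTranscribe(apiKey: string, durationMs = 5000): Promise<string> {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
  const recorder = new MediaRecorder(stream, { mimeType: "audio/webm" })
  const chunks: Blob[] = []
  recorder.ondataavailable = (e) => chunks.push(e.data)

  const stopped = new Promise<void>((resolve) => (recorder.onstop = () => resolve()))
  recorder.start()
  setTimeout(() => recorder.stop(), durationMs)
  await stopped
  stream.getTracks().forEach((t) => t.stop())

  const form = new FormData()
  form.append("file", new Blob(chunks, { type: "audio/webm" }), "audio.webm")
  form.append("model", "whisper-1")
  const res = await fetch("https://api.openai.com/v1/audio/transcriptions", {
    method: "POST",
    headers: { Authorization: `Bearer ${apiKey}` },
    body: form,
  })
  const { text } = await res.json()
  return text
}
```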
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
# Worker Environment Switching Guide
|
||||
|
||||
## Quick Switch Commands
|
||||
|
||||
### Switch to Dev Environment (Default)
|
||||
```bash
|
||||
./switch-worker-env.sh dev
|
||||
```
|
||||
|
||||
### Switch to Production Environment
|
||||
```bash
|
||||
./switch-worker-env.sh production
|
||||
```
|
||||
|
||||
### Switch to Local Environment
|
||||
```bash
|
||||
./switch-worker-env.sh local
|
||||
```
|
||||
|
||||
## Manual Switching
|
||||
|
||||
You can also switch environments manually:
|
||||
|
||||
1. **Option 1**: Set environment variable
|
||||
```bash
|
||||
export VITE_WORKER_ENV=dev
|
||||
```
|
||||
|
||||
2. **Option 2**: Edit `.env.local` file
|
||||
```
|
||||
VITE_WORKER_ENV=dev
|
||||
```
|
||||
|
||||
3. **Option 3**: Edit `src/constants/workerUrl.ts` directly
|
||||
```typescript
|
||||
const WORKER_ENV = 'dev' // Change this line
|
||||
```
|
||||
|
||||
## Available Environments
|
||||
|
||||
| Environment | URL | Description |
|
||||
|-------------|-----|-------------|
|
||||
| `local` | `http://localhost:5172` | Local worker (requires `npm run dev:worker:local`) |
|
||||
| `dev` | `https://jeffemmett-canvas-automerge-dev.jeffemmett.workers.dev` | Cloudflare dev environment |
|
||||
| `production` | `https://jeffemmett-canvas.jeffemmett.workers.dev` | Production environment |
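
One way `src/constants/workerUrl.ts` can map `VITE_WORKER_ENV` to these URLs (a sketch; the repo's actual implementation may differ):

```typescript
// Resolve the worker URL from the environment name, defaulting to dev.
const WORKER_URLS = {
  local: "http://localhost:5172",
  dev: "https://jeffemmett-canvas-automerge-dev.jeffemmett.workers.dev",
  production: "https://jeffemmett-canvas.jeffemmett.workers.dev",
} as const

type WorkerEnv = keyof typeof WORKER_URLS

const env = (import.meta.env.VITE_WORKER_ENV ?? "dev") as WorkerEnv
export const WORKER_URL = WORKER_URLS[env] ?? WORKER_URLS.dev
```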
|
||||
|
||||
## Current Status
|
||||
|
||||
- ✅ **Dev Environment**: Working with AutomergeDurableObject
|
||||
- ✅ **R2 Data Loading**: Fixed format conversion
|
||||
- ✅ **WebSocket**: Improved with keep-alive and reconnection
|
||||
- 🔄 **Production**: Ready to deploy when testing is complete
|
||||
|
||||
## Testing the Fix
|
||||
|
||||
1. Switch to dev environment: `./switch-worker-env.sh dev`
|
||||
2. Start your frontend: `npm run dev`
|
||||
3. Check browser console for environment logs
|
||||
4. Test R2 data loading in your canvas app
|
||||
5. Verify WebSocket connections are stable
|
||||
|
|
|
@ -0,0 +1,341 @@
|
|||
# Git Worktree Automation Setup
|
||||
|
||||
This repository is configured to automatically create Git worktrees for new branches, allowing you to work on multiple branches simultaneously without switching contexts.
|
||||
|
||||
## What Are Worktrees?
|
||||
|
||||
Git worktrees allow you to have multiple working directories (copies of your repo) checked out to different branches at the same time. This means:
|
||||
|
||||
- No need to stash or commit work when switching branches
|
||||
- Run dev servers on multiple branches simultaneously
|
||||
- Compare code across branches easily
|
||||
- Keep your main branch clean while working on features
|
||||
|
||||
## Automatic Worktree Creation
|
||||
|
||||
A Git hook (`.git/hooks/post-checkout`) is installed that automatically creates worktrees when you create a new branch from `main`:
|
||||
|
||||
```bash
|
||||
# This will automatically create a worktree at ../canvas-website-feature-name
|
||||
git checkout -b feature/new-feature
|
||||
```
|
||||
|
||||
**Worktree Location Pattern:**
|
||||
```
|
||||
/home/jeffe/Github/
|
||||
├── canvas-website/ # Main repo (main branch)
|
||||
├── canvas-website-feature-name/ # Worktree for feature branch
|
||||
└── canvas-website-bugfix-something/ # Worktree for bugfix branch
|
||||
```
|
||||
|
||||
## Manual Worktree Management
|
||||
|
||||
Use the `worktree-manager.sh` script for manual management:
|
||||
|
||||
### List All Worktrees
|
||||
```bash
|
||||
./scripts/worktree-manager.sh list
|
||||
```
|
||||
|
||||
### Create a New Worktree
|
||||
```bash
|
||||
# Creates worktree for existing branch
|
||||
./scripts/worktree-manager.sh create feature/my-feature
|
||||
|
||||
# Or create new branch with worktree
|
||||
./scripts/worktree-manager.sh create feature/new-branch
|
||||
```
|
||||
|
||||
### Remove a Worktree
|
||||
```bash
|
||||
./scripts/worktree-manager.sh remove feature/old-feature
|
||||
```
|
||||
|
||||
### Clean Up All Worktrees (Keep Main)
|
||||
```bash
|
||||
./scripts/worktree-manager.sh clean
|
||||
```
|
||||
|
||||
### Show Status of All Worktrees
|
||||
```bash
|
||||
./scripts/worktree-manager.sh status
|
||||
```
|
||||
|
||||
### Navigate to a Worktree
|
||||
```bash
|
||||
# Get worktree path
|
||||
./scripts/worktree-manager.sh goto feature/my-feature
|
||||
|
||||
# Or use with cd
|
||||
cd $(./scripts/worktree-manager.sh goto feature/my-feature)
|
||||
```
|
||||
|
||||
### Help
|
||||
```bash
|
||||
./scripts/worktree-manager.sh help
|
||||
```
|
||||
|
||||
## Workflow Examples
|
||||
|
||||
### Starting a New Feature
|
||||
|
||||
**With automatic worktree creation:**
|
||||
```bash
|
||||
# In main repo
|
||||
cd /home/jeffe/Github/canvas-website
|
||||
|
||||
# Create and switch to new branch (worktree auto-created)
|
||||
git checkout -b feature/terminal-tool
|
||||
|
||||
# Notification appears:
|
||||
# 🌳 Creating worktree for branch: feature/terminal-tool
|
||||
# 📁 Location: /home/jeffe/Github/canvas-website-feature-terminal-tool
|
||||
|
||||
# Continue working in current directory or switch to worktree
|
||||
cd ../canvas-website-feature-terminal-tool
|
||||
```
|
||||
|
||||
**Manual worktree creation:**
|
||||
```bash
|
||||
./scripts/worktree-manager.sh create feature/my-feature
|
||||
cd $(./scripts/worktree-manager.sh goto feature/my-feature)
|
||||
```
|
||||
|
||||
### Working on Multiple Features Simultaneously
|
||||
|
||||
```bash
|
||||
# Terminal 1: Main repo (main branch)
|
||||
cd /home/jeffe/Github/canvas-website
|
||||
npm run dev # Port 5173
|
||||
|
||||
# Terminal 2: Feature branch 1
|
||||
cd /home/jeffe/Github/canvas-website-feature-auth
|
||||
npm run dev # Different port
|
||||
|
||||
# Terminal 3: Feature branch 2
|
||||
cd /home/jeffe/Github/canvas-website-feature-ui
|
||||
npm run dev # Another port
|
||||
|
||||
# All running simultaneously, no conflicts!
|
||||
```
|
||||
|
||||
### Comparing Code Across Branches
|
||||
|
||||
```bash
|
||||
# Use diff or your IDE to compare files
|
||||
diff /home/jeffe/Github/canvas-website/src/App.tsx \
|
||||
/home/jeffe/Github/canvas-website-feature-auth/src/App.tsx
|
||||
|
||||
# Or open both in VS Code
|
||||
code /home/jeffe/Github/canvas-website \
|
||||
/home/jeffe/Github/canvas-website-feature-auth
|
||||
```
|
||||
|
||||
### Cleaning Up After Merging
|
||||
|
||||
```bash
|
||||
# After merging feature/my-feature to main
|
||||
cd /home/jeffe/Github/canvas-website
|
||||
|
||||
# Remove the worktree
|
||||
./scripts/worktree-manager.sh remove feature/my-feature
|
||||
|
||||
# Or clean all worktrees except main
|
||||
./scripts/worktree-manager.sh clean
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
### Post-Checkout Hook
|
||||
|
||||
The `.git/hooks/post-checkout` script runs automatically after `git checkout` and:
|
||||
|
||||
1. Detects if you're creating a new branch from `main`
|
||||
2. Creates a worktree in `../canvas-website-{branch-name}`
|
||||
3. Links the worktree to the new branch
|
||||
4. Shows a notification with the worktree path
|
||||
|
||||
**Hook Behavior:**
|
||||
- ✅ Creates worktree when: `git checkout -b new-branch` (from main)
|
||||
- ❌ Skips creation when:
|
||||
- Switching to existing branches
|
||||
- Already in a worktree
|
||||
- Worktree already exists for that branch
|
||||
- Not branching from main/master
|
||||
|
||||
### Worktree Manager Script
|
||||
|
||||
The `scripts/worktree-manager.sh` script provides:
|
||||
- User-friendly commands for worktree operations
|
||||
- Colored output for better readability
|
||||
- Error handling and validation
|
||||
- Status reporting across all worktrees
|
||||
|
||||
## Git Commands with Worktrees
|
||||
|
||||
Most Git commands work the same way in worktrees:
|
||||
|
||||
```bash
|
||||
# In any worktree
|
||||
git status # Shows status of current worktree
|
||||
git add . # Stages files in current worktree
|
||||
git commit -m "..." # Commits in current branch
|
||||
git push # Pushes current branch
|
||||
git pull # Pulls current branch
|
||||
|
||||
# List all worktrees (works from any worktree)
|
||||
git worktree list
|
||||
|
||||
# Remove a worktree (from main repo)
|
||||
git worktree remove feature/branch-name
|
||||
|
||||
# Prune deleted worktrees
|
||||
git worktree prune
|
||||
```
|
||||
|
||||
## Important Notes
|
||||
|
||||
### Shared Git Directory
|
||||
|
||||
All worktrees share the same `.git` directory (in the main repo), which means:
|
||||
- ✅ Commits, branches, and remotes are shared across all worktrees
|
||||
- ✅ One `git fetch` or `git pull` in main updates all worktrees
|
||||
- ⚠️ Don't delete the main repo while worktrees exist
|
||||
- ⚠️ Stashes are shared (stash in one worktree, pop in another)
|
||||
|
||||
### Node Modules
|
||||
|
||||
Each worktree has its own `node_modules`:
|
||||
- First time entering a worktree: run `npm install`
|
||||
- Dependencies may differ across branches
|
||||
- More disk space usage (one `node_modules` per worktree)
|
||||
|
||||
### Port Conflicts
|
||||
|
||||
When running dev servers in multiple worktrees:
|
||||
```bash
|
||||
# Main repo
|
||||
npm run dev # Uses default port 5173
|
||||
|
||||
# In worktree, specify different port
|
||||
npm run dev -- --port 5174
|
||||
```
|
||||
|
||||
### IDE Integration
|
||||
|
||||
**VS Code:**
|
||||
```bash
|
||||
# Open specific worktree
|
||||
code /home/jeffe/Github/canvas-website-feature-name
|
||||
|
||||
# Or open multiple worktrees as workspace
|
||||
code --add /home/jeffe/Github/canvas-website \
|
||||
--add /home/jeffe/Github/canvas-website-feature-name
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Worktree Path Already Exists
|
||||
|
||||
If you see:
|
||||
```
|
||||
fatal: '/path/to/worktree' already exists
|
||||
```
|
||||
|
||||
Remove the directory manually:
|
||||
```bash
|
||||
rm -rf /home/jeffe/Github/canvas-website-feature-name
|
||||
git worktree prune
|
||||
```
|
||||
|
||||
### Can't Delete Main Repo
|
||||
|
||||
If you have active worktrees, you can't delete the main repo. Clean up first:
|
||||
```bash
|
||||
./scripts/worktree-manager.sh clean
|
||||
```
|
||||
|
||||
### Worktree Out of Sync
|
||||
|
||||
If a worktree seems out of sync:
|
||||
```bash
|
||||
cd /path/to/worktree
|
||||
git fetch origin
|
||||
git reset --hard origin/branch-name
|
||||
```
|
||||
|
||||
### Hook Not Running
|
||||
|
||||
If the post-checkout hook isn't running:
|
||||
```bash
|
||||
# Check if it's executable
|
||||
ls -la .git/hooks/post-checkout
|
||||
|
||||
# Make it executable if needed
|
||||
chmod +x .git/hooks/post-checkout
|
||||
|
||||
# Test the hook manually
|
||||
.git/hooks/post-checkout HEAD HEAD 1
|
||||
```
|
||||
|
||||
## Disabling Automatic Worktrees
|
||||
|
||||
To disable automatic worktree creation:
|
||||
|
||||
```bash
|
||||
# Remove or rename the hook
|
||||
mv .git/hooks/post-checkout .git/hooks/post-checkout.disabled
|
||||
```
|
||||
|
||||
To re-enable:
|
||||
```bash
|
||||
mv .git/hooks/post-checkout.disabled .git/hooks/post-checkout
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Custom Worktree Location
|
||||
|
||||
Modify the `post-checkout` hook to change the worktree location:
|
||||
```bash
|
||||
# Edit .git/hooks/post-checkout
|
||||
# Change this line:
|
||||
WORKTREE_BASE=$(dirname "$REPO_ROOT")
|
||||
|
||||
# To (example):
|
||||
WORKTREE_BASE="$HOME/worktrees"
|
||||
```
|
||||
|
||||
### Worktree for Remote Branches
|
||||
|
||||
```bash
|
||||
# Create worktree for remote branch
|
||||
git worktree add ../canvas-website-remote-branch origin/feature-branch
|
||||
|
||||
# Or use the script
|
||||
./scripts/worktree-manager.sh create origin/feature-branch
|
||||
```
|
||||
|
||||
### Detached HEAD Worktree
|
||||
|
||||
```bash
|
||||
# Create worktree at specific commit
|
||||
git worktree add ../canvas-website-commit-abc123 abc123
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Clean up regularly**: Remove worktrees for merged branches
|
||||
2. **Name branches clearly**: Worktree names mirror branch names
|
||||
3. **Run npm install**: Always run in new worktrees
|
||||
4. **Check branch**: Always verify which branch you're on before committing
|
||||
5. **Use status command**: Check all worktrees before major operations
|
||||
|
||||
## Resources
|
||||
|
||||
- [Git Worktree Documentation](https://git-scm.com/docs/git-worktree)
|
||||
- [Git Hooks Documentation](https://git-scm.com/docs/githooks)
|
||||
|
||||
---
|
||||
|
||||
**Setup Complete!** New branches will automatically create worktrees. Use `./scripts/worktree-manager.sh help` for manual management.
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
# Cloudflare Pages redirects and rewrites
|
||||
# This file handles SPA routing and URL rewrites (replaces vercel.json rewrites)
|
||||
|
||||
# Specific route rewrites (matching vercel.json)
|
||||
# Handle both with and without trailing slashes
|
||||
/board/* /index.html 200
|
||||
/board /index.html 200
|
||||
/board/ /index.html 200
|
||||
/inbox /index.html 200
|
||||
/inbox/ /index.html 200
|
||||
/contact /index.html 200
|
||||
/contact/ /index.html 200
|
||||
/presentations /index.html 200
|
||||
/presentations/ /index.html 200
|
||||
/presentations/* /index.html 200
|
||||
/dashboard /index.html 200
|
||||
/dashboard/ /index.html 200
|
||||
/login /index.html 200
|
||||
/login/ /index.html 200
|
||||
/debug /index.html 200
|
||||
/debug/ /index.html 200
|
||||
|
||||
# SPA fallback - all routes should serve index.html (must be last)
|
||||
/* /index.html 200
|
||||
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
project_name: "Canvas Feature List"
|
||||
default_status: "To Do"
|
||||
statuses: ["To Do", "In Progress", "Done"]
|
||||
labels: []
|
||||
milestones: []
|
||||
date_format: yyyy-mm-dd
|
||||
max_column_width: 20
|
||||
auto_open_browser: true
|
||||
default_port: 6420
|
||||
remote_operations: true
|
||||
auto_commit: true
|
||||
zero_padded_ids: 3
|
||||
bypass_git_hooks: false
|
||||
check_active_branches: true
|
||||
active_branch_days: 60
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
---
|
||||
id: task-001
|
||||
title: offline local storage
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-03 23:42'
|
||||
updated_date: '2025-12-04 12:13'
|
||||
labels: []
|
||||
dependencies: []
|
||||
---
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
---
|
||||
id: task-002
|
||||
title: RunPod AI API Integration
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
labels: [feature, ai, integration]
|
||||
priority: high
|
||||
branch: add-runpod-AI-API
|
||||
worktree: /home/jeffe/Github/canvas-website-branch-worktrees/add-runpod-AI-API
|
||||
updated_date: '2025-12-04 13:43'
|
||||
---
|
||||
|
||||
## Description
|
||||
Integrate RunPod serverless AI API for image generation and other AI features on the canvas.
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `add-runpod-AI-API`
|
||||
- **Worktree**: `/home/jeffe/Github/canvas-website-branch-worktrees/add-runpod-AI-API`
|
||||
- **Commit**: 083095c
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Connect to RunPod serverless endpoints
|
||||
- [ ] Implement image generation from canvas
|
||||
- [ ] Handle AI responses and display on canvas
|
||||
- [ ] Error handling and loading states
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
---
|
||||
id: task-003
|
||||
title: MulTmux Web Integration
|
||||
status: In Progress
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
labels: [feature, terminal, integration]
|
||||
priority: medium
|
||||
branch: mulTmux-webtree
|
||||
worktree: /home/jeffe/Github/canvas-website-branch-worktrees/mulTmux-webtree
|
||||
---
|
||||
|
||||
## Description
|
||||
Integrate MulTmux web terminal functionality into the canvas for terminal-based interactions.
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `mulTmux-webtree`
|
||||
- **Worktree**: `/home/jeffe/Github/canvas-website-branch-worktrees/mulTmux-webtree`
|
||||
- **Commit**: 8ea3490
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Embed terminal component in canvas
|
||||
- [ ] Handle terminal I/O within canvas context
|
||||
- [ ] Support multiple terminal sessions
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
---
|
||||
id: task-004
|
||||
title: IO Chip Feature
|
||||
status: In Progress
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
labels: [feature, io, ui]
|
||||
priority: medium
|
||||
branch: feature/io-chip
|
||||
worktree: /home/jeffe/Github/canvas-website-io-chip
|
||||
---
|
||||
|
||||
## Description
|
||||
Implement IO chip feature for the canvas - enabling input/output connections between canvas elements.
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `feature/io-chip`
|
||||
- **Worktree**: `/home/jeffe/Github/canvas-website-io-chip`
|
||||
- **Commit**: 527462a
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Create IO chip component
|
||||
- [ ] Enable connections between canvas elements
|
||||
- [ ] Handle data flow between connected chips
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
---
|
||||
id: task-005
|
||||
title: Automerge CRDT Sync
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
labels: [feature, sync, collaboration]
|
||||
priority: high
|
||||
branch: Automerge
|
||||
---
|
||||
|
||||
## Description
|
||||
Implement Automerge CRDT-based synchronization for real-time collaborative canvas editing.
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `Automerge`
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Integrate Automerge library
|
||||
- [ ] Enable real-time sync between clients
|
||||
- [ ] Handle conflict resolution automatically
|
||||
- [ ] Persist state across sessions
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
---
|
||||
id: task-006
|
||||
title: Stripe Payment Integration
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
labels: [feature, payments, integration]
|
||||
priority: medium
|
||||
branch: stripe-integration
|
||||
---
|
||||
|
||||
## Description
|
||||
Integrate Stripe for payment processing and subscription management.
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `stripe-integration`
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Set up Stripe API connection
|
||||
- [ ] Implement payment flow
|
||||
- [ ] Handle subscriptions
|
||||
- [ ] Add billing management UI
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
---
|
||||
id: task-007
|
||||
title: Web3 Integration
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
labels: [feature, web3, blockchain]
|
||||
priority: low
|
||||
branch: web3-integration
|
||||
---
|
||||
|
||||
## Description
|
||||
Integrate Web3 capabilities for blockchain-based features (wallet connect, NFT canvas elements, etc.).
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `web3-integration`
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Add wallet connection
|
||||
- [ ] Enable NFT minting of canvas elements
|
||||
- [ ] Blockchain-based ownership verification
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
---
|
||||
id: task-008
|
||||
title: Audio Recording Feature
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
labels: [feature, audio, media]
|
||||
priority: medium
|
||||
branch: audio-recording-attempt
|
||||
---
|
||||
|
||||
## Description
|
||||
Implement audio recording capability for voice notes and audio annotations on the canvas.
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `audio-recording-attempt`
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Record audio from microphone
|
||||
- [ ] Save audio clips to canvas
|
||||
- [ ] Playback audio annotations
|
||||
- [ ] Transcription integration
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
---
|
||||
id: task-009
|
||||
title: Web Speech API Transcription
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
labels: [feature, transcription, speech]
|
||||
priority: medium
|
||||
branch: transcribe-webspeechAPI
|
||||
---
|
||||
|
||||
## Description
|
||||
Implement speech-to-text transcription using the Web Speech API for voice input on the canvas.
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `transcribe-webspeechAPI`
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Capture speech via Web Speech API
|
||||
- [ ] Convert to text in real-time
|
||||
- [ ] Display transcription on canvas
|
||||
- [ ] Support multiple languages
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
---
|
||||
id: task-010
|
||||
title: Holon Integration
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
labels: [feature, holon, integration]
|
||||
priority: medium
|
||||
branch: holon-integration
|
||||
---
|
||||
|
||||
## Description
|
||||
Integrate Holon framework for hierarchical canvas organization and nested structures.
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `holon-integration`
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Implement holon data structure
|
||||
- [ ] Enable nested canvas elements
|
||||
- [ ] Support hierarchical navigation
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
---
|
||||
id: task-011
|
||||
title: Terminal Tool
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
labels: [feature, terminal, tool]
|
||||
priority: medium
|
||||
branch: feature/terminal-tool
|
||||
---
|
||||
|
||||
## Description
|
||||
Add a terminal tool to the canvas toolbar for embedding terminal sessions.
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `feature/terminal-tool`
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Add terminal tool to toolbar
|
||||
- [ ] Spawn terminal instances on canvas
|
||||
- [ ] Handle terminal sizing and positioning
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
---
|
||||
id: task-012
|
||||
title: Dark Mode Theme
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2025-12-03'
|
||||
updated_date: '2025-12-04 06:29'
|
||||
labels:
|
||||
- feature
|
||||
- ui
|
||||
- theme
|
||||
dependencies: []
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Implement dark mode theme support for the canvas interface.
|
||||
|
||||
## Branch Info
|
||||
- **Branch**: `dark-mode`
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Create dark theme colors
|
||||
- [x] #2 Add theme toggle
|
||||
- [x] #3 Persist user preference
|
||||
- [x] #4 System theme detection
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
## Implementation Complete (2025-12-03)
|
||||
|
||||
### Components Updated:
|
||||
|
||||
1. **Mycelial Intelligence (MI) Bar** (`src/ui/MycelialIntelligenceBar.tsx`)
|
||||
- Added dark mode color palette with automatic switching based on `isDark` state
|
||||
- Dark backgrounds, lighter text, adjusted shadows
|
||||
- Inline code blocks use a CSS class for proper dark mode styling
|
||||
|
||||
2. **Comprehensive CSS Dark Mode** (`src/css/style.css`)
|
||||
- Added CSS variables: `--card-bg`, `--input-bg`, `--muted-text`
|
||||
- Dark mode styles for: blockquotes, tables, navigation, command palette, MDXEditor, chat containers, form inputs, error/success messages
|
||||
|
||||
3. **UserSettingsModal** (`src/ui/UserSettingsModal.tsx`)
|
||||
- Added `colors` object with dark/light mode variants
|
||||
- Updated all inline styles to use theme-aware colors
|
||||
|
||||
4. **StandardizedToolWrapper** (`src/components/StandardizedToolWrapper.tsx`)
|
||||
- Added `useIsDarkMode` hook for dark mode detection
|
||||
- Updated wrapper backgrounds, shadows, borders, tags styling
|
||||
|
||||
5. **Markdown Tool** (`src/shapes/MarkdownShapeUtil.tsx`)
|
||||
- Dark mode detection with automatic background switching
|
||||
- Fixed scrollbar: vertical only, hidden when not needed
|
||||
- Added toolbar minimize/expand button
|
||||
|
||||
### Technical Details:
|
||||
- Automatic detection via `document.documentElement.classList` observer
|
||||
- CSS variables for base styles that auto-switch in dark mode
|
||||
- Inline style support with conditional color objects
|
||||
- Comprehensive coverage of all major UI components and tools
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
---
|
||||
id: task-013
|
||||
title: Markdown Tool UX Improvements
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2025-12-04 06:29'
|
||||
updated_date: '2025-12-04 06:29'
|
||||
labels:
|
||||
- feature
|
||||
- ui
|
||||
- markdown
|
||||
dependencies: []
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Improve the Markdown tool user experience with better scrollbar behavior and collapsible toolbar.
|
||||
|
||||
## Changes Implemented:
|
||||
- Scrollbar is now vertical only (no horizontal scrollbar)
|
||||
- Scrollbar auto-hides when not needed
|
||||
- Added minimize/expand button for the formatting toolbar
|
||||
- Full editing area uses available space
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Scrollbar is vertical only
|
||||
- [x] #2 Scrollbar hides when not needed
|
||||
- [x] #3 Toolbar has minimize/expand toggle
|
||||
- [x] #4 Full window is editing area
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Implementation completed in `src/shapes/MarkdownShapeUtil.tsx`:
|
||||
- Added `overflow-x: hidden` to content area
|
||||
- Custom scrollbar styling with thin width and auto-hide
|
||||
- Added toggle button in toolbar that collapses/expands formatting options
|
||||
- `isToolbarMinimized` state controls toolbar visibility
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
|
@ -0,0 +1,351 @@
|
|||
---
|
||||
id: task-014
|
||||
title: Implement WebGPU-based local image generation to reduce RunPod costs
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-04 11:46'
|
||||
updated_date: '2025-12-04 11:47'
|
||||
labels:
|
||||
- performance
|
||||
- cost-optimization
|
||||
- webgpu
|
||||
- ai
|
||||
- image-generation
|
||||
dependencies: []
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Integrate WebGPU-powered browser-based image generation (SD-Turbo) to reduce RunPod API costs and eliminate cold start delays. This creates a hybrid pipeline where quick drafts/iterations run locally in the browser (FREE, ~1-3 seconds), while high-quality final renders still use RunPod SDXL.
|
||||
|
||||
**Problem:**
|
||||
- Current image generation always hits RunPod (~$0.02/image + 10-30s cold starts)
|
||||
- No instant feedback loop for creative iteration
|
||||
- 100% of compute costs are cloud-based
|
||||
|
||||
**Solution:**
|
||||
- Add WebGPU capability detection
|
||||
- Integrate SD-Turbo for instant browser-based previews
|
||||
- Smart routing: drafts → browser, final renders → RunPod
|
||||
- Potential 70% reduction in RunPod image generation costs
|
||||
|
||||
**Cost Impact (projected):**
|
||||
- 1,000 images/mo: $20 → $6 (save $14/mo)
|
||||
- 5,000 images/mo: $100 → $30 (save $70/mo)
|
||||
- 10,000 images/mo: $200 → $60 (save $140/mo)
|
||||
|
||||
**Browser Support:**
|
||||
- Chrome/Edge: Full WebGPU (v113+)
|
||||
- Firefox: Windows (July 2025)
|
||||
- Safari: v26 beta
|
||||
- Fallback: WASM backend for unsupported browsers
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 WebGPU capability detection added to clientConfig.ts
|
||||
- [ ] #2 SD-Turbo model loads and runs in browser via WebGPU
|
||||
- [ ] #3 ImageGenShapeUtil has Quick Preview vs High Quality toggle
|
||||
- [ ] #4 Smart routing in aiOrchestrator routes drafts to browser
|
||||
- [ ] #5 Fallback to WASM for browsers without WebGPU
|
||||
- [ ] #6 User can generate preview images with zero cold start
|
||||
- [ ] #7 RunPod only called for High Quality final renders
|
||||
- [ ] #8 Model download progress indicator shown to user
|
||||
- [ ] #9 Works offline after initial model download
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
## Phase 1: Foundation (Quick Wins)
|
||||
|
||||
### 1.1 WebGPU Capability Detection
|
||||
**File:** `src/lib/clientConfig.ts`
|
||||
|
||||
```typescript
|
||||
export async function detectWebGPUCapabilities(): Promise<{
|
||||
hasWebGPU: boolean
|
||||
hasF16: boolean
|
||||
adapterInfo?: GPUAdapterInfo
|
||||
estimatedVRAM?: number
|
||||
}> {
|
||||
if (!navigator.gpu) {
|
||||
return { hasWebGPU: false, hasF16: false }
|
||||
}
|
||||
|
||||
const adapter = await navigator.gpu.requestAdapter()
|
||||
if (!adapter) {
|
||||
return { hasWebGPU: false, hasF16: false }
|
||||
}
|
||||
|
||||
const hasF16 = adapter.features.has('shader-f16')
|
||||
const adapterInfo = await adapter.requestAdapterInfo()
|
||||
|
||||
return {
|
||||
hasWebGPU: true,
|
||||
hasF16,
|
||||
adapterInfo,
|
||||
estimatedVRAM: adapterInfo.memoryHeaps?.[0]?.size
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 1.2 Install Dependencies
|
||||
```bash
|
||||
npm install @anthropic-ai/sdk onnxruntime-web
|
||||
# Or for transformers.js v3:
|
||||
npm install @huggingface/transformers
|
||||
```
|
||||
|
||||
### 1.3 Vite Config Updates
|
||||
**File:** `vite.config.ts`
|
||||
- Ensure WASM/ONNX assets are properly bundled
|
||||
- Add WebGPU shader compilation support
|
||||
- Configure chunk splitting for ML models
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Browser Diffusion Integration
|
||||
|
||||
### 2.1 Create WebGPU Diffusion Module
|
||||
**New File:** `src/lib/webgpuDiffusion.ts`
|
||||
|
||||
```typescript
|
||||
import { pipeline } from '@huggingface/transformers'
|
||||
|
||||
let generator: any = null
|
||||
let loadingPromise: Promise<void> | null = null
|
||||
|
||||
export async function initSDTurbo(
|
||||
onProgress?: (progress: number, status: string) => void
|
||||
): Promise<void> {
|
||||
if (generator) return
|
||||
if (loadingPromise) return loadingPromise
|
||||
|
||||
loadingPromise = (async () => {
|
||||
onProgress?.(0, 'Loading SD-Turbo model...')
|
||||
|
||||
generator = await pipeline(
|
||||
'text-to-image',
|
||||
'Xenova/sdxl-turbo', // or 'stabilityai/sd-turbo'
|
||||
{
|
||||
device: 'webgpu',
|
||||
dtype: 'fp16',
|
||||
progress_callback: (p) => onProgress?.(p.progress, p.status)
|
||||
}
|
||||
)
|
||||
|
||||
onProgress?.(100, 'Ready')
|
||||
})()
|
||||
|
||||
return loadingPromise
|
||||
}
|
||||
|
||||
export async function generateLocalImage(
|
||||
prompt: string,
|
||||
options?: {
|
||||
width?: number
|
||||
height?: number
|
||||
steps?: number
|
||||
seed?: number
|
||||
}
|
||||
): Promise<string> {
|
||||
if (!generator) {
|
||||
throw new Error('SD-Turbo not initialized. Call initSDTurbo() first.')
|
||||
}
|
||||
|
||||
const result = await generator(prompt, {
|
||||
width: options?.width || 512,
|
||||
height: options?.height || 512,
|
||||
num_inference_steps: options?.steps || 1, // SD-Turbo = 1 step
|
||||
seed: options?.seed
|
||||
})
|
||||
|
||||
// Returns base64 data URL
|
||||
return result[0].image
|
||||
}
|
||||
|
||||
export function isSDTurboReady(): boolean {
|
||||
return generator !== null
|
||||
}
|
||||
|
||||
export async function unloadSDTurbo(): Promise<void> {
|
||||
generator = null
|
||||
loadingPromise = null
|
||||
// Dropping these references lets GPU memory be garbage collected
|
||||
}
|
||||
```
|
||||
|
||||
### 2.2 Create Model Download Manager
|
||||
**New File:** `src/lib/modelDownloadManager.ts`
|
||||
|
||||
Handle progressive model downloads with:
|
||||
- IndexedDB caching for persistence
|
||||
- Progress tracking UI
|
||||
- Resume capability for interrupted downloads
|
||||
- Storage quota management
|
||||
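A minimal sketch of the IndexedDB caching described in the list above; store and function names are illustrative, not the final API:

```typescript
// Fetch a model file once, cache the raw bytes in IndexedDB, and check quota first.
const MODEL_DB = 'model-cache' // illustrative database name

function openModelDB(): Promise<IDBDatabase> {
  return new Promise((resolve, reject) => {
    const req = indexedDB.open(MODEL_DB, 1)
    req.onupgradeneeded = () => req.result.createObjectStore('models')
    req.onsuccess = () => resolve(req.result)
    req.onerror = () => reject(req.error)
  })
}

export async function getCachedModelFile(url: string): Promise<ArrayBuffer> {
  const db = await openModelDB()
  const cached = await new Promise<ArrayBuffer | undefined>((resolve) => {
    const req = db.transaction('models').objectStore('models').get(url)
    req.onsuccess = () => resolve(req.result)
    req.onerror = () => resolve(undefined)
  })
  if (cached) return cached

  // Warn if quota looks too small for a ~2GB model before downloading
  const { usage = 0, quota = 0 } = await navigator.storage.estimate()
  if (quota - usage < 2_000_000_000) console.warn('Storage quota may be too low for model download')

  const buffer = await (await fetch(url)).arrayBuffer()
  db.transaction('models', 'readwrite').objectStore('models').put(buffer, url)
  return buffer
}
```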
|
||||
---
|
||||
|
||||
## Phase 3: UI Integration
|
||||
|
||||
### 3.1 Update ImageGenShapeUtil
|
||||
**File:** `src/shapes/ImageGenShapeUtil.tsx`
|
||||
|
||||
Add to shape props:
|
||||
```typescript
|
||||
type IImageGen = TLBaseShape<"ImageGen", {
|
||||
// ... existing props
|
||||
generationMode: 'auto' | 'local' | 'cloud' // NEW
|
||||
localModelStatus: 'not-loaded' | 'loading' | 'ready' | 'error' // NEW
|
||||
localModelProgress: number // NEW (0-100)
|
||||
}>
|
||||
```
|
||||
|
||||
Add UI toggle:
|
||||
```tsx
|
||||
<div className="generation-mode-toggle">
|
||||
<button
|
||||
onClick={() => setMode('local')}
|
||||
disabled={!hasWebGPU}
|
||||
title={!hasWebGPU ? 'WebGPU not supported' : 'Fast preview (~1-3s)'}
|
||||
>
|
||||
⚡ Quick Preview
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setMode('cloud')}
|
||||
title="High quality SDXL (~10-30s)"
|
||||
>
|
||||
✨ High Quality
|
||||
</button>
|
||||
</div>
|
||||
```
|
||||
|
||||
### 3.2 Smart Generation Logic
|
||||
```typescript
|
||||
const generateImage = async (prompt: string) => {
|
||||
const mode = shape.props.generationMode
|
||||
const capabilities = await detectWebGPUCapabilities()
|
||||
|
||||
// Auto mode: local for iterations, cloud for final
|
||||
if (mode === 'auto' || mode === 'local') {
|
||||
if (capabilities.hasWebGPU && isSDTurboReady()) {
|
||||
// Generate locally - instant!
|
||||
const imageUrl = await generateLocalImage(prompt)
|
||||
updateShape({ imageUrl, source: 'local' })
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to RunPod
|
||||
await generateWithRunPod(prompt)
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: AI Orchestrator Integration
|
||||
|
||||
### 4.1 Update aiOrchestrator.ts
|
||||
**File:** `src/lib/aiOrchestrator.ts`
|
||||
|
||||
Add browser as compute target:
|
||||
```typescript
|
||||
type ComputeTarget = 'browser' | 'netcup' | 'runpod'
|
||||
|
||||
interface ImageGenerationOptions {
|
||||
prompt: string
|
||||
priority: 'draft' | 'final'
|
||||
preferLocal?: boolean
|
||||
}
|
||||
|
||||
async function generateImage(options: ImageGenerationOptions) {
|
||||
const { hasWebGPU } = await detectWebGPUCapabilities()
|
||||
|
||||
// Routing logic
|
||||
if (options.priority === 'draft' && hasWebGPU && isSDTurboReady()) {
|
||||
return { target: 'browser', cost: 0 }
|
||||
}
|
||||
|
||||
if (options.priority === 'final') {
|
||||
return { target: 'runpod', cost: 0.02 }
|
||||
}
|
||||
|
||||
// Fallback chain
|
||||
return { target: 'runpod', cost: 0.02 }
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Advanced Features (Future)
|
||||
|
||||
### 5.1 Real-time img2img Refinement
|
||||
- Start with browser SD-Turbo draft
|
||||
- User adjusts/annotates
|
||||
- Send to RunPod SDXL for final with img2img
|
||||
|
||||
### 5.2 Browser-based Upscaling
|
||||
- Add Real-ESRGAN-lite via ONNX Runtime
|
||||
- 2x/4x upscale locally before cloud render
|
||||
|
||||
### 5.3 Background Removal
|
||||
- U2Net in browser via transformers.js
|
||||
- Zero-cost background removal
|
||||
|
||||
### 5.4 Style Transfer
|
||||
- Fast neural style transfer via WebGPU shaders
|
||||
- Real-time preview on canvas
|
||||
|
||||
---
|
||||
|
||||
## Technical Considerations
|
||||
|
||||
### Model Sizes
|
||||
| Model | Size | Load Time | Generation |
|
||||
|-------|------|-----------|------------|
|
||||
| SD-Turbo | ~2GB | 30-60s (first) | 1-3s |
|
||||
| SD-Turbo (quantized) | ~1GB | 15-30s | 2-4s |
|
||||
|
||||
### Memory Management
|
||||
- Unload model when tab backgrounded
|
||||
- Clear GPU memory on low-memory warnings
|
||||
- IndexedDB for model caching (survives refresh)
|
||||
|
||||
### Error Handling
|
||||
- Graceful degradation to WASM if WebGPU fails
|
||||
- Clear error messages for unsupported browsers
|
||||
- Automatic fallback to RunPod on local failure
|
||||
|
||||
---
|
||||
|
||||
## Files to Create/Modify
|
||||
|
||||
**New Files:**
|
||||
- `src/lib/webgpuDiffusion.ts` - SD-Turbo wrapper
|
||||
- `src/lib/modelDownloadManager.ts` - Model caching
|
||||
- `src/lib/webgpuCapabilities.ts` - Detection utilities
|
||||
- `src/components/ModelDownloadProgress.tsx` - UI component
|
||||
|
||||
**Modified Files:**
|
||||
- `src/lib/clientConfig.ts` - Add WebGPU detection
|
||||
- `src/lib/aiOrchestrator.ts` - Add browser routing
|
||||
- `src/shapes/ImageGenShapeUtil.tsx` - Add mode toggle
|
||||
- `vite.config.ts` - ONNX/WASM config
|
||||
- `package.json` - New dependencies
|
||||
|
||||
---
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
- [ ] WebGPU detection works on Chrome, Edge, Firefox
|
||||
- [ ] WASM fallback works on Safari/older browsers
|
||||
- [ ] Model downloads and caches correctly
|
||||
- [ ] Generation completes in <5s on modern GPU
|
||||
- [ ] Memory cleaned up properly on unload
|
||||
- [ ] Offline generation works after model cached
|
||||
- [ ] RunPod fallback triggers correctly
|
||||
- [ ] Cost tracking reflects local vs cloud usage
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
|
@ -0,0 +1,146 @@
|
|||
---
|
||||
id: task-015
|
||||
title: Set up Cloudflare D1 email-collector database for cross-site subscriptions
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-04 12:00'
|
||||
updated_date: '2025-12-04 12:03'
|
||||
labels:
|
||||
- infrastructure
|
||||
- cloudflare
|
||||
- d1
|
||||
- email
|
||||
- cross-site
|
||||
dependencies: []
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Create a standalone Cloudflare D1 database for collecting email subscriptions across all websites (mycofi.earth, canvas.jeffemmett.com, decolonizeti.me, etc.) with easy export capabilities.
|
||||
|
||||
**Purpose:**
|
||||
- Unified email collection from all sites
|
||||
- Page-separated lists (e.g., /newsletter, /waitlist, /landing)
|
||||
- Simple CSV/JSON export for email campaigns
|
||||
- GDPR-compliant with unsubscribe tracking
|
||||
|
||||
**Sites to integrate:**
|
||||
- mycofi.earth
|
||||
- canvas.jeffemmett.com
|
||||
- decolonizeti.me
|
||||
- games.jeffemmett.com
|
||||
- Future sites
|
||||
|
||||
**Key Features:**
|
||||
- Double opt-in verification
|
||||
- Source tracking (which site, which page)
|
||||
- Export in multiple formats (CSV, JSON, Mailchimp)
|
||||
- Basic admin dashboard or CLI for exports
|
||||
- Rate limiting to prevent abuse
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 D1 database 'email-collector' created on Cloudflare
|
||||
- [ ] #2 Schema deployed with subscribers, verification_tokens tables
|
||||
- [ ] #3 POST /api/subscribe endpoint accepts email + source_site + source_page
|
||||
- [ ] #4 Email verification flow with token-based double opt-in
|
||||
- [ ] #5 GET /api/emails/export returns CSV with filters (site, date, verified)
|
||||
- [ ] #6 Unsubscribe endpoint and tracking
|
||||
- [ ] #7 Rate limiting prevents spam submissions
|
||||
- [ ] #8 At least one site integrated and collecting emails
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
## Implementation Steps
|
||||
|
||||
### 1. Create D1 Database
|
||||
```bash
|
||||
wrangler d1 create email-collector
|
||||
```
|
||||
|
||||
### 2. Create Schema File
|
||||
Create `worker/email-collector-schema.sql`:
|
||||
|
||||
```sql
|
||||
-- Email Collector Schema
|
||||
-- Cross-site email subscription management
|
||||
|
||||
CREATE TABLE IF NOT EXISTS subscribers (
|
||||
id TEXT PRIMARY KEY,
|
||||
email TEXT NOT NULL,
|
||||
email_hash TEXT NOT NULL, -- For duplicate checking
|
||||
source_site TEXT NOT NULL,
|
||||
source_page TEXT,
|
||||
referrer TEXT,
|
||||
ip_country TEXT,
|
||||
subscribed_at TEXT DEFAULT (datetime('now')),
|
||||
verified INTEGER DEFAULT 0,
|
||||
verified_at TEXT,
|
||||
unsubscribed INTEGER DEFAULT 0,
|
||||
unsubscribed_at TEXT,
|
||||
metadata TEXT -- JSON for custom fields
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS verification_tokens (
|
||||
id TEXT PRIMARY KEY,
|
||||
email TEXT NOT NULL,
|
||||
token TEXT UNIQUE NOT NULL,
|
||||
expires_at TEXT NOT NULL,
|
||||
used INTEGER DEFAULT 0,
|
||||
created_at TEXT DEFAULT (datetime('now'))
|
||||
);
|
||||
|
||||
-- Rate limiting table
|
||||
CREATE TABLE IF NOT EXISTS rate_limits (
|
||||
ip_hash TEXT PRIMARY KEY,
|
||||
request_count INTEGER DEFAULT 1,
|
||||
window_start TEXT DEFAULT (datetime('now'))
|
||||
);
|
||||
|
||||
-- Indexes
|
||||
CREATE INDEX IF NOT EXISTS idx_subs_email_hash ON subscribers(email_hash);
|
||||
CREATE INDEX IF NOT EXISTS idx_subs_site ON subscribers(source_site);
|
||||
CREATE INDEX IF NOT EXISTS idx_subs_page ON subscribers(source_site, source_page);
|
||||
CREATE INDEX IF NOT EXISTS idx_subs_verified ON subscribers(verified);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_subs_unique ON subscribers(email_hash, source_site);
|
||||
CREATE INDEX IF NOT EXISTS idx_tokens_token ON verification_tokens(token);
|
||||
```
|
||||
|
||||
### 3. Create Worker Endpoints
|
||||
Create `worker/emailCollector.ts`:
|
||||
|
||||
```typescript
|
||||
// POST /api/subscribe
|
||||
// GET /api/verify/:token
|
||||
// POST /api/unsubscribe
|
||||
// GET /api/emails/export (auth required)
|
||||
// GET /api/emails/stats
|
||||
```
|
||||
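A minimal sketch of the subscribe handler against the schema above, assuming the D1 binding is exposed as `EMAIL_DB` (binding name and error handling illustrative):

```typescript
// POST /api/subscribe
export async function handleSubscribe(
  request: Request,
  env: { EMAIL_DB: D1Database }
): Promise<Response> {
  const { email, source_site, source_page } = await request.json<{
    email: string
    source_site: string
    source_page?: string
  }>()
  if (!email || !source_site) return new Response('Missing fields', { status: 400 })

  // Hash the email for the duplicate-check index
  const digest = await crypto.subtle.digest('SHA-256', new TextEncoder().encode(email.toLowerCase()))
  const emailHash = [...new Uint8Array(digest)].map((b) => b.toString(16).padStart(2, '0')).join('')

  await env.EMAIL_DB.prepare(
    `INSERT OR IGNORE INTO subscribers (id, email, email_hash, source_site, source_page)
     VALUES (?, ?, ?, ?, ?)`
  ).bind(crypto.randomUUID(), email, emailHash, source_site, source_page ?? null).run()

  return Response.json({ ok: true })
}
```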
|
||||
### 4. Export Formats
|
||||
- CSV: `email,source_site,source_page,subscribed_at,verified`
|
||||
- JSON: Full object array
|
||||
- Mailchimp: CSV with required headers
|
||||
|
||||
### 5. Admin Authentication
|
||||
- Use simple API key for export endpoint
|
||||
- Store in Worker secret: `EMAIL_ADMIN_KEY`
|
||||
|
||||
### 6. Integration
|
||||
Add to each site's signup form:
|
||||
```javascript
|
||||
fetch('https://canvas.jeffemmett.com/api/subscribe', {
|
||||
method: 'POST',
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
email: 'user@example.com',
|
||||
source_site: 'mycofi.earth',
|
||||
source_page: '/newsletter'
|
||||
})
|
||||
})
|
||||
```
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
---
|
||||
id: task-016
|
||||
title: Add encryption for CryptID emails at rest
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-04 12:01'
|
||||
labels:
|
||||
- security
|
||||
- cryptid
|
||||
- encryption
|
||||
- privacy
|
||||
- d1
|
||||
dependencies:
|
||||
- task-017
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Enhance CryptID security by encrypting email addresses stored in D1 database. This protects user privacy even if the database is compromised.
|
||||
|
||||
**Encryption Strategy:**
|
||||
- Encrypt email addresses before storing in D1
|
||||
- Use Cloudflare Workers KV or environment secret for encryption key
|
||||
- Store encrypted email + hash for lookups
|
||||
- Decrypt only when needed (sending emails, display)
|
||||
|
||||
**Implementation Options:**
|
||||
1. **AES-GCM encryption** with key in Worker secret
|
||||
2. **Deterministic encryption** for email lookups (hash-based)
|
||||
3. **Hybrid approach**: Hash for lookup index, AES for actual email
|
||||
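A minimal sketch of the hybrid approach (option 3), assuming an AES-GCM `CryptoKey` derived from a Worker secret:

```typescript
// SHA-256 hash for the lookup index; AES-GCM for the stored value (key handling illustrative)
async function hashEmail(email: string): Promise<string> {
  const digest = await crypto.subtle.digest('SHA-256', new TextEncoder().encode(email.toLowerCase()))
  return [...new Uint8Array(digest)].map((b) => b.toString(16).padStart(2, '0')).join('')
}

async function encryptEmail(email: string, key: CryptoKey): Promise<string> {
  const iv = crypto.getRandomValues(new Uint8Array(12))
  const ciphertext = await crypto.subtle.encrypt(
    { name: 'AES-GCM', iv },
    key,
    new TextEncoder().encode(email)
  )
  // Prepend the IV so the stored value is self-contained
  const combined = new Uint8Array(iv.length + ciphertext.byteLength)
  combined.set(iv)
  combined.set(new Uint8Array(ciphertext), iv.length)
  return btoa(String.fromCharCode(...combined))
}
```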
|
||||
**Schema Changes:**
|
||||
```sql
|
||||
ALTER TABLE users ADD COLUMN email_encrypted TEXT;
|
||||
ALTER TABLE users ADD COLUMN email_hash TEXT; -- For lookups
|
||||
-- Migrate existing emails, then drop plaintext column
|
||||
```
|
||||
|
||||
**Considerations:**
|
||||
- Key rotation strategy
|
||||
- Performance impact on lookups
|
||||
- Backup/recovery implications
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 Encryption key securely stored in Worker secrets
|
||||
- [ ] #2 Emails encrypted before D1 insert
|
||||
- [ ] #3 Email lookup works via hash index
|
||||
- [ ] #4 Decryption works for email display and sending
|
||||
- [ ] #5 Existing emails migrated to encrypted format
|
||||
- [ ] #6 Key rotation procedure documented
|
||||
- [ ] #7 No plaintext emails in database
|
||||
<!-- AC:END -->
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
---
|
||||
id: task-017
|
||||
title: Deploy CryptID email recovery to dev branch and test
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-04 12:00'
|
||||
updated_date: '2025-12-04 12:27'
|
||||
labels:
|
||||
- feature
|
||||
- cryptid
|
||||
- auth
|
||||
- testing
|
||||
- dev-branch
|
||||
dependencies:
|
||||
- task-018
|
||||
- task-019
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Push the existing CryptID email recovery code changes to dev branch and test the full flow before merging to main.
|
||||
|
||||
**Code Changes Ready:**
|
||||
- src/App.tsx - Routes for /verify-email, /link-device
|
||||
- src/components/auth/CryptID.tsx - Email linking flow
|
||||
- src/components/auth/Profile.tsx - Email management UI, device list
|
||||
- src/css/crypto-auth.css - Styling for email/device modals
|
||||
- worker/types.ts - Updated D1 types
|
||||
- worker/worker.ts - Auth API routes
|
||||
- worker/cryptidAuth.ts - Auth handlers (already committed)
|
||||
|
||||
**Test Scenarios:**
|
||||
1. Link email to existing CryptID account
|
||||
2. Verify email via link
|
||||
3. Request device link from new device
|
||||
4. Approve device link via email
|
||||
5. View and revoke linked devices
|
||||
6. Recover account on new device via email
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 All CryptID changes committed to dev branch
|
||||
- [ ] #2 Worker deployed to dev environment
|
||||
- [ ] #3 Link email flow works end-to-end
|
||||
- [ ] #4 Email verification completes successfully
|
||||
- [ ] #5 Device linking via email works
|
||||
- [ ] #6 Device revocation works
|
||||
- [ ] #7 Profile shows linked email and devices
|
||||
- [ ] #8 No console errors in happy path
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Branch created: `feature/cryptid-email-recovery`
|
||||
|
||||
Code committed and pushed to Gitea
|
||||
|
||||
PR available at: https://gitea.jeffemmett.com/jeffemmett/canvas-website/compare/main...feature/cryptid-email-recovery
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
|
@ -0,0 +1,111 @@
|
|||
---
|
||||
id: task-018
|
||||
title: Create Cloudflare D1 cryptid-auth database
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-04 12:02'
|
||||
updated_date: '2025-12-04 12:27'
|
||||
labels:
|
||||
- infrastructure
|
||||
- cloudflare
|
||||
- d1
|
||||
- cryptid
|
||||
- auth
|
||||
dependencies: []
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Create the D1 database on Cloudflare for CryptID authentication system. This is the first step before deploying the email recovery feature.
|
||||
|
||||
**Database Purpose:**
|
||||
- Store user accounts linked to CryptID usernames
|
||||
- Store device public keys for multi-device auth
|
||||
- Store verification tokens for email/device linking
|
||||
- Enable account recovery via verified email
|
||||
|
||||
**Security Considerations:**
|
||||
- Emails should be encrypted at rest (task-016)
|
||||
- Public keys are safe to store (not secrets)
|
||||
- Tokens are time-limited and single-use
|
||||
- No passwords stored (WebCrypto key-based auth)
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 D1 database 'cryptid-auth' created via wrangler d1 create
|
||||
- [ ] #2 D1 database 'cryptid-auth-dev' created for dev environment
|
||||
- [ ] #3 Database IDs added to wrangler.toml (replacing placeholders)
|
||||
- [ ] #4 Schema from worker/schema.sql deployed to both databases
|
||||
- [ ] #5 Verified tables exist: users, device_keys, verification_tokens
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
## Implementation Steps
|
||||
|
||||
### 1. Create D1 Databases
|
||||
Run from local machine or Netcup (requires wrangler CLI):
|
||||
|
||||
```bash
|
||||
cd /home/jeffe/Github/canvas-website
|
||||
|
||||
# Create production database
|
||||
wrangler d1 create cryptid-auth
|
||||
|
||||
# Create dev database
|
||||
wrangler d1 create cryptid-auth-dev
|
||||
```
|
||||
|
||||
### 2. Update wrangler.toml
|
||||
Replace placeholder IDs with actual database IDs from step 1:
|
||||
|
||||
```toml
|
||||
[[d1_databases]]
|
||||
binding = "CRYPTID_DB"
|
||||
database_name = "cryptid-auth"
|
||||
database_id = "<PROD_ID_FROM_STEP_1>"
|
||||
|
||||
[[env.dev.d1_databases]]
|
||||
binding = "CRYPTID_DB"
|
||||
database_name = "cryptid-auth-dev"
|
||||
database_id = "<DEV_ID_FROM_STEP_1>"
|
||||
```
|
||||
|
||||
### 3. Deploy Schema
|
||||
```bash
|
||||
# Deploy to dev first
|
||||
wrangler d1 execute cryptid-auth-dev --file=./worker/schema.sql
|
||||
|
||||
# Then production
|
||||
wrangler d1 execute cryptid-auth --file=./worker/schema.sql
|
||||
```
|
||||
|
||||
### 4. Verify Tables
|
||||
```bash
|
||||
# Check dev
|
||||
wrangler d1 execute cryptid-auth-dev --command="SELECT name FROM sqlite_master WHERE type='table';"
|
||||
|
||||
# Expected output:
|
||||
# - users
|
||||
# - device_keys
|
||||
# - verification_tokens
|
||||
```
|
||||
|
||||
### 5. Commit wrangler.toml Changes
|
||||
```bash
|
||||
git add wrangler.toml
|
||||
git commit -m "chore: add D1 database IDs for cryptid-auth"
|
||||
```
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Feature branch: `feature/cryptid-email-recovery`
|
||||
|
||||
Code is ready - waiting for D1 database creation
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
---
|
||||
id: task-019
|
||||
title: Configure CryptID secrets and SendGrid integration
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-04 12:02'
|
||||
labels:
|
||||
- infrastructure
|
||||
- cloudflare
|
||||
- cryptid
|
||||
- secrets
|
||||
- sendgrid
|
||||
dependencies:
|
||||
- task-018
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Set up the required secrets and environment variables for CryptID email functionality on Cloudflare Workers.
|
||||
|
||||
**Required Secrets:**
|
||||
- SENDGRID_API_KEY - For sending verification emails
|
||||
- CRYPTID_EMAIL_FROM - Sender email address (e.g., auth@jeffemmett.com)
|
||||
- APP_URL - Base URL for verification links (e.g., https://canvas.jeffemmett.com)
|
||||
|
||||
**Configuration:**
|
||||
- Secrets set for both production and dev environments
|
||||
- SendGrid account configured with verified sender domain
|
||||
- Email templates tested
|
||||
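A minimal sketch of the verification email send from the Worker, using SendGrid's v3 `mail/send` endpoint (subject and body text are placeholders):

```typescript
// Secret/variable names match the list above
export async function sendVerificationEmail(
  env: { SENDGRID_API_KEY: string; CRYPTID_EMAIL_FROM: string; APP_URL: string },
  to: string,
  token: string
): Promise<void> {
  const res = await fetch('https://api.sendgrid.com/v3/mail/send', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${env.SENDGRID_API_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      personalizations: [{ to: [{ email: to }] }],
      from: { email: env.CRYPTID_EMAIL_FROM },
      subject: 'Verify your CryptID email',
      content: [
        { type: 'text/plain', value: `Verify your email: ${env.APP_URL}/verify-email?token=${token}` },
      ],
    }),
  })
  if (!res.ok) throw new Error(`SendGrid request failed: ${res.status}`)
}
```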
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 SENDGRID_API_KEY secret set via wrangler secret put
|
||||
- [ ] #2 CRYPTID_EMAIL_FROM secret configured
|
||||
- [ ] #3 APP_URL environment variable set in wrangler.toml
|
||||
- [ ] #4 SendGrid sender domain verified (jeffemmett.com or subdomain)
|
||||
- [ ] #5 Test email sends successfully from Worker
|
||||
<!-- AC:END -->
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
---
|
||||
id: task-024
|
||||
title: 'Open Mapping: Collaborative Route Planning Module'
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2025-12-04 14:30'
|
||||
labels:
|
||||
- feature
|
||||
- mapping
|
||||
dependencies: []
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Implement an open-source mapping and routing layer for the canvas that provides advanced route planning capabilities beyond Google Maps, built on OpenStreetMap, OSRM/Valhalla, and MapLibre GL JS.
|
||||
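A minimal sketch of the MapLibre + OSRM building blocks, assuming a self-hosted OSRM endpoint (URLs, container id, and styling are illustrative):

```typescript
import maplibregl from 'maplibre-gl'

// Basemap rendered inside a canvas shape's container
const map = new maplibregl.Map({
  container: 'map-shape-container',
  style: 'https://demotiles.maplibre.org/style.json',
  center: [13.405, 52.52], // [lon, lat]
  zoom: 11,
})

// Request a route between two waypoints from a self-hosted OSRM instance
async function drawRoute(a: [number, number], b: [number, number]): Promise<void> {
  const url = `https://osrm.example.com/route/v1/driving/${a.join(',')};${b.join(',')}?geometries=geojson`
  const { routes } = await (await fetch(url)).json()

  // Call after the map 'load' event has fired
  map.addSource('route', {
    type: 'geojson',
    data: { type: 'Feature', properties: {}, geometry: routes[0].geometry },
  })
  map.addLayer({
    id: 'route',
    type: 'line',
    source: 'route',
    paint: { 'line-color': '#3b82f6', 'line-width': 4 },
  })
}
```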
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 MapLibre GL JS integrated with tldraw canvas
|
||||
- [ ] #2 OSRM routing backend deployed to Netcup
|
||||
- [ ] #3 Waypoint placement and route calculation working
|
||||
- [ ] #4 Multi-route comparison UI implemented
|
||||
- [ ] #5 Y.js collaboration for shared route editing
|
||||
- [ ] #6 Layer management panel with basemap switching
|
||||
- [ ] #7 Offline tile caching via Service Worker
|
||||
- [ ] #8 Budget tracking per waypoint/route
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
Phase 1 - Foundation:
|
||||
- Integrate MapLibre GL JS with tldraw
|
||||
- Deploy OSRM to /opt/apps/open-mapping/
|
||||
- Basic waypoint and route UI
|
||||
|
||||
Phase 2 - Multi-Route:
|
||||
- Alternative routes visualization
|
||||
- Route comparison panel
|
||||
- Elevation profiles
|
||||
|
||||
Phase 3 - Collaboration:
|
||||
- Y.js integration
|
||||
- Real-time cursor presence
|
||||
- Share links
|
||||
|
||||
Phase 4 - Layers:
|
||||
- Layer panel UI
|
||||
- Multiple basemaps
|
||||
- Custom overlays
|
||||
|
||||
Phase 5 - Calendar/Budget:
|
||||
- Time windows on waypoints
|
||||
- Cost estimation
|
||||
- iCal export
|
||||
|
||||
Phase 6 - Optimization:
|
||||
- VROOM TSP/VRP
|
||||
- Offline PWA
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
---
|
||||
id: task-high.01
|
||||
title: 'MI Bar UX: Modal Fade & Scrollable Try Next'
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2025-12-04 06:34'
|
||||
labels: []
|
||||
dependencies: []
|
||||
parent_task_id: task-high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Improved Mycelial Intelligence bar UX: the bar now fades when modals/popups are open, and the Tools and Follow-up suggestions are combined into a single scrollable 'Try Next' section.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 MI bar fades when settings modal is open
|
||||
- [ ] #2 MI bar fades when auth modal is open
|
||||
- [ ] #3 Suggested tools and follow-ups in single scrollable row
|
||||
<!-- AC:END -->
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
---
|
||||
id: task-high.02
|
||||
title: CryptID Email Recovery in Settings
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2025-12-04 06:35'
|
||||
labels: []
|
||||
dependencies: []
|
||||
parent_task_id: task-high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Added email linking to the General tab of the User Settings modal, allowing users to attach their email to their CryptID account for device recovery and verification.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 Email linking UI in General settings tab
|
||||
- [ ] #2 Shows email verification status
|
||||
- [ ] #3 Sends verification email on link
|
||||
- [ ] #4 Dark mode aware styling
|
||||
<!-- AC:END -->
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
# Canvas Website Docker Compose
|
||||
# Production: jeffemmett.com, www.jeffemmett.com
|
||||
# Staging: staging.jeffemmett.com
|
||||
|
||||
services:
|
||||
canvas-website:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
- VITE_TLDRAW_WORKER_URL=https://jeffemmett-canvas.jeffemmett.workers.dev
|
||||
# Add other build args from .env if needed
|
||||
container_name: canvas-website
|
||||
restart: unless-stopped
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.docker.network=traefik-public"
|
||||
# Single service definition (both routers use same backend)
|
||||
- "traefik.http.services.canvas.loadbalancer.server.port=80"
|
||||
# Production deployment (jeffemmett.com and www)
|
||||
- "traefik.http.routers.canvas-prod.rule=Host(`jeffemmett.com`) || Host(`www.jeffemmett.com`)"
|
||||
- "traefik.http.routers.canvas-prod.entrypoints=web"
|
||||
- "traefik.http.routers.canvas-prod.service=canvas"
|
||||
# Staging deployment (keep for testing)
|
||||
- "traefik.http.routers.canvas-staging.rule=Host(`staging.jeffemmett.com`)"
|
||||
- "traefik.http.routers.canvas-staging.entrypoints=web"
|
||||
- "traefik.http.routers.canvas-staging.service=canvas"
|
||||
networks:
|
||||
- traefik-public
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
|
||||
networks:
|
||||
traefik-public:
|
||||
external: true
|
||||
|
|
@ -0,0 +1,214 @@
|
|||
# Enhanced Audio Transcription with Speaker Identification
|
||||
|
||||
This document describes the enhanced audio transcription system that identifies different speakers and ensures complete transcript preservation in real-time.
|
||||
|
||||
## 🎯 Key Features
|
||||
|
||||
### 1. **Speaker Identification**
|
||||
- **Voice Fingerprinting**: Uses audio analysis to create unique voice profiles for each speaker
|
||||
- **Real-time Detection**: Automatically identifies when speakers change during conversation
|
||||
- **Visual Indicators**: Each speaker gets a unique color and label for easy identification
|
||||
- **Speaker Statistics**: Tracks speaking time and segment count for each participant
|
||||
|
||||
### 2. **Enhanced Transcript Structure**
|
||||
- **Structured Segments**: Each transcript segment includes speaker ID, timestamps, and confidence scores
|
||||
- **Complete Preservation**: No words are lost during real-time updates
|
||||
- **Backward Compatibility**: Maintains legacy transcript format for existing integrations
|
||||
- **Multiple Export Formats**: Support for text, JSON, and SRT subtitle formats
|
||||
|
||||
### 3. **Real-time Updates**
|
||||
- **Live Speaker Detection**: Continuously monitors voice activity and speaker changes
|
||||
- **Interim Text Display**: Shows partial results as they're being spoken
|
||||
- **Smooth Transitions**: Seamless updates between interim and final transcript segments
|
||||
- **Auto-scroll**: Automatically scrolls to show the latest content
|
||||
|
||||
## 🔧 Technical Implementation
|
||||
|
||||
### Audio Analysis System
|
||||
|
||||
The system uses advanced audio analysis to identify speakers:
|
||||
|
||||
```typescript
|
||||
interface VoiceCharacteristics {
|
||||
pitch: number // Fundamental frequency
|
||||
volume: number // Audio amplitude
|
||||
spectralCentroid: number // Frequency distribution center
|
||||
mfcc: number[] // Mel-frequency cepstral coefficients
|
||||
zeroCrossingRate: number // Voice activity indicator
|
||||
energy: number // Overall audio energy
|
||||
}
|
||||
```
|
||||
|
||||
### Speaker Identification Algorithm
|
||||
|
||||
1. **Voice Activity Detection**: Monitors audio levels to detect when someone is speaking
|
||||
2. **Feature Extraction**: Analyzes voice characteristics in real-time
|
||||
3. **Similarity Matching**: Compares current voice with known speaker profiles
|
||||
4. **Profile Creation**: Creates new speaker profiles for unrecognized voices
|
||||
5. **Confidence Scoring**: Assigns confidence levels to speaker identifications
|
||||
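A simplified sketch of the similarity-matching step, using the `VoiceCharacteristics` interface above (feature weights and the threshold are illustrative, not the tuned values):

```typescript
// Compare a live voice sample against stored speaker profiles
function similarity(a: VoiceCharacteristics, b: VoiceCharacteristics): number {
  const pitchScore = 1 - Math.min(Math.abs(a.pitch - b.pitch) / 200, 1)
  const centroidScore = 1 - Math.min(Math.abs(a.spectralCentroid - b.spectralCentroid) / 2000, 1)
  const energyScore = 1 - Math.min(Math.abs(a.energy - b.energy), 1)
  return 0.5 * pitchScore + 0.3 * centroidScore + 0.2 * energyScore
}

function matchSpeaker(
  sample: VoiceCharacteristics,
  profiles: Map<string, VoiceCharacteristics>
): string | null {
  let best: { id: string; score: number } | null = null
  for (const [id, profile] of profiles) {
    const score = similarity(sample, profile)
    if (!best || score > best.score) best = { id, score }
  }
  // Below the threshold, a new speaker profile is created instead
  return best && best.score >= 0.85 ? best.id : null
}
```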
|
||||
### Transcript Management
|
||||
|
||||
The enhanced transcript system provides:
|
||||
|
||||
```typescript
|
||||
interface TranscriptSegment {
|
||||
id: string // Unique segment identifier
|
||||
speakerId: string // Associated speaker ID
|
||||
speakerName: string // Display name for speaker
|
||||
text: string // Transcribed text
|
||||
startTime: number // Segment start time (ms)
|
||||
endTime: number // Segment end time (ms)
|
||||
confidence: number // Recognition confidence (0-1)
|
||||
isFinal: boolean // Whether segment is finalized
|
||||
}
|
||||
```
|
||||
|
||||
## 🎨 User Interface Enhancements
|
||||
|
||||
### Speaker Display
|
||||
- **Color-coded Labels**: Each speaker gets a unique color for easy identification
|
||||
- **Speaker List**: Shows all identified speakers with speaking time statistics
|
||||
- **Current Speaker Highlighting**: Highlights the currently speaking participant
|
||||
- **Speaker Management**: Ability to rename speakers and manage their profiles
|
||||
|
||||
### Transcript Controls
|
||||
- **Show/Hide Speaker Labels**: Toggle speaker name display
|
||||
- **Show/Hide Timestamps**: Toggle timestamp display for each segment
|
||||
- **Auto-scroll Toggle**: Control automatic scrolling behavior
|
||||
- **Export Options**: Download transcripts in multiple formats
|
||||
|
||||
### Visual Indicators
|
||||
- **Border Colors**: Each transcript segment has a colored border matching the speaker
|
||||
- **Speaking Status**: Visual indicators show who is currently speaking
|
||||
- **Interim Text**: Italicized, gray text shows partial results
|
||||
- **Final Text**: Regular text shows confirmed transcript segments
|
||||
|
||||
## 📊 Data Export and Analysis
|
||||
|
||||
### Export Formats
|
||||
|
||||
1. **Text Format**:
|
||||
```
|
||||
[00:01:23] Speaker 1: Hello, how are you today?
|
||||
[00:01:28] Speaker 2: I'm doing well, thank you for asking.
|
||||
```
|
||||
|
||||
2. **JSON Format**:
|
||||
```json
|
||||
{
|
||||
"segments": [...],
|
||||
"speakers": [...],
|
||||
"sessionStartTime": 1234567890,
|
||||
"totalDuration": 300000
|
||||
}
|
||||
```
|
||||
|
||||
3. **SRT Subtitle Format**:
|
||||
```
|
||||
1
|
||||
00:00:01,230 --> 00:00:05,180
|
||||
Speaker 1: Hello, how are you today?
|
||||
```
|
||||
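A minimal sketch of serializing finalized segments to SRT, using the `TranscriptSegment` fields above (times in milliseconds):

```typescript
function toSrtTime(ms: number): string {
  const h = String(Math.floor(ms / 3_600_000)).padStart(2, '0')
  const m = String(Math.floor(ms / 60_000) % 60).padStart(2, '0')
  const s = String(Math.floor(ms / 1_000) % 60).padStart(2, '0')
  const frac = String(ms % 1000).padStart(3, '0')
  return `${h}:${m}:${s},${frac}`
}

export function exportSrt(segments: TranscriptSegment[]): string {
  return segments
    .filter((seg) => seg.isFinal)
    .map(
      (seg, i) =>
        `${i + 1}\n${toSrtTime(seg.startTime)} --> ${toSrtTime(seg.endTime)}\n${seg.speakerName}: ${seg.text}\n`
    )
    .join('\n')
}
```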
|
||||
### Statistics and Analytics
|
||||
|
||||
The system tracks comprehensive statistics:
|
||||
- Total speaking time per speaker
|
||||
- Number of segments per speaker
|
||||
- Average segment length
|
||||
- Session duration and timeline
|
||||
- Recognition confidence scores
|
||||
|
||||
## 🔄 Real-time Processing Flow
|
||||
|
||||
1. **Audio Capture**: Microphone stream is captured and analyzed
|
||||
2. **Voice Activity Detection**: System detects when someone starts/stops speaking
|
||||
3. **Speaker Identification**: Voice characteristics are analyzed and matched to known speakers
|
||||
4. **Speech Recognition**: Web Speech API processes audio into text
|
||||
5. **Transcript Update**: New segments are added with speaker information
|
||||
6. **UI Update**: Interface updates to show new content with speaker labels
|
||||
|
||||
## 🛠️ Configuration Options
|
||||
|
||||
### Audio Analysis Settings
|
||||
- **Voice Activity Threshold**: Sensitivity for detecting speech
|
||||
- **Silence Timeout**: Time before considering a speaker change
|
||||
- **Similarity Threshold**: Minimum similarity for speaker matching
|
||||
- **Feature Update Rate**: How often voice profiles are updated
|
||||
|
||||
### Display Options
|
||||
- **Speaker Colors**: Customizable color palette for speakers
|
||||
- **Timestamp Format**: Choose between different time display formats
|
||||
- **Auto-scroll Behavior**: Control when and how auto-scrolling occurs
|
||||
- **Segment Styling**: Customize visual appearance of transcript segments
|
||||
|
||||
## 🔍 Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Speaker Not Identified**:
|
||||
- Ensure good microphone quality
|
||||
- Check for background noise
|
||||
- Verify speaker is speaking clearly
|
||||
- Allow time for voice profile creation
|
||||
|
||||
2. **Incorrect Speaker Assignment**:
|
||||
- Check microphone positioning
|
||||
- Verify audio quality
|
||||
- Consider adjusting similarity threshold
|
||||
- Manually rename speakers if needed
|
||||
|
||||
3. **Missing Transcript Segments**:
|
||||
- Check internet connection stability
|
||||
- Verify browser compatibility
|
||||
- Ensure microphone permissions are granted
|
||||
- Check for audio processing errors
|
||||
|
||||
### Performance Optimization
|
||||
|
||||
1. **Audio Quality**: Use high-quality microphones for better speaker identification
|
||||
2. **Environment**: Minimize background noise for clearer voice analysis
|
||||
3. **Browser**: Use Chrome or Chromium-based browsers for best performance
|
||||
4. **Network**: Ensure stable internet connection for speech recognition
|
||||
|
||||
## 🚀 Future Enhancements
|
||||
|
||||
### Planned Features
|
||||
- **Machine Learning Integration**: Improved speaker identification using ML models
|
||||
- **Voice Cloning Detection**: Identify when speakers are using voice modification
|
||||
- **Emotion Recognition**: Detect emotional tone in speech
|
||||
- **Language Detection**: Automatic language identification and switching
|
||||
- **Cloud Processing**: Offload heavy processing to cloud services
|
||||
|
||||
### Integration Possibilities
|
||||
- **Video Analysis**: Combine with video feeds for enhanced speaker detection
|
||||
- **Meeting Platforms**: Integration with Zoom, Teams, and other platforms
|
||||
- **AI Summarization**: Automatic meeting summaries with speaker attribution
|
||||
- **Search and Indexing**: Full-text search across all transcript segments
|
||||
|
||||
## 📝 Usage Examples
|
||||
|
||||
### Basic Usage
|
||||
1. Start a video chat session
|
||||
2. Click the transcription button
|
||||
3. Allow microphone access
|
||||
4. Begin speaking - speakers will be automatically identified
|
||||
5. View real-time transcript with speaker labels
|
||||
|
||||
### Advanced Features
|
||||
1. **Customize Display**: Toggle speaker labels and timestamps
|
||||
2. **Export Transcripts**: Download in your preferred format
|
||||
3. **Manage Speakers**: Rename speakers for better organization
|
||||
4. **Analyze Statistics**: View speaking time and participation metrics
|
||||
|
||||
### Integration with Other Tools
|
||||
- **Meeting Notes**: Combine with note-taking tools
|
||||
- **Action Items**: Extract action items with speaker attribution
|
||||
- **Follow-up**: Use transcripts for meeting follow-up and documentation
|
||||
- **Compliance**: Maintain records for regulatory requirements
|
||||
|
||||
---
|
||||
|
||||
*The enhanced transcription system provides a comprehensive solution for real-time speaker identification and transcript management, ensuring no spoken words are lost while providing rich metadata about conversation participants.*
|
||||
|
||||
|
|
@ -0,0 +1,913 @@
|
|||
# Google Data Sovereignty: Local-First Secure Storage
|
||||
|
||||
This document outlines the architecture for securely importing, storing, and optionally sharing Google Workspace data (Gmail, Drive, Photos, Calendar) using a **local-first, data sovereign** approach.
|
||||
|
||||
## Overview
|
||||
|
||||
**Philosophy**: Your data should be yours. Import it locally, encrypt it client-side, and choose when/what to share.
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────┐
|
||||
│ USER'S BROWSER (Data Sovereign Zone) │
|
||||
├─────────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌─────────────┐ ┌──────────────────────────────────────────────┐ │
|
||||
│ │ Google APIs │───>│ Local Processing Layer │ │
|
||||
│ │ (OAuth 2.0) │ │ ├── Fetch data │ │
|
||||
│ └─────────────┘ │ ├── Encrypt with user's WebCrypto keys │ │
|
||||
│ │ └── Store to IndexedDB │ │
|
||||
│ └────────────────────────┬─────────────────────┘ │
|
||||
│ │ │
|
||||
│ ┌───────────────────────────────────────────┴───────────────────────┐ │
|
||||
│ │ IndexedDB Encrypted Storage │ │
|
||||
│ │ ├── gmail_messages (encrypted blobs) │ │
|
||||
│ │ ├── drive_documents (encrypted blobs) │ │
|
||||
│ │ ├── photos_media (encrypted references) │ │
|
||||
│ │ ├── calendar_events (encrypted data) │ │
|
||||
│ │ └── encryption_metadata (key derivation info) │ │
|
||||
│ └─────────────────────────────────────────────────────────────────── │
|
||||
│ │ │
|
||||
│ ┌────────────────────────┴───────────────────────┐ │
|
||||
│ │ Share Decision Layer (User Controlled) │ │
|
||||
│ │ ├── Keep Private (local only) │ │
|
||||
│ │ ├── Share to Board (Automerge sync) │ │
|
||||
│ │ └── Backup to R2 (encrypted cloud backup) │ │
|
||||
│ └────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Browser Storage Capabilities & Limitations
|
||||
|
||||
### IndexedDB Storage
|
||||
|
||||
| Browser | Default Quota | Max Quota | Persistence |
|
||||
|---------|--------------|-----------|-------------|
|
||||
| Chrome/Edge | 60% of disk | Unlimited* | Persistent with permission |
|
||||
| Firefox | 10% up to 10GB | 50% of disk | Persistent with permission |
|
||||
| Safari | 1GB (lax) | ~1GB per origin | Non-persistent (7-day eviction) |
|
||||
|
||||
*Chrome "Unlimited" requires `navigator.storage.persist()` permission
|
||||
|
||||
### Storage API Persistence
|
||||
|
||||
```typescript
|
||||
// Request persistent storage (prevents automatic eviction)
|
||||
async function requestPersistentStorage(): Promise<boolean> {
|
||||
if (navigator.storage && navigator.storage.persist) {
|
||||
const isPersisted = await navigator.storage.persist();
|
||||
console.log(`Persistent storage ${isPersisted ? 'granted' : 'denied'}`);
|
||||
return isPersisted;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check current storage quota
|
||||
async function checkStorageQuota(): Promise<{used: number, quota: number}> {
|
||||
if (navigator.storage && navigator.storage.estimate) {
|
||||
const estimate = await navigator.storage.estimate();
|
||||
return {
|
||||
used: estimate.usage || 0,
|
||||
quota: estimate.quota || 0
|
||||
};
|
||||
}
|
||||
return { used: 0, quota: 0 };
|
||||
}
|
||||
```
|
||||
|
||||
### Safari's 7-Day Eviction Rule
|
||||
|
||||
**CRITICAL for Safari users**: Safari evicts IndexedDB data after 7 days of non-use.
|
||||
|
||||
**Mitigations**:
|
||||
1. Use a Service Worker with periodic background sync to "touch" data
|
||||
2. Prompt Safari users to add to Home Screen (PWA mode bypasses some restrictions)
|
||||
3. Automatically sync important data to R2 backup
|
||||
4. Show clear warnings about Safari limitations
|
||||
|
||||
```typescript
|
||||
// Detect Safari's storage limitations
|
||||
function hasSafariLimitations(): boolean {
|
||||
const isSafari = /^((?!chrome|android).)*safari/i.test(navigator.userAgent);
|
||||
const isIOS = /iPad|iPhone|iPod/.test(navigator.userAgent);
|
||||
return isSafari || isIOS;
|
||||
}
|
||||
|
||||
// Register touch activity to prevent eviction
|
||||
async function touchLocalData(): Promise<void> {
|
||||
const db = await openDatabase();
|
||||
const tx = db.transaction('metadata', 'readwrite');
|
||||
tx.objectStore('metadata').put({
|
||||
key: 'last_accessed',
|
||||
timestamp: Date.now()
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
## Data Types & Storage Strategies
|
||||
|
||||
### 1. Gmail Messages
|
||||
|
||||
```typescript
|
||||
interface EncryptedEmailStore {
|
||||
id: string; // Gmail message ID
|
||||
threadId: string; // Thread ID for grouping
|
||||
encryptedSubject: ArrayBuffer; // AES-GCM encrypted
|
||||
encryptedBody: ArrayBuffer; // AES-GCM encrypted
|
||||
encryptedFrom: ArrayBuffer; // Sender info
|
||||
encryptedTo: ArrayBuffer[]; // Recipients
|
||||
date: number; // Timestamp (unencrypted for sorting)
|
||||
labels: string[]; // Gmail labels (encrypted or not based on sensitivity)
|
||||
hasAttachments: boolean; // Flag only, attachments stored separately
|
||||
snippet: ArrayBuffer; // Encrypted preview
|
||||
|
||||
// Metadata for search (encrypted bloom filter or encrypted index)
|
||||
searchIndex: ArrayBuffer;
|
||||
|
||||
// Sync metadata
|
||||
syncedAt: number;
|
||||
localOnly: boolean; // Not yet synced to any external storage
|
||||
}
|
||||
|
||||
// Storage estimate per email:
|
||||
// - Average email: ~20KB raw → ~25KB encrypted
|
||||
// - With attachments: varies, but reference stored, not full attachment
|
||||
// - 10,000 emails ≈ 250MB
|
||||
```
|
||||
|
||||
### 2. Google Drive Documents
|
||||
|
||||
```typescript
|
||||
interface EncryptedDriveDocument {
|
||||
id: string; // Drive file ID
|
||||
encryptedName: ArrayBuffer;
|
||||
encryptedMimeType: ArrayBuffer;
|
||||
encryptedContent: ArrayBuffer; // For text-based docs
|
||||
encryptedPreview: ArrayBuffer; // Thumbnail or preview
|
||||
|
||||
// Large files: store reference, not content
|
||||
contentStrategy: 'inline' | 'reference' | 'chunked';
|
||||
chunks?: string[]; // IDs of content chunks if chunked
|
||||
|
||||
// Hierarchy
|
||||
parentId: string | null;
|
||||
path: ArrayBuffer; // Encrypted path string
|
||||
|
||||
// Sharing & permissions (for UI display)
|
||||
isShared: boolean;
|
||||
|
||||
modifiedTime: number;
|
||||
size: number; // Unencrypted for quota management
|
||||
|
||||
syncedAt: number;
|
||||
}
|
||||
|
||||
// Storage considerations:
|
||||
// - Google Docs: Convert to markdown/HTML, typically 10-100KB
|
||||
// - Spreadsheets: JSON export, 100KB-10MB depending on size
|
||||
// - PDFs: Store reference only, load on demand
|
||||
// - Images: Thumbnail locally, full resolution on demand
|
||||
```
|
||||
|
||||
### 3. Google Photos
|
||||
|
||||
```typescript
|
||||
interface EncryptedPhotoReference {
|
||||
id: string; // Photos media item ID
|
||||
encryptedFilename: ArrayBuffer;
|
||||
encryptedDescription: ArrayBuffer;
|
||||
|
||||
// Thumbnails stored locally (encrypted)
|
||||
thumbnail: {
|
||||
width: number;
|
||||
height: number;
|
||||
encryptedData: ArrayBuffer; // Base64 or blob
|
||||
};
|
||||
|
||||
// Full resolution: reference only (fetch on demand)
|
||||
fullResolution: {
|
||||
width: number;
|
||||
height: number;
|
||||
// NOT storing full image - too large
|
||||
// Fetch via API when user requests
|
||||
};
|
||||
|
||||
mediaType: 'image' | 'video';
|
||||
creationTime: number;
|
||||
|
||||
// Album associations
|
||||
albumIds: string[];
|
||||
|
||||
// Location data (highly sensitive - always encrypted)
|
||||
encryptedLocation?: ArrayBuffer;
|
||||
|
||||
syncedAt: number;
|
||||
}
|
||||
|
||||
// Storage strategy:
|
||||
// - Thumbnails: ~50KB each, store locally
|
||||
// - Full images: NOT stored locally (too large)
|
||||
// - 1,000 photos thumbnails ≈ 50MB
|
||||
// - Full resolution loaded via API on demand
|
||||
```
|
||||
|
||||
### 4. Google Calendar Events
|
||||
|
||||
```typescript
|
||||
interface EncryptedCalendarEvent {
|
||||
id: string; // Calendar event ID
|
||||
calendarId: string;
|
||||
|
||||
encryptedSummary: ArrayBuffer;
|
||||
encryptedDescription: ArrayBuffer;
|
||||
encryptedLocation: ArrayBuffer;
|
||||
|
||||
// Time data (unencrypted for query/sort performance)
|
||||
startTime: number;
|
||||
endTime: number;
|
||||
isAllDay: boolean;
|
||||
timezone: string;
|
||||
|
||||
// Recurrence
|
||||
isRecurring: boolean;
|
||||
encryptedRecurrence?: ArrayBuffer;
|
||||
|
||||
// Attendees (encrypted)
|
||||
encryptedAttendees: ArrayBuffer;
|
||||
|
||||
// Reminders
|
||||
reminders: { method: string; minutes: number }[];
|
||||
|
||||
// Meeting links (encrypted - sensitive)
|
||||
encryptedMeetingLink?: ArrayBuffer;
|
||||
|
||||
syncedAt: number;
|
||||
}
|
||||
|
||||
// Storage estimate:
|
||||
// - Average event: ~5KB encrypted
|
||||
// - 2 years of events (~3000): ~15MB
|
||||
```
|
||||
|
||||
## Encryption Strategy
|
||||
|
||||
### Key Derivation
|
||||
|
||||
Using the existing WebCrypto infrastructure, derive data encryption keys from the user's master key:
|
||||
|
||||
```typescript
|
||||
// Derive a data-specific encryption key from master key
|
||||
async function deriveDataEncryptionKey(
|
||||
masterKey: CryptoKey,
|
||||
purpose: 'gmail' | 'drive' | 'photos' | 'calendar'
|
||||
): Promise<CryptoKey> {
|
||||
const encoder = new TextEncoder();
|
||||
const purposeBytes = encoder.encode(`canvas-data-${purpose}`);
|
||||
|
||||
// Import master key for HKDF
|
||||
const baseKey = await crypto.subtle.importKey(
|
||||
'raw',
|
||||
await crypto.subtle.exportKey('raw', masterKey),
|
||||
'HKDF',
|
||||
false,
|
||||
['deriveKey']
|
||||
);
|
||||
|
||||
// Derive purpose-specific key
|
||||
return await crypto.subtle.deriveKey(
|
||||
{
|
||||
name: 'HKDF',
|
||||
hash: 'SHA-256',
|
||||
salt: purposeBytes,
|
||||
info: new ArrayBuffer(0)
|
||||
},
|
||||
baseKey,
|
||||
{ name: 'AES-GCM', length: 256 },
|
||||
false,
|
||||
['encrypt', 'decrypt']
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
### Encryption/Decryption
|
||||
|
||||
```typescript
|
||||
// Encrypt data before storing
|
||||
async function encryptData(
|
||||
data: string | ArrayBuffer,
|
||||
key: CryptoKey
|
||||
): Promise<{encrypted: ArrayBuffer, iv: Uint8Array}> {
|
||||
const iv = crypto.getRandomValues(new Uint8Array(12)); // 96-bit IV for AES-GCM
|
||||
|
||||
const dataBuffer = typeof data === 'string'
|
||||
? new TextEncoder().encode(data)
|
||||
: data;
|
||||
|
||||
const encrypted = await crypto.subtle.encrypt(
|
||||
{ name: 'AES-GCM', iv },
|
||||
key,
|
||||
dataBuffer
|
||||
);
|
||||
|
||||
return { encrypted, iv };
|
||||
}
|
||||
|
||||
// Decrypt data when reading
|
||||
async function decryptData(
|
||||
encrypted: ArrayBuffer,
|
||||
iv: Uint8Array,
|
||||
key: CryptoKey
|
||||
): Promise<ArrayBuffer> {
|
||||
return await crypto.subtle.decrypt(
|
||||
{ name: 'AES-GCM', iv },
|
||||
key,
|
||||
encrypted
|
||||
);
|
||||
}
|
||||
```
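
Because AES-GCM needs the exact IV at decryption time, the IV returned by `encryptData` has to be persisted next to each ciphertext. A hedged round-trip sketch (the record field names are illustrative):

```typescript
// Round-trip sketch: encrypt a subject line, keep ciphertext + IV together,
// then decrypt. AES-GCM authentication throws if either piece is wrong,
// so callers should handle the rejection.
async function roundTripExample(key: CryptoKey) {
  const { encrypted, iv } = await encryptData('Quarterly planning notes', key);

  // Persist both pieces (e.g. as fields on the IndexedDB record).
  const record = { encryptedSubject: encrypted, subjectIv: iv };

  const plainBuffer = await decryptData(record.encryptedSubject, record.subjectIv, key);
  return new TextDecoder().decode(plainBuffer); // 'Quarterly planning notes'
}
```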
|
||||
|
||||
## IndexedDB Schema
|
||||
|
||||
```typescript
|
||||
// Database schema for encrypted Google data
|
||||
const GOOGLE_DATA_DB = 'canvas-google-data';
|
||||
const DB_VERSION = 1;
|
||||
|
||||
interface GoogleDataSchema {
|
||||
gmail: {
|
||||
key: string; // message ID
|
||||
indexes: ['threadId', 'date', 'syncedAt'];
|
||||
};
|
||||
drive: {
|
||||
key: string; // file ID
|
||||
indexes: ['parentId', 'modifiedTime', 'mimeType'];
|
||||
};
|
||||
photos: {
|
||||
key: string; // media item ID
|
||||
indexes: ['creationTime', 'mediaType'];
|
||||
};
|
||||
calendar: {
|
||||
key: string; // event ID
|
||||
indexes: ['calendarId', 'startTime', 'endTime'];
|
||||
};
|
||||
syncMetadata: {
|
||||
key: string; // 'gmail' | 'drive' | 'photos' | 'calendar'
|
||||
// Stores last sync token, sync progress, etc.
|
||||
};
|
||||
encryptionMeta: {
|
||||
key: string; // purpose
|
||||
// Stores IV, salt for key derivation
|
||||
};
|
||||
}
|
||||
|
||||
async function initGoogleDataDB(): Promise<IDBDatabase> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const request = indexedDB.open(GOOGLE_DATA_DB, DB_VERSION);
|
||||
|
||||
request.onerror = () => reject(request.error);
|
||||
request.onsuccess = () => resolve(request.result);
|
||||
|
||||
request.onupgradeneeded = (event) => {
|
||||
const db = (event.target as IDBOpenDBRequest).result;
|
||||
|
||||
// Gmail store
|
||||
if (!db.objectStoreNames.contains('gmail')) {
|
||||
const gmailStore = db.createObjectStore('gmail', { keyPath: 'id' });
|
||||
gmailStore.createIndex('threadId', 'threadId', { unique: false });
|
||||
gmailStore.createIndex('date', 'date', { unique: false });
|
||||
gmailStore.createIndex('syncedAt', 'syncedAt', { unique: false });
|
||||
}
|
||||
|
||||
// Drive store
|
||||
if (!db.objectStoreNames.contains('drive')) {
|
||||
const driveStore = db.createObjectStore('drive', { keyPath: 'id' });
|
||||
driveStore.createIndex('parentId', 'parentId', { unique: false });
|
||||
driveStore.createIndex('modifiedTime', 'modifiedTime', { unique: false });
driveStore.createIndex('mimeType', 'mimeType', { unique: false });
|
||||
}
|
||||
|
||||
// Photos store
|
||||
if (!db.objectStoreNames.contains('photos')) {
|
||||
const photosStore = db.createObjectStore('photos', { keyPath: 'id' });
|
||||
photosStore.createIndex('creationTime', 'creationTime', { unique: false });
|
||||
photosStore.createIndex('mediaType', 'mediaType', { unique: false });
|
||||
}
|
||||
|
||||
// Calendar store
|
||||
if (!db.objectStoreNames.contains('calendar')) {
|
||||
const calendarStore = db.createObjectStore('calendar', { keyPath: 'id' });
|
||||
calendarStore.createIndex('calendarId', 'calendarId', { unique: false });
|
||||
calendarStore.createIndex('startTime', 'startTime', { unique: false });
calendarStore.createIndex('endTime', 'endTime', { unique: false });
|
||||
}
|
||||
|
||||
// Sync metadata
|
||||
if (!db.objectStoreNames.contains('syncMetadata')) {
|
||||
db.createObjectStore('syncMetadata', { keyPath: 'service' });
|
||||
}
|
||||
|
||||
// Encryption metadata
|
||||
if (!db.objectStoreNames.contains('encryptionMeta')) {
|
||||
db.createObjectStore('encryptionMeta', { keyPath: 'purpose' });
|
||||
}
|
||||
};
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
## Google OAuth & API Integration
|
||||
|
||||
### OAuth 2.0 Scopes
|
||||
|
||||
```typescript
|
||||
const GOOGLE_SCOPES = {
|
||||
// Read-only access (data sovereignty - we import, not modify)
|
||||
gmail: 'https://www.googleapis.com/auth/gmail.readonly',
|
||||
drive: 'https://www.googleapis.com/auth/drive.readonly',
|
||||
photos: 'https://www.googleapis.com/auth/photoslibrary.readonly',
|
||||
calendar: 'https://www.googleapis.com/auth/calendar.readonly',
|
||||
|
||||
// Profile for user identification
|
||||
profile: 'https://www.googleapis.com/auth/userinfo.profile',
|
||||
email: 'https://www.googleapis.com/auth/userinfo.email'
|
||||
};
|
||||
|
||||
// Selective scope request - user chooses what to import
|
||||
function getRequestedScopes(services: string[]): string {
|
||||
const scopes = [GOOGLE_SCOPES.profile, GOOGLE_SCOPES.email];
|
||||
|
||||
services.forEach(service => {
|
||||
if (GOOGLE_SCOPES[service as keyof typeof GOOGLE_SCOPES]) {
|
||||
scopes.push(GOOGLE_SCOPES[service as keyof typeof GOOGLE_SCOPES]);
|
||||
}
|
||||
});
|
||||
|
||||
return scopes.join(' ');
|
||||
}
|
||||
```
|
||||
|
||||
### OAuth Flow with PKCE
|
||||
|
||||
```typescript
|
||||
interface GoogleAuthState {
|
||||
codeVerifier: string;
|
||||
redirectUri: string;
|
||||
state: string;
|
||||
}
|
||||
|
||||
async function initiateGoogleAuth(services: string[]): Promise<void> {
|
||||
const codeVerifier = generateCodeVerifier();
|
||||
const codeChallenge = await generateCodeChallenge(codeVerifier);
|
||||
const state = crypto.randomUUID();
|
||||
|
||||
// Store state for verification
|
||||
sessionStorage.setItem('google_auth_state', JSON.stringify({
|
||||
codeVerifier,
|
||||
state,
|
||||
redirectUri: window.location.origin + '/oauth/google/callback'
|
||||
}));
|
||||
|
||||
const params = new URLSearchParams({
|
||||
client_id: import.meta.env.VITE_GOOGLE_CLIENT_ID,
|
||||
redirect_uri: window.location.origin + '/oauth/google/callback',
|
||||
response_type: 'code',
|
||||
scope: getRequestedScopes(services),
|
||||
access_type: 'offline', // Get refresh token
|
||||
prompt: 'consent',
|
||||
code_challenge: codeChallenge,
|
||||
code_challenge_method: 'S256',
|
||||
state
|
||||
});
|
||||
|
||||
window.location.href = `https://accounts.google.com/o/oauth2/v2/auth?${params}`;
|
||||
}
|
||||
|
||||
// PKCE helpers
|
||||
function generateCodeVerifier(): string {
|
||||
const array = new Uint8Array(32);
|
||||
crypto.getRandomValues(array);
|
||||
return base64UrlEncode(array);
|
||||
}
|
||||
|
||||
async function generateCodeChallenge(verifier: string): Promise<string> {
|
||||
const encoder = new TextEncoder();
|
||||
const data = encoder.encode(verifier);
|
||||
const hash = await crypto.subtle.digest('SHA-256', data);
|
||||
return base64UrlEncode(new Uint8Array(hash));
|
||||
}
|
||||
```
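
The helpers above call `base64UrlEncode`, which is not shown. A minimal version consistent with RFC 7636 (unpadded base64url over raw bytes) might look like this; it is a sketch, not necessarily the project's existing helper:

```typescript
// Unpadded base64url encoding of raw bytes, as required for the PKCE
// code verifier and the S256 code challenge (RFC 7636).
function base64UrlEncode(bytes: Uint8Array): string {
  let binary = '';
  for (const b of bytes) binary += String.fromCharCode(b);
  return btoa(binary)
    .replace(/\+/g, '-')
    .replace(/\//g, '_')
    .replace(/=+$/, '');
}
```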
|
||||
|
||||
### Token Storage (Encrypted)
|
||||
|
||||
```typescript
|
||||
interface EncryptedTokens {
|
||||
accessToken: ArrayBuffer; // Encrypted
|
||||
refreshToken: ArrayBuffer; // Encrypted
|
||||
accessTokenIv: Uint8Array;
|
||||
refreshTokenIv: Uint8Array;
|
||||
expiresAt: number; // Unencrypted for refresh logic
|
||||
scopes: string[]; // Unencrypted for UI display
|
||||
}
|
||||
|
||||
async function storeGoogleTokens(
|
||||
tokens: { access_token: string; refresh_token?: string; expires_in: number },
|
||||
encryptionKey: CryptoKey
|
||||
): Promise<void> {
|
||||
const { encrypted: encAccessToken, iv: accessIv } = await encryptData(
|
||||
tokens.access_token,
|
||||
encryptionKey
|
||||
);
|
||||
|
||||
const encryptedTokens: Partial<EncryptedTokens> = {
|
||||
accessToken: encAccessToken,
|
||||
accessTokenIv: accessIv,
|
||||
expiresAt: Date.now() + (tokens.expires_in * 1000)
|
||||
};
|
||||
|
||||
if (tokens.refresh_token) {
|
||||
const { encrypted: encRefreshToken, iv: refreshIv } = await encryptData(
|
||||
tokens.refresh_token,
|
||||
encryptionKey
|
||||
);
|
||||
encryptedTokens.refreshToken = encRefreshToken;
|
||||
encryptedTokens.refreshTokenIv = refreshIv;
|
||||
}
|
||||
|
||||
const db = await initGoogleDataDB();
|
||||
const tx = db.transaction('encryptionMeta', 'readwrite');
|
||||
tx.objectStore('encryptionMeta').put({
|
||||
purpose: 'google_tokens',
|
||||
...encryptedTokens
|
||||
});
|
||||
}
|
||||
```
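
Reading tokens back reverses this: load the record, decrypt the access token, and refresh via Google's token endpoint once `expiresAt` has passed. A sketch under the same assumptions (record shape matching `EncryptedTokens`, same `VITE_GOOGLE_CLIENT_ID` env var):

```typescript
// Sketch: load the encrypted token record, decrypt the access token, and
// refresh it via Google's token endpoint once it has expired.
async function loadAccessToken(db: IDBDatabase, encryptionKey: CryptoKey): Promise<string> {
  const record: any = await new Promise((resolve, reject) => {
    const req = db.transaction('encryptionMeta', 'readonly')
      .objectStore('encryptionMeta').get('google_tokens');
    req.onsuccess = () => resolve(req.result);
    req.onerror = () => reject(req.error);
  });
  if (!record) throw new Error('Google account not connected');

  // Still valid (with a one-minute safety margin)? Decrypt and return.
  if (Date.now() < record.expiresAt - 60_000) {
    const buf = await decryptData(record.accessToken, record.accessTokenIv, encryptionKey);
    return new TextDecoder().decode(buf);
  }

  // Expired: exchange the refresh token. A browser-only client relies on
  // Google treating it as a public client; otherwise this step belongs on a backend.
  const refreshBuf = await decryptData(record.refreshToken, record.refreshTokenIv, encryptionKey);
  const response = await fetch('https://oauth2.googleapis.com/token', {
    method: 'POST',
    headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
    body: new URLSearchParams({
      client_id: import.meta.env.VITE_GOOGLE_CLIENT_ID,
      grant_type: 'refresh_token',
      refresh_token: new TextDecoder().decode(refreshBuf)
    })
  });
  const fresh = await response.json();

  // Note: storeGoogleTokens as written replaces the whole record; a fuller
  // version should preserve the existing refresh token when Google omits it.
  await storeGoogleTokens(fresh, encryptionKey);
  return fresh.access_token;
}
```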
|
||||
|
||||
## Data Import Workflow
|
||||
|
||||
### Progressive Import with Background Sync
|
||||
|
||||
```typescript
|
||||
interface ImportProgress {
|
||||
service: 'gmail' | 'drive' | 'photos' | 'calendar';
|
||||
total: number;
|
||||
imported: number;
|
||||
lastSyncToken?: string;
|
||||
status: 'idle' | 'importing' | 'paused' | 'error';
|
||||
errorMessage?: string;
|
||||
}
|
||||
|
||||
class GoogleDataImporter {
|
||||
private encryptionKey: CryptoKey;
|
||||
private db: IDBDatabase;
|
||||
|
||||
async importGmail(options: {
|
||||
maxMessages?: number;
|
||||
labelsFilter?: string[];
|
||||
dateAfter?: Date;
|
||||
}): Promise<void> {
|
||||
const accessToken = await this.getAccessToken();
|
||||
|
||||
// Use pagination for large mailboxes
|
||||
let pageToken: string | undefined;
|
||||
let imported = 0;
|
||||
|
||||
do {
|
||||
const response = await fetch(
|
||||
`https://gmail.googleapis.com/gmail/v1/users/me/messages?${new URLSearchParams({
|
||||
maxResults: '100',
|
||||
...(pageToken && { pageToken }),
|
||||
...(options.labelsFilter && { labelIds: options.labelsFilter.join(',') }),
|
||||
...(options.dateAfter && { q: `after:${Math.floor(options.dateAfter.getTime() / 1000)}` })
|
||||
})}`,
|
||||
{ headers: { Authorization: `Bearer ${accessToken}` } }
|
||||
);
|
||||
|
||||
const data = await response.json();
|
||||
|
||||
// Fetch and encrypt each message
|
||||
for (const msg of data.messages || []) {
|
||||
const fullMessage = await this.fetchGmailMessage(msg.id, accessToken);
|
||||
await this.storeEncryptedEmail(fullMessage);
|
||||
imported++;
|
||||
|
||||
// Update progress
|
||||
this.updateProgress('gmail', imported);
|
||||
|
||||
// Yield to UI periodically
|
||||
if (imported % 10 === 0) {
|
||||
await new Promise(r => setTimeout(r, 0));
|
||||
}
|
||||
}
|
||||
|
||||
pageToken = data.nextPageToken;
|
||||
} while (pageToken && (!options.maxMessages || imported < options.maxMessages));
|
||||
}
|
||||
|
||||
private async storeEncryptedEmail(message: any): Promise<void> {
|
||||
const emailKey = await deriveDataEncryptionKey(this.encryptionKey, 'gmail');
|
||||
|
||||
const encrypted: EncryptedEmailStore = {
|
||||
id: message.id,
|
||||
threadId: message.threadId,
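      // NOTE: encryptData also returns a fresh IV per field; a complete
      // implementation must persist each IV alongside its ciphertext
      // (omitted for the fields below for brevity).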
|
||||
encryptedSubject: (await encryptData(
|
||||
this.extractHeader(message, 'Subject') || '',
|
||||
emailKey
|
||||
)).encrypted,
|
||||
encryptedBody: (await encryptData(
|
||||
this.extractBody(message),
|
||||
emailKey
|
||||
)).encrypted,
|
||||
// ... other fields
|
||||
date: parseInt(message.internalDate),
|
||||
syncedAt: Date.now(),
|
||||
localOnly: true
|
||||
};
|
||||
|
||||
const tx = this.db.transaction('gmail', 'readwrite');
|
||||
tx.objectStore('gmail').put(encrypted);
|
||||
}
|
||||
}
|
||||
```
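
The importer leans on a few helpers that are not shown (`fetchGmailMessage`, `extractHeader`, `extractBody`, `getAccessToken`). A hedged sketch of the first three against the Gmail REST API, shown here as free functions; real MIME payloads are multipart and need a deeper walk than the simple case handled below:

```typescript
// Fetch one message with its headers and MIME payload tree.
async function fetchGmailMessage(id: string, accessToken: string): Promise<any> {
  const res = await fetch(
    `https://gmail.googleapis.com/gmail/v1/users/me/messages/${id}?format=full`,
    { headers: { Authorization: `Bearer ${accessToken}` } }
  );
  if (!res.ok) throw new Error(`Gmail message fetch failed: ${res.status}`);
  return res.json();
}

// Case-insensitive lookup in the payload header list.
function extractHeader(message: any, name: string): string | undefined {
  const headers: { name: string; value: string }[] = message.payload?.headers ?? [];
  return headers.find(h => h.name.toLowerCase() === name.toLowerCase())?.value;
}

// Gmail base64url-encodes body data; multipart messages need a walk over
// payload.parts — this only handles the simple single-part case.
function extractBody(message: any): string {
  const data: string | undefined = message.payload?.body?.data;
  if (!data) return '';
  const b64 = data.replace(/-/g, '+').replace(/_/g, '/');
  return new TextDecoder().decode(Uint8Array.from(atob(b64), c => c.charCodeAt(0)));
}
```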
|
||||
|
||||
## Sharing to Canvas Board
|
||||
|
||||
### Selective Sharing Model
|
||||
|
||||
```typescript
|
||||
interface ShareableItem {
|
||||
type: 'email' | 'document' | 'photo' | 'event';
|
||||
id: string;
|
||||
// Decrypted data for sharing
|
||||
decryptedData: any;
|
||||
}
|
||||
|
||||
class DataSharingService {
|
||||
/**
|
||||
* Share a specific item to the current board
|
||||
* This decrypts the item and adds it to the Automerge document
|
||||
*/
|
||||
async shareToBoard(
|
||||
item: ShareableItem,
|
||||
boardHandle: DocumentHandle<CanvasDoc>,
|
||||
userKey: CryptoKey
|
||||
): Promise<void> {
|
||||
// 1. Decrypt the item
|
||||
const decrypted = await this.decryptItem(item, userKey);
|
||||
|
||||
// 2. Create a canvas shape representation
|
||||
const shape = this.createShapeFromItem(decrypted, item.type);
|
||||
|
||||
// 3. Add to Automerge document (syncs to other board users)
|
||||
boardHandle.change(doc => {
|
||||
doc.shapes[shape.id] = shape;
|
||||
});
|
||||
|
||||
// 4. Mark item as shared (no longer localOnly)
|
||||
await this.markAsShared(item.id, item.type);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a visual shape from data
|
||||
*/
|
||||
private createShapeFromItem(data: any, type: string): TLShape {
|
||||
switch (type) {
|
||||
case 'email':
|
||||
return {
|
||||
id: createShapeId(),
|
||||
type: 'email-card',
|
||||
props: {
|
||||
subject: data.subject,
|
||||
from: data.from,
|
||||
date: data.date,
|
||||
snippet: data.snippet
|
||||
}
|
||||
};
|
||||
case 'event':
|
||||
return {
|
||||
id: createShapeId(),
|
||||
type: 'calendar-event',
|
||||
props: {
|
||||
title: data.summary,
|
||||
startTime: data.startTime,
|
||||
endTime: data.endTime,
|
||||
location: data.location
|
||||
}
|
||||
};
|
||||
// ... other types
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## R2 Encrypted Backup
|
||||
|
||||
### Backup Architecture
|
||||
|
||||
```
|
||||
User Browser Cloudflare Worker R2 Storage
|
||||
│ │ │
|
||||
│ 1. Encrypt data locally │ │
|
||||
│ (already encrypted in IndexedDB) │ │
|
||||
│ │ │
|
||||
│ 2. Generate backup key │ │
|
||||
│ (derived from master key) │ │
|
||||
│ │ │
|
||||
│ 3. POST encrypted blob ──────────> 4. Validate user │
|
||||
│ │ (CryptID auth) │
|
||||
│ │ │
|
||||
│ │ 5. Store blob ─────────────────> │
|
||||
│ │ (already encrypted, │
|
||||
│ │ worker can't read) │
|
||||
│ │ │
|
||||
│ <──────────────────────────────── 6. Return backup ID │
|
||||
```
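
The Worker side stays intentionally thin: it authenticates the caller, writes the opaque blob to R2, and never holds a decryption key. A minimal sketch, assuming an R2 bucket binding named `BACKUPS` and a `verifyCryptIdAuth` helper for step 4 (both names are assumptions, not existing project code):

```typescript
// Cloudflare Worker sketch: accept and return opaque, client-encrypted blobs.
// `R2Bucket` comes from @cloudflare/workers-types.
declare function verifyCryptIdAuth(request: Request): Promise<string | null>;

interface Env {
  BACKUPS: R2Bucket; // assumed binding name in wrangler.toml
}

export default {
  async fetch(request: Request, env: Env): Promise<Response> {
    const userId = await verifyCryptIdAuth(request);
    if (!userId) return new Response('Unauthorized', { status: 401 });

    if (request.method === 'POST') {
      const backupId = crypto.randomUUID();
      // Body is already AES-GCM encrypted in the browser; the worker only
      // writes it into R2 and keeps the (also encrypted) manifest beside it.
      const body = await request.arrayBuffer();
      await env.BACKUPS.put(`${userId}/${backupId}`, body, {
        customMetadata: { manifest: request.headers.get('X-Backup-Manifest') ?? '' }
      });
      return Response.json({ backupId });
    }

    if (request.method === 'GET') {
      const backupId = new URL(request.url).pathname.split('/').pop()!;
      const object = await env.BACKUPS.get(`${userId}/${backupId}`);
      if (!object) return new Response('Not found', { status: 404 });
      return new Response(object.body);
    }

    return new Response('Method not allowed', { status: 405 });
  }
};
```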
|
||||
|
||||
### Backup Implementation
|
||||
|
||||
```typescript
|
||||
interface BackupMetadata {
|
||||
id: string;
|
||||
createdAt: number;
|
||||
services: ('gmail' | 'drive' | 'photos' | 'calendar')[];
|
||||
itemCount: number;
|
||||
sizeBytes: number;
|
||||
// Encrypted with user's key - only they can read
|
||||
encryptedManifest: ArrayBuffer;
|
||||
}
|
||||
|
||||
class R2BackupService {
|
||||
private workerUrl = '/api/backup';
|
||||
|
||||
async createBackup(
|
||||
services: string[],
|
||||
encryptionKey: CryptoKey
|
||||
): Promise<BackupMetadata> {
|
||||
// 1. Gather all encrypted data from IndexedDB
|
||||
const dataToBackup = await this.gatherData(services);
|
||||
|
||||
// 2. Create a manifest (encrypted)
|
||||
const manifest = {
|
||||
version: 1,
|
||||
createdAt: Date.now(),
|
||||
services,
|
||||
itemCounts: dataToBackup.counts
|
||||
};
|
||||
const { encrypted: encManifest } = await encryptData(
|
||||
JSON.stringify(manifest),
|
||||
encryptionKey
|
||||
);
|
||||
|
||||
// 3. Serialize and chunk if large
|
||||
const blob = await this.serializeForBackup(dataToBackup);
|
||||
|
||||
// 4. Upload to R2 via worker
|
||||
const response = await fetch(this.workerUrl, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/octet-stream',
|
||||
'X-Backup-Manifest': base64Encode(encManifest)
|
||||
},
|
||||
body: blob
|
||||
});
|
||||
|
||||
const { backupId } = await response.json();
|
||||
|
||||
return {
|
||||
id: backupId,
|
||||
createdAt: Date.now(),
|
||||
services: services as any,
|
||||
itemCount: Object.values(dataToBackup.counts).reduce((a, b) => a + b, 0),
|
||||
sizeBytes: blob.size,
|
||||
encryptedManifest: encManifest
|
||||
};
|
||||
}
|
||||
|
||||
async restoreBackup(
|
||||
backupId: string,
|
||||
encryptionKey: CryptoKey
|
||||
): Promise<void> {
|
||||
// 1. Fetch encrypted blob from R2
|
||||
const response = await fetch(`${this.workerUrl}/${backupId}`);
|
||||
const encryptedBlob = await response.arrayBuffer();
|
||||
|
||||
// 2. Data is already encrypted with user's key
|
||||
// Just write directly to IndexedDB
|
||||
await this.writeToIndexedDB(encryptedBlob);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Privacy & Security Guarantees

### What Never Leaves the Browser (Unencrypted)

1. **Email content** - body, subject, attachments
2. **Document content** - file contents, names
3. **Photo data** - images, location metadata
4. **Calendar details** - event descriptions, attendee info
5. **OAuth tokens** - access/refresh tokens

### What the Server Never Sees

1. **Encryption keys** - derived locally, never transmitted
2. **Plaintext data** - all API calls are client-side
3. **User's Google account data** - we use read-only scopes

### Data Flow Summary
|
||||
|
||||
```
|
||||
┌─────────────────────┐
|
||||
│ Google APIs │
|
||||
│ (authenticated) │
|
||||
└──────────┬──────────┘
|
||||
│
|
||||
┌─────────▼─────────┐
|
||||
│ Browser Fetch │
|
||||
│ (client-side) │
|
||||
└─────────┬─────────┘
|
||||
│
|
||||
┌─────────▼─────────┐
|
||||
│ Encrypt with │
|
||||
│ WebCrypto │
|
||||
│ (AES-256-GCM) │
|
||||
└─────────┬─────────┘
|
||||
│
|
||||
┌────────────────────┼────────────────────┐
|
||||
│ │ │
|
||||
┌─────────▼─────────┐ ┌───────▼────────┐ ┌────────▼───────┐
|
||||
│ IndexedDB │ │ Share to │ │ R2 Backup │
|
||||
│ (local only) │ │ Board │ │ (encrypted) │
|
||||
│ │ │ (Automerge) │ │ │
|
||||
└───────────────────┘ └────────────────┘ └────────────────┘
|
||||
│ │ │
|
||||
▼ ▼ ▼
|
||||
Only you can read Board members Only you can
|
||||
(your keys) see shared items decrypt backup
|
||||
```
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1: Foundation
|
||||
- [ ] IndexedDB schema for encrypted data
|
||||
- [ ] Key derivation from existing WebCrypto keys
|
||||
- [ ] Encrypt/decrypt utility functions
|
||||
- [ ] Storage quota monitoring
|
||||
|
||||
### Phase 2: Google OAuth
|
||||
- [ ] OAuth 2.0 with PKCE flow
|
||||
- [ ] Token encryption and storage
|
||||
- [ ] Token refresh logic
|
||||
- [ ] Scope selection UI
|
||||
|
||||
### Phase 3: Data Import
|
||||
- [ ] Gmail import with pagination
|
||||
- [ ] Drive document import
|
||||
- [ ] Photos thumbnail import
|
||||
- [ ] Calendar event import
|
||||
- [ ] Progress tracking UI
|
||||
|
||||
### Phase 4: Canvas Integration
|
||||
- [ ] Email card shape
|
||||
- [ ] Document preview shape
|
||||
- [ ] Photo thumbnail shape
|
||||
- [ ] Calendar event shape
|
||||
- [ ] Share to board functionality
|
||||
|
||||
### Phase 5: R2 Backup
|
||||
- [ ] Encrypted backup creation
|
||||
- [ ] Backup restore
|
||||
- [ ] Backup management UI
|
||||
- [ ] Automatic backup scheduling
|
||||
|
||||
### Phase 6: Polish
|
||||
- [ ] Safari storage warnings
|
||||
- [ ] Offline data access
|
||||
- [ ] Search within encrypted data
|
||||
- [ ] Data export (Google Takeout style)
|
||||
|
||||
## Security Checklist

- [ ] All data encrypted before storage
- [ ] Keys never leave browser unencrypted
- [ ] OAuth tokens encrypted at rest
- [ ] PKCE used for OAuth flow
- [ ] Read-only Google API scopes
- [ ] Safari 7-day eviction handled
- [ ] Storage quota warnings
- [ ] Secure context required (HTTPS)
- [ ] CSP headers configured
- [ ] No sensitive data in console logs

## Related Documents
|
||||
|
||||
- [Local File Upload](./LOCAL_FILE_UPLOAD.md) - Multi-item upload with same encryption model
|
||||
- [Offline Storage Feasibility](../OFFLINE_STORAGE_FEASIBILITY.md) - IndexedDB + Automerge foundation
|
||||
|
||||
## References
|
||||
|
||||
- [IndexedDB API](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API)
|
||||
- [Web Crypto API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API)
|
||||
- [Storage API](https://developer.mozilla.org/en-US/docs/Web/API/Storage_API)
|
||||
- [Google OAuth 2.0](https://developers.google.com/identity/protocols/oauth2)
|
||||
- [Gmail API](https://developers.google.com/gmail/api)
|
||||
- [Drive API](https://developers.google.com/drive/api)
|
||||
- [Photos Library API](https://developers.google.com/photos/library/reference/rest)
|
||||
- [Calendar API](https://developers.google.com/calendar/api)
|
||||
# Local File Upload: Multi-Item Encrypted Import
|
||||
|
||||
A simpler, more broadly compatible approach to importing local files into the canvas with the same privacy-first, encrypted storage model.
|
||||
|
||||
## Overview
|
||||
|
||||
Instead of maintaining persistent folder connections (which have browser compatibility issues), provide a **drag-and-drop / file picker** interface for batch importing files into encrypted local storage.
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────┐
|
||||
│ UPLOAD INTERFACE │
|
||||
├─────────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ┌─────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ │ │
|
||||
│ │ 📁 Drop files here or click to browse │ │
|
||||
│ │ │ │
|
||||
│ │ Supports: Images, PDFs, Documents, Text, Audio, Video │ │
|
||||
│ │ │ │
|
||||
│ └─────────────────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌──────────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Import Queue [Upload] │ │
|
||||
│ ├──────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ ☑ photo_001.jpg (2.4 MB) 🔒 Encrypt 📤 Share │ │
|
||||
│ │ ☑ meeting_notes.pdf (450 KB) 🔒 Encrypt ☐ Private │ │
|
||||
│ │ ☑ project_plan.md (12 KB) 🔒 Encrypt ☐ Private │ │
|
||||
│ │ ☐ sensitive_doc.docx (1.2 MB) 🔒 Encrypt ☐ Private │ │
|
||||
│ └──────────────────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ Storage: 247 MB used / ~5 GB available │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Why Multi-Item Upload vs. Folder Connection

| Feature | Folder Connection | Multi-Item Upload |
|---------|------------------|-------------------|
| Browser Support | Chrome/Edge only | All browsers |
| Persistent Access | Yes (with permission) | No (one-time import) |
| Implementation | Complex | Simple |
| User Control | Less explicit | Very explicit |
| Privacy UX | Hidden | Clear per-file choices |

**Recommendation**: Multi-item upload is better for privacy-conscious users who want explicit control over what enters the system.
|
||||
|
||||
## Supported File Types

### Documents
| Type | Extension | Processing | Storage Strategy |
|------|-----------|-----------|------------------|
| Markdown | `.md` | Parse frontmatter, render | Full content |
| PDF | `.pdf` | Extract text, thumbnail | Text + thumbnail |
| Word | `.docx` | Convert to markdown | Converted content |
| Text | `.txt`, `.csv`, `.json` | Direct | Full content |
| Code | `.js`, `.ts`, `.py`, etc. | Syntax highlight | Full content |

### Images
| Type | Extension | Processing | Storage Strategy |
|------|-----------|-----------|------------------|
| Photos | `.jpg`, `.png`, `.webp` | Generate thumbnail | Thumbnail + full |
| Vector | `.svg` | Direct | Full content |
| GIF | `.gif` | First frame thumb | Thumbnail + full |

### Media
| Type | Extension | Processing | Storage Strategy |
|------|-----------|-----------|------------------|
| Audio | `.mp3`, `.wav`, `.m4a` | Waveform preview | Reference + metadata |
| Video | `.mp4`, `.webm` | Frame thumbnail | Reference + metadata |

### Archives (Future)
| Type | Extension | Processing |
|------|-----------|-----------|
| ZIP | `.zip` | List contents, selective extract |
| Obsidian Export | `.zip` | Vault structure import |

## Architecture
|
||||
|
||||
```typescript
|
||||
interface UploadedFile {
|
||||
id: string; // Generated UUID
|
||||
originalName: string; // User's filename
|
||||
mimeType: string;
|
||||
size: number;
|
||||
|
||||
// Processing results
|
||||
processed: {
|
||||
thumbnail?: ArrayBuffer; // For images/PDFs/videos
|
||||
extractedText?: string; // For searchable docs
|
||||
metadata?: Record<string, any>; // EXIF, frontmatter, etc.
|
||||
};
|
||||
|
||||
// Encryption
|
||||
encrypted: {
|
||||
content: ArrayBuffer; // Encrypted file content
|
||||
iv: Uint8Array;
|
||||
keyId: string; // Reference to encryption key
|
||||
};
|
||||
|
||||
// User choices
|
||||
sharing: {
|
||||
localOnly: boolean; // Default true
|
||||
sharedToBoard?: string; // Board ID if shared
|
||||
backedUpToR2?: boolean;
|
||||
};
|
||||
|
||||
// Timestamps
|
||||
importedAt: number;
|
||||
lastAccessedAt: number;
|
||||
}
|
||||
```
|
||||
|
||||
## Implementation
|
||||
|
||||
### 1. File Input Component
|
||||
|
||||
```typescript
|
||||
import React, { useCallback, useState } from 'react';
|
||||
|
||||
interface FileUploadProps {
|
||||
onFilesSelected: (files: File[]) => void;
|
||||
maxFileSize?: number; // bytes
|
||||
maxFiles?: number;
|
||||
acceptedTypes?: string[];
|
||||
}
|
||||
|
||||
export function FileUploadZone({
|
||||
onFilesSelected,
|
||||
maxFileSize = 100 * 1024 * 1024, // 100MB default
|
||||
maxFiles = 50,
|
||||
acceptedTypes
|
||||
}: FileUploadProps) {
|
||||
const [isDragging, setIsDragging] = useState(false);
|
||||
const [errors, setErrors] = useState<string[]>([]);
|
||||
|
||||
const handleDrop = useCallback((e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
setIsDragging(false);
|
||||
|
||||
const files = Array.from(e.dataTransfer.files);
|
||||
validateAndProcess(files);
|
||||
}, []);
|
||||
|
||||
const handleFileInput = useCallback((e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const files = Array.from(e.target.files || []);
|
||||
validateAndProcess(files);
|
||||
}, []);
|
||||
|
||||
  const validateAndProcess = (files: File[]) => {
    // Local name kept distinct from the `errors` state to avoid shadowing.
    const newErrors: string[] = [];
    const validFiles: File[] = [];

    for (const file of files.slice(0, maxFiles)) {
      if (file.size > maxFileSize) {
        newErrors.push(`${file.name}: exceeds ${maxFileSize / 1024 / 1024}MB limit`);
        continue;
      }

      if (acceptedTypes && !acceptedTypes.some(t => file.type.match(t))) {
        newErrors.push(`${file.name}: unsupported file type`);
        continue;
      }

      validFiles.push(file);
    }

    if (files.length > maxFiles) {
      newErrors.push(`Only first ${maxFiles} files will be imported`);
    }

    setErrors(newErrors);
    if (validFiles.length > 0) {
      onFilesSelected(validFiles);
    }
  };
|
||||
|
||||
return (
|
||||
<div
|
||||
onDrop={handleDrop}
|
||||
onDragOver={(e) => { e.preventDefault(); setIsDragging(true); }}
|
||||
onDragLeave={() => setIsDragging(false)}
|
||||
className={`upload-zone ${isDragging ? 'dragging' : ''}`}
|
||||
>
|
||||
<input
|
||||
type="file"
|
||||
multiple
|
||||
onChange={handleFileInput}
|
||||
accept={acceptedTypes?.join(',')}
|
||||
id="file-upload"
|
||||
hidden
|
||||
/>
|
||||
<label htmlFor="file-upload">
|
||||
<span className="upload-icon">📁</span>
|
||||
<span>Drop files here or click to browse</span>
|
||||
<span className="upload-hint">
|
||||
Images, PDFs, Documents, Text files
|
||||
</span>
|
||||
</label>
|
||||
|
||||
{errors.length > 0 && (
|
||||
<div className="upload-errors">
|
||||
{errors.map((err, i) => <div key={i}>{err}</div>)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
### 2. File Processing Pipeline
|
||||
|
||||
```typescript
|
||||
interface ProcessedFile {
|
||||
file: File;
|
||||
thumbnail?: Blob;
|
||||
extractedText?: string;
|
||||
metadata?: Record<string, any>;
|
||||
}
|
||||
|
||||
class FileProcessor {
|
||||
|
||||
async process(file: File): Promise<ProcessedFile> {
|
||||
const result: ProcessedFile = { file };
|
||||
|
||||
// Route based on MIME type
|
||||
if (file.type.startsWith('image/')) {
|
||||
return this.processImage(file, result);
|
||||
} else if (file.type === 'application/pdf') {
|
||||
return this.processPDF(file, result);
|
||||
} else if (file.type.startsWith('text/') || this.isTextFile(file)) {
|
||||
return this.processText(file, result);
|
||||
} else if (file.type.startsWith('video/')) {
|
||||
return this.processVideo(file, result);
|
||||
} else if (file.type.startsWith('audio/')) {
|
||||
return this.processAudio(file, result);
|
||||
}
|
||||
|
||||
// Default: store as-is
|
||||
return result;
|
||||
}
|
||||
|
||||
private async processImage(file: File, result: ProcessedFile): Promise<ProcessedFile> {
|
||||
// Generate thumbnail
|
||||
const img = await createImageBitmap(file);
|
||||
const canvas = new OffscreenCanvas(200, 200);
|
||||
const ctx = canvas.getContext('2d')!;
|
||||
|
||||
// Calculate aspect-ratio preserving dimensions
|
||||
const scale = Math.min(200 / img.width, 200 / img.height);
|
||||
const w = img.width * scale;
|
||||
const h = img.height * scale;
|
||||
|
||||
ctx.drawImage(img, (200 - w) / 2, (200 - h) / 2, w, h);
|
||||
result.thumbnail = await canvas.convertToBlob({ type: 'image/webp', quality: 0.8 });
|
||||
|
||||
// Extract EXIF if available
|
||||
if (file.type === 'image/jpeg') {
|
||||
result.metadata = await this.extractExif(file);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
private async processPDF(file: File, result: ProcessedFile): Promise<ProcessedFile> {
|
||||
// Use pdf.js for text extraction and thumbnail
|
||||
const pdfjsLib = await import('pdfjs-dist');
|
||||
const arrayBuffer = await file.arrayBuffer();
|
||||
const pdf = await pdfjsLib.getDocument({ data: arrayBuffer }).promise;
|
||||
|
||||
// Get first page as thumbnail
|
||||
const page = await pdf.getPage(1);
|
||||
const viewport = page.getViewport({ scale: 0.5 });
|
||||
const canvas = new OffscreenCanvas(viewport.width, viewport.height);
|
||||
const ctx = canvas.getContext('2d')!;
|
||||
|
||||
await page.render({ canvasContext: ctx, viewport }).promise;
|
||||
result.thumbnail = await canvas.convertToBlob({ type: 'image/webp' });
|
||||
|
||||
// Extract text from all pages
|
||||
let text = '';
|
||||
for (let i = 1; i <= pdf.numPages; i++) {
|
||||
const page = await pdf.getPage(i);
|
||||
const content = await page.getTextContent();
|
||||
text += content.items.map((item: any) => item.str).join(' ') + '\n';
|
||||
}
|
||||
result.extractedText = text;
|
||||
|
||||
result.metadata = { pageCount: pdf.numPages };
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
private async processText(file: File, result: ProcessedFile): Promise<ProcessedFile> {
|
||||
result.extractedText = await file.text();
|
||||
|
||||
// Parse markdown frontmatter if applicable
|
||||
if (file.name.endsWith('.md')) {
|
||||
const frontmatter = this.parseFrontmatter(result.extractedText);
|
||||
if (frontmatter) {
|
||||
result.metadata = frontmatter;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
private async processVideo(file: File, result: ProcessedFile): Promise<ProcessedFile> {
|
||||
// Generate thumbnail from first frame
|
||||
const video = document.createElement('video');
|
||||
video.preload = 'metadata';
|
||||
video.src = URL.createObjectURL(file);
|
||||
|
||||
await new Promise(resolve => video.addEventListener('loadedmetadata', resolve));
|
||||
video.currentTime = 1; // First second
|
||||
await new Promise(resolve => video.addEventListener('seeked', resolve));
|
||||
|
||||
const canvas = new OffscreenCanvas(200, 200);
|
||||
const ctx = canvas.getContext('2d')!;
|
||||
const scale = Math.min(200 / video.videoWidth, 200 / video.videoHeight);
|
||||
ctx.drawImage(video, 0, 0, video.videoWidth * scale, video.videoHeight * scale);
|
||||
|
||||
result.thumbnail = await canvas.convertToBlob({ type: 'image/webp' });
|
||||
result.metadata = {
|
||||
duration: video.duration,
|
||||
width: video.videoWidth,
|
||||
height: video.videoHeight
|
||||
};
|
||||
|
||||
URL.revokeObjectURL(video.src);
|
||||
return result;
|
||||
}
|
||||
|
||||
private async processAudio(file: File, result: ProcessedFile): Promise<ProcessedFile> {
|
||||
// Extract duration and basic metadata
|
||||
const audio = document.createElement('audio');
|
||||
audio.src = URL.createObjectURL(file);
|
||||
|
||||
await new Promise(resolve => audio.addEventListener('loadedmetadata', resolve));
|
||||
|
||||
result.metadata = {
|
||||
duration: audio.duration
|
||||
};
|
||||
|
||||
URL.revokeObjectURL(audio.src);
|
||||
return result;
|
||||
}
|
||||
|
||||
private isTextFile(file: File): boolean {
|
||||
const textExtensions = ['.md', '.txt', '.json', '.csv', '.yaml', '.yml', '.xml', '.html', '.css', '.js', '.ts', '.py', '.sh'];
|
||||
return textExtensions.some(ext => file.name.toLowerCase().endsWith(ext));
|
||||
}
|
||||
|
||||
private parseFrontmatter(content: string): Record<string, any> | null {
|
||||
const match = content.match(/^---\n([\s\S]*?)\n---/);
|
||||
if (!match) return null;
|
||||
|
||||
try {
|
||||
// Simple YAML-like parsing (or use a proper YAML parser)
|
||||
const lines = match[1].split('\n');
|
||||
const result: Record<string, any> = {};
|
||||
for (const line of lines) {
|
||||
const [key, ...valueParts] = line.split(':');
|
||||
if (key && valueParts.length) {
|
||||
result[key.trim()] = valueParts.join(':').trim();
|
||||
}
|
||||
}
|
||||
return result;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private async extractExif(file: File): Promise<Record<string, any>> {
|
||||
// Would use exif-js or similar library
|
||||
return {};
|
||||
}
|
||||
}
|
||||
```
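
`extractExif` is left as a stub above. One possible shape for it, assuming the `exifr` package is added as a dependency (the `parse` call below reflects exifr's documented entry point, but treat the exact signature as an assumption to verify):

```typescript
import exifr from 'exifr'; // assumed dependency

// Pull out only low-risk fields; GPS data stays behind the stripExif
// privacy setting and is not surfaced by default.
async function extractExif(file: File): Promise<Record<string, any>> {
  try {
    const tags = await exifr.parse(file, ['Make', 'Model', 'DateTimeOriginal', 'Orientation']);
    return tags ?? {};
  } catch {
    return {}; // unreadable or missing EXIF is not an import error
  }
}
```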
|
||||
|
||||
### 3. Encryption & Storage
|
||||
|
||||
```typescript
|
||||
class LocalFileStore {
|
||||
private db: IDBDatabase;
|
||||
private encryptionKey: CryptoKey;
|
||||
|
||||
async storeFile(processed: ProcessedFile, options: {
|
||||
shareToBoard?: boolean;
|
||||
} = {}): Promise<UploadedFile> {
|
||||
const fileId = crypto.randomUUID();
|
||||
|
||||
// Read file content
|
||||
const content = await processed.file.arrayBuffer();
|
||||
|
||||
// Encrypt content
|
||||
const iv = crypto.getRandomValues(new Uint8Array(12));
|
||||
const encryptedContent = await crypto.subtle.encrypt(
|
||||
{ name: 'AES-GCM', iv },
|
||||
this.encryptionKey,
|
||||
content
|
||||
);
|
||||
|
||||
// Encrypt thumbnail if present
|
||||
let encryptedThumbnail: ArrayBuffer | undefined;
|
||||
let thumbnailIv: Uint8Array | undefined;
|
||||
if (processed.thumbnail) {
|
||||
thumbnailIv = crypto.getRandomValues(new Uint8Array(12));
|
||||
const thumbBuffer = await processed.thumbnail.arrayBuffer();
|
||||
encryptedThumbnail = await crypto.subtle.encrypt(
|
||||
{ name: 'AES-GCM', iv: thumbnailIv },
|
||||
this.encryptionKey,
|
||||
thumbBuffer
|
||||
);
|
||||
}
|
||||
|
||||
const uploadedFile: UploadedFile = {
|
||||
id: fileId,
|
||||
originalName: processed.file.name,
|
||||
mimeType: processed.file.type,
|
||||
size: processed.file.size,
|
||||
processed: {
|
||||
extractedText: processed.extractedText,
|
||||
metadata: processed.metadata
|
||||
},
|
||||
encrypted: {
|
||||
content: encryptedContent,
|
||||
iv,
|
||||
keyId: 'user-master-key'
|
||||
},
|
||||
sharing: {
|
||||
localOnly: !options.shareToBoard,
|
||||
sharedToBoard: options.shareToBoard ? getCurrentBoardId() : undefined
|
||||
},
|
||||
importedAt: Date.now(),
|
||||
lastAccessedAt: Date.now()
|
||||
};
|
||||
|
||||
// Store encrypted thumbnail separately (for faster listing)
|
||||
if (encryptedThumbnail && thumbnailIv) {
|
||||
await this.storeThumbnail(fileId, encryptedThumbnail, thumbnailIv);
|
||||
}
|
||||
|
||||
// Store to IndexedDB
|
||||
const tx = this.db.transaction('files', 'readwrite');
|
||||
tx.objectStore('files').put(uploadedFile);
|
||||
|
||||
return uploadedFile;
|
||||
}
|
||||
|
||||
async getFile(fileId: string): Promise<{
|
||||
file: UploadedFile;
|
||||
decryptedContent: ArrayBuffer;
|
||||
} | null> {
|
||||
const tx = this.db.transaction('files', 'readonly');
|
||||
const file = await new Promise<UploadedFile | undefined>(resolve => {
|
||||
const req = tx.objectStore('files').get(fileId);
|
||||
req.onsuccess = () => resolve(req.result);
|
||||
});
|
||||
|
||||
if (!file) return null;
|
||||
|
||||
// Decrypt content
|
||||
const decryptedContent = await crypto.subtle.decrypt(
|
||||
{ name: 'AES-GCM', iv: file.encrypted.iv },
|
||||
this.encryptionKey,
|
||||
file.encrypted.content
|
||||
);
|
||||
|
||||
return { file, decryptedContent };
|
||||
}
|
||||
|
||||
async listFiles(options?: {
|
||||
mimeTypeFilter?: string;
|
||||
limit?: number;
|
||||
offset?: number;
|
||||
}): Promise<UploadedFile[]> {
|
||||
const tx = this.db.transaction('files', 'readonly');
|
||||
const store = tx.objectStore('files');
|
||||
|
||||
return new Promise(resolve => {
|
||||
const files: UploadedFile[] = [];
|
||||
const req = store.openCursor();
|
||||
|
||||
req.onsuccess = (e) => {
|
||||
const cursor = (e.target as IDBRequest).result;
|
||||
if (cursor) {
|
||||
const file = cursor.value as UploadedFile;
|
||||
|
||||
// Filter by MIME type if specified
|
||||
if (!options?.mimeTypeFilter || file.mimeType.startsWith(options.mimeTypeFilter)) {
|
||||
files.push(file);
|
||||
}
|
||||
|
||||
cursor.continue();
|
||||
} else {
|
||||
resolve(files);
|
||||
}
|
||||
};
|
||||
});
|
||||
}
|
||||
}
|
||||
```
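
`storeFile` calls a `storeThumbnail` helper that is not shown; a small sketch of it as a method on `LocalFileStore`, consistent with the separate `thumbnails` object store defined in the next section:

```typescript
// Sketch: keep encrypted thumbnails in their own store so the file list
// can render previews without loading the full encrypted contents.
private async storeThumbnail(
  fileId: string,
  encryptedThumbnail: ArrayBuffer,
  iv: Uint8Array
): Promise<void> {
  const tx = this.db.transaction('thumbnails', 'readwrite');
  tx.objectStore('thumbnails').put({ fileId, encryptedThumbnail, iv });
  await new Promise<void>((resolve, reject) => {
    tx.oncomplete = () => resolve();
    tx.onerror = () => reject(tx.error);
  });
}
```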
|
||||
|
||||
### 4. IndexedDB Schema
|
||||
|
||||
```typescript
|
||||
const LOCAL_FILES_DB = 'canvas-local-files';
|
||||
const DB_VERSION = 1;
|
||||
|
||||
async function initLocalFilesDB(): Promise<IDBDatabase> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const request = indexedDB.open(LOCAL_FILES_DB, DB_VERSION);
|
||||
|
||||
request.onerror = () => reject(request.error);
|
||||
request.onsuccess = () => resolve(request.result);
|
||||
|
||||
request.onupgradeneeded = (event) => {
|
||||
const db = (event.target as IDBOpenDBRequest).result;
|
||||
|
||||
// Main files store
|
||||
if (!db.objectStoreNames.contains('files')) {
|
||||
const store = db.createObjectStore('files', { keyPath: 'id' });
|
||||
store.createIndex('mimeType', 'mimeType', { unique: false });
|
||||
store.createIndex('importedAt', 'importedAt', { unique: false });
|
||||
store.createIndex('originalName', 'originalName', { unique: false });
|
||||
store.createIndex('sharedToBoard', 'sharing.sharedToBoard', { unique: false });
|
||||
}
|
||||
|
||||
// Thumbnails store (separate for faster listing)
|
||||
if (!db.objectStoreNames.contains('thumbnails')) {
|
||||
db.createObjectStore('thumbnails', { keyPath: 'fileId' });
|
||||
}
|
||||
|
||||
// Search index (encrypted full-text search)
|
||||
if (!db.objectStoreNames.contains('searchIndex')) {
|
||||
const searchStore = db.createObjectStore('searchIndex', { keyPath: 'fileId' });
|
||||
searchStore.createIndex('tokens', 'tokens', { unique: false, multiEntry: true });
|
||||
}
|
||||
};
|
||||
});
|
||||
}
|
||||
```
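
The `searchIndex` store with its multiEntry `tokens` index allows lookups without decrypting file bodies at query time: tokens are derived from `extractedText` at import, before encryption. A sketch of that indexing step (note the plain-token approach leaks word presence to anyone who can open the local database; hashing each token with a user-derived key would be a stricter variant):

```typescript
// Build a simple token index for a file at import time.
async function indexFileForSearch(
  db: IDBDatabase,
  fileId: string,
  extractedText: string | undefined
): Promise<void> {
  if (!extractedText) return;

  // Lowercase word tokens, deduplicated, short/noise words dropped.
  const tokens = Array.from(
    new Set(
      extractedText
        .toLowerCase()
        .split(/[^\p{L}\p{N}]+/u)
        .filter(t => t.length > 2)
    )
  ).slice(0, 5000); // cap index size per file

  db.transaction('searchIndex', 'readwrite')
    .objectStore('searchIndex')
    .put({ fileId, tokens });
}

// Query: the multiEntry index returns every record containing the token.
async function searchFiles(db: IDBDatabase, term: string): Promise<string[]> {
  const index = db.transaction('searchIndex', 'readonly')
    .objectStore('searchIndex')
    .index('tokens');
  return new Promise(resolve => {
    const req = index.getAll(term.toLowerCase());
    req.onsuccess = () => resolve(req.result.map((r: any) => r.fileId));
  });
}
```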
|
||||
|
||||
## UI Components
|
||||
|
||||
### Import Dialog
|
||||
|
||||
```tsx
|
||||
function ImportFilesDialog({ isOpen, onClose }: { isOpen: boolean; onClose: () => void }) {
|
||||
const [selectedFiles, setSelectedFiles] = useState<ProcessedFile[]>([]);
|
||||
const [importing, setImporting] = useState(false);
|
||||
const [progress, setProgress] = useState(0);
|
||||
const fileStore = useLocalFileStore();
|
||||
|
||||
const handleFilesSelected = async (files: File[]) => {
|
||||
const processor = new FileProcessor();
|
||||
const processed: ProcessedFile[] = [];
|
||||
|
||||
for (const file of files) {
|
||||
processed.push(await processor.process(file));
|
||||
}
|
||||
|
||||
setSelectedFiles(prev => [...prev, ...processed]);
|
||||
};
|
||||
|
||||
const handleImport = async () => {
|
||||
setImporting(true);
|
||||
|
||||
for (let i = 0; i < selectedFiles.length; i++) {
|
||||
await fileStore.storeFile(selectedFiles[i]);
|
||||
setProgress((i + 1) / selectedFiles.length * 100);
|
||||
}
|
||||
|
||||
setImporting(false);
|
||||
onClose();
|
||||
};
|
||||
|
||||
return (
|
||||
<Dialog open={isOpen} onClose={onClose}>
|
||||
<DialogTitle>Import Files</DialogTitle>
|
||||
|
||||
<FileUploadZone onFilesSelected={handleFilesSelected} />
|
||||
|
||||
{selectedFiles.length > 0 && (
|
||||
<div className="file-list">
|
||||
{selectedFiles.map((pf, i) => (
|
||||
<FilePreviewRow
|
||||
key={i}
|
||||
file={pf}
|
||||
onRemove={() => setSelectedFiles(prev => prev.filter((_, j) => j !== i))}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{importing && (
|
||||
<progress value={progress} max={100} />
|
||||
)}
|
||||
|
||||
<DialogActions>
|
||||
<button onClick={onClose}>Cancel</button>
|
||||
<button
|
||||
onClick={handleImport}
|
||||
disabled={selectedFiles.length === 0 || importing}
|
||||
>
|
||||
Import {selectedFiles.length} files
|
||||
</button>
|
||||
</DialogActions>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
### File Browser Panel
|
||||
|
||||
```tsx
|
||||
function LocalFilesBrowser() {
|
||||
const [files, setFiles] = useState<UploadedFile[]>([]);
|
||||
const [filter, setFilter] = useState<string>('all');
|
||||
const fileStore = useLocalFileStore();
|
||||
|
||||
useEffect(() => {
|
||||
loadFiles();
|
||||
}, [filter]);
|
||||
|
||||
const loadFiles = async () => {
|
||||
const mimeFilter = filter === 'all' ? undefined : filter;
|
||||
setFiles(await fileStore.listFiles({ mimeTypeFilter: mimeFilter }));
|
||||
};
|
||||
|
||||
const handleDragToCanvas = (file: UploadedFile) => {
|
||||
// Create a shape from the file and add to canvas
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="local-files-browser">
|
||||
<div className="filter-bar">
|
||||
<button onClick={() => setFilter('all')}>All</button>
|
||||
<button onClick={() => setFilter('image/')}>Images</button>
|
||||
<button onClick={() => setFilter('application/pdf')}>PDFs</button>
|
||||
<button onClick={() => setFilter('text/')}>Documents</button>
|
||||
</div>
|
||||
|
||||
<div className="files-grid">
|
||||
{files.map(file => (
|
||||
<FileCard
|
||||
key={file.id}
|
||||
file={file}
|
||||
onDragStart={() => handleDragToCanvas(file)}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
## Canvas Integration
|
||||
|
||||
### Drag Files to Canvas
|
||||
|
||||
```typescript
|
||||
// When user drags a local file onto the canvas
|
||||
async function createShapeFromLocalFile(
|
||||
file: UploadedFile,
|
||||
position: { x: number; y: number },
|
||||
editor: Editor
|
||||
): Promise<TLShapeId> {
|
||||
const fileStore = getLocalFileStore();
|
||||
const { decryptedContent } = await fileStore.getFile(file.id);
|
||||
|
||||
if (file.mimeType.startsWith('image/')) {
|
||||
// Create image shape
|
||||
const blob = new Blob([decryptedContent], { type: file.mimeType });
|
||||
const assetId = AssetRecordType.createId();
|
||||
|
||||
await editor.createAssets([{
|
||||
id: assetId,
|
||||
type: 'image',
|
||||
typeName: 'asset',
|
||||
props: {
|
||||
name: file.originalName,
|
||||
src: URL.createObjectURL(blob),
|
||||
w: 400,
|
||||
h: 300,
|
||||
mimeType: file.mimeType,
|
||||
isAnimated: file.mimeType === 'image/gif'
|
||||
}
|
||||
}]);
|
||||
|
||||
return editor.createShape({
|
||||
type: 'image',
|
||||
x: position.x,
|
||||
y: position.y,
|
||||
props: { assetId, w: 400, h: 300 }
|
||||
}).id;
|
||||
|
||||
} else if (file.mimeType === 'application/pdf') {
|
||||
// Create PDF embed or preview shape
|
||||
return editor.createShape({
|
||||
type: 'pdf-preview',
|
||||
x: position.x,
|
||||
y: position.y,
|
||||
props: {
|
||||
fileId: file.id,
|
||||
name: file.originalName,
|
||||
pageCount: file.processed.metadata?.pageCount
|
||||
}
|
||||
}).id;
|
||||
|
||||
} else if (file.mimeType.startsWith('text/') || file.originalName.endsWith('.md')) {
|
||||
// Create note shape with content
|
||||
const text = new TextDecoder().decode(decryptedContent);
|
||||
return editor.createShape({
|
||||
type: 'note',
|
||||
x: position.x,
|
||||
y: position.y,
|
||||
props: {
|
||||
text: text.slice(0, 1000), // Truncate for display
|
||||
fileId: file.id,
|
||||
fullContentAvailable: text.length > 1000
|
||||
}
|
||||
}).id;
|
||||
}
|
||||
|
||||
// Default: generic file card
|
||||
return editor.createShape({
|
||||
type: 'file-card',
|
||||
x: position.x,
|
||||
y: position.y,
|
||||
props: {
|
||||
fileId: file.id,
|
||||
name: file.originalName,
|
||||
size: file.size,
|
||||
mimeType: file.mimeType
|
||||
}
|
||||
}).id;
|
||||
}
|
||||
```
|
||||
|
||||
## Storage Considerations

### Size Limits & Recommendations

| File Type | Max Recommended | Notes |
|-----------|----------------|-------|
| Images | 20MB each | Larger images get resized |
| PDFs | 50MB each | Text extracted for search |
| Videos | 100MB each | Store reference, thumbnail only |
| Audio | 50MB each | Store with waveform preview |
| Documents | 10MB each | Full content stored |

### Total Storage Budget
|
||||
|
||||
```typescript
|
||||
const STORAGE_CONFIG = {
|
||||
// Soft warning at 500MB
|
||||
warningThreshold: 500 * 1024 * 1024,
|
||||
|
||||
// Hard limit at 2GB (leaves room for other data)
|
||||
maxStorage: 2 * 1024 * 1024 * 1024,
|
||||
|
||||
// Auto-cleanup: remove thumbnails for files not accessed in 30 days
|
||||
thumbnailRetentionDays: 30
|
||||
};
|
||||
|
||||
async function checkStorageQuota(): Promise<{
|
||||
used: number;
|
||||
available: number;
|
||||
warning: boolean;
|
||||
}> {
|
||||
const estimate = await navigator.storage.estimate();
|
||||
const used = estimate.usage || 0;
|
||||
const quota = estimate.quota || 0;
|
||||
|
||||
return {
|
||||
used,
|
||||
available: Math.min(quota - used, STORAGE_CONFIG.maxStorage - used),
|
||||
warning: used > STORAGE_CONFIG.warningThreshold
|
||||
};
|
||||
}
|
||||
```
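
A natural place to use this is right before an import batch: warn when the soft threshold is crossed and refuse files that would not fit. A sketch (the `confirmContinue` prompt is an assumed UI hook, not existing code):

```typescript
// Guard an import batch against the storage budget.
async function ensureRoomFor(files: File[]): Promise<boolean> {
  const incoming = files.reduce((sum, f) => sum + f.size, 0);
  const { available, warning } = await checkStorageQuota();

  if (incoming > available) {
    throw new Error('Not enough local storage for this import');
  }
  if (warning) {
    // assumed UI hook; could be a toast or a confirm dialog
    return confirmContinue('Local storage is above the 500 MB soft limit. Continue?');
  }
  return true;
}

declare function confirmContinue(message: string): Promise<boolean>;
```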
|
||||
|
||||
## Privacy Features
|
||||
|
||||
### Per-File Privacy Controls
|
||||
|
||||
```typescript
|
||||
interface FilePrivacySettings {
|
||||
// Encryption is always on - this is about sharing
|
||||
localOnly: boolean; // Never leaves browser
|
||||
shareableToBoard: boolean; // Can be added to shared board
|
||||
includeInR2Backup: boolean; // Include in cloud backup
|
||||
|
||||
// Metadata privacy
|
||||
stripExif: boolean; // Remove location/camera data from images
|
||||
anonymizeFilename: boolean; // Use generated name instead of original
|
||||
}
|
||||
|
||||
const DEFAULT_PRIVACY: FilePrivacySettings = {
|
||||
localOnly: true,
|
||||
shareableToBoard: false,
|
||||
includeInR2Backup: true,
|
||||
stripExif: true,
|
||||
anonymizeFilename: false
|
||||
};
|
||||
```
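
`stripExif` can be honored without an EXIF parser at all: re-encoding the image through a canvas drops all embedded metadata, at the cost of re-compressing the pixels. A sketch of that approach:

```typescript
// Re-encode an image to drop EXIF/location metadata before encryption.
// Trade-off: the output is re-compressed, so bytes differ from the original.
async function stripImageMetadata(file: File): Promise<Blob> {
  const bitmap = await createImageBitmap(file);
  const canvas = new OffscreenCanvas(bitmap.width, bitmap.height);
  canvas.getContext('2d')!.drawImage(bitmap, 0, 0);
  // Keep the original format where the canvas can encode it; otherwise PNG.
  const type = ['image/jpeg', 'image/png', 'image/webp'].includes(file.type)
    ? file.type
    : 'image/png';
  return canvas.convertToBlob({ type, quality: 0.92 });
}
```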
|
||||
|
||||
### Sharing Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ User drags local file onto shared board │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ ⚠️ Share "meeting_notes.pdf" to this board? │
|
||||
│ │
|
||||
│ This file is currently private. Sharing it will: │
|
||||
│ • Make it visible to all board members │
|
||||
│ • Upload an encrypted copy to sync storage │
|
||||
│ • Keep the original encrypted on your device │
|
||||
│ │
|
||||
│ [Keep Private] [Share to Board] │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Implementation Checklist
|
||||
|
||||
### Phase 1: Core Upload
|
||||
- [ ] File drop zone component
|
||||
- [ ] File type detection
|
||||
- [ ] Image thumbnail generation
|
||||
- [ ] PDF text extraction & thumbnail
|
||||
- [ ] Encryption before storage
|
||||
- [ ] IndexedDB schema & storage
|
||||
|
||||
### Phase 2: File Management
|
||||
- [ ] File browser panel
|
||||
- [ ] Filter by type
|
||||
- [ ] Search within files
|
||||
- [ ] Delete files
|
||||
- [ ] Storage quota display
|
||||
|
||||
### Phase 3: Canvas Integration
|
||||
- [ ] Drag files to canvas
|
||||
- [ ] Image shape from file
|
||||
- [ ] PDF preview shape
|
||||
- [ ] Document/note shape
|
||||
- [ ] Generic file card shape
|
||||
|
||||
### Phase 4: Sharing & Backup
|
||||
- [ ] Share confirmation dialog
|
||||
- [ ] Upload to Automerge sync
|
||||
- [ ] Include in R2 backup
|
||||
- [ ] Privacy settings per file
|
||||
|
||||
## Related Documents
|
||||
|
||||
- [Google Data Sovereignty](./GOOGLE_DATA_SOVEREIGNTY.md) - Same encryption model for Google imports
|
||||
- [Offline Storage Feasibility](../OFFLINE_STORAGE_FEASIBILITY.md) - IndexedDB + Automerge foundation
|
||||
# Obsidian Vault Integration
|
||||
|
||||
This document describes the Obsidian vault integration feature that allows you to import and work with your Obsidian notes directly on the canvas.
|
||||
|
||||
## Features
|
||||
|
||||
- **Vault Import**: Load your local Obsidian vault using the File System Access API
|
||||
- **Searchable Interface**: Browse and search through all your obs_notes with real-time filtering
|
||||
- **Tag-based Filtering**: Filter obs_notes by tags for better organization
|
||||
- **Canvas Integration**: Drag obs_notes from the browser directly onto the canvas as rectangle shapes
|
||||
- **Rich ObsNote Display**: ObsNotes show title, content preview, tags, and metadata
|
||||
- **Markdown Rendering**: Support for basic markdown formatting in obs_note previews
|
||||
|
||||
## How to Use
|
||||
|
||||
### 1. Access the Obsidian Browser
|
||||
|
||||
You can access the Obsidian browser in multiple ways:
|
||||
|
||||
- **Toolbar Button**: Click the "Obsidian Note" button in the toolbar (file-text icon)
|
||||
- **Context Menu**: Right-click on the canvas and select "Open Obsidian Browser"
|
||||
- **Keyboard Shortcut**: Press `Alt+O` to open the browser
|
||||
- **Tool Selection**: Select the "Obsidian Note" tool from the toolbar or context menu
|
||||
|
||||
This will open the Obsidian Vault Browser overlay.
|
||||
|
||||
### 2. Load Your Vault
|
||||
|
||||
The browser will attempt to use the File System Access API to let you select your Obsidian vault directory. If this isn't supported in your browser, it will fall back to demo data.
|
||||
|
||||
**Supported Browsers for File System Access API:**
|
||||
- Chrome 86+
|
||||
- Edge 86+
|
||||
- Opera 72+
|
||||
|
||||
### 3. Browse and Search ObsNotes
|
||||
|
||||
- **Search**: Use the search box to find obs_notes by title, content, or tags
|
||||
- **Filter by Tags**: Click on any tag to filter obs_notes by that tag
|
||||
- **Clear Filters**: Click "Clear Filters" to remove all active filters
|
||||
|
||||
### 4. Add ObsNotes to Canvas
|
||||
|
||||
- Click on any obs_note in the browser to add it to the canvas
|
||||
- The obs_note will appear as a rectangle shape at the center of your current view
|
||||
- You can move, resize, and style the obs_note shapes like any other canvas element
|
||||
|
||||
### 5. Keyboard Shortcuts
|
||||
|
||||
- **Alt+O**: Open Obsidian browser or select Obsidian Note tool
|
||||
- **Escape**: Close the Obsidian browser
|
||||
- **Enter**: Select the currently highlighted obs_note (when browsing)
|
||||
|
||||
## ObsNote Shape Features
|
||||
|
||||
### Display Options
|
||||
- **Title**: Shows the obs_note title at the top
|
||||
- **Content Preview**: Displays a formatted preview of the obs_note content
|
||||
- **Tags**: Shows up to 3 tags, with a "+N" indicator for additional tags
|
||||
- **Metadata**: Displays file path and link count
|
||||
|
||||
### Styling
|
||||
- **Background Color**: Customizable background color
|
||||
- **Text Color**: Customizable text color
|
||||
- **Preview Mode**: Toggle between preview and full content view
|
||||
|
||||
### Markdown Support
|
||||
The obs_note shapes support basic markdown formatting:
|
||||
- Headers (# ## ###)
|
||||
- Bold (**text**)
|
||||
- Italic (*text*)
|
||||
- Inline code (`code`)
|
||||
- Lists (- item, 1. item)
|
||||
- Wiki links ([[link]])
|
||||
- External links ([text](url))
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
src/
|
||||
├── lib/
|
||||
│ └── obsidianImporter.ts # Core vault import logic
|
||||
├── shapes/
|
||||
│ └── NoteShapeUtil.tsx # Canvas shape for displaying notes
|
||||
├── tools/
|
||||
│ └── NoteTool.ts # Tool for creating note shapes
|
||||
├── components/
|
||||
│ ├── ObsidianVaultBrowser.tsx # Main browser interface
|
||||
│ └── ObsidianToolbarButton.tsx # Toolbar button component
|
||||
└── css/
|
||||
├── obsidian-browser.css # Browser styling
|
||||
└── obsidian-toolbar.css # Toolbar button styling
|
||||
```
|
||||
|
||||
## Technical Details
|
||||
|
||||
### ObsidianImporter Class
|
||||
|
||||
The `ObsidianImporter` class handles:
|
||||
- Reading markdown files from directories
|
||||
- Parsing frontmatter and metadata
|
||||
- Extracting tags, links, and other obs_note properties
|
||||
- Searching and filtering functionality
|
||||
|
||||
### ObsNoteShape Class
|
||||
|
||||
The `ObsNoteShape` class extends TLDraw's `BaseBoxShapeUtil` and provides:
|
||||
- Rich obs_note display with markdown rendering
|
||||
- Interactive preview/full content toggle
|
||||
- Customizable styling options
|
||||
- Integration with TLDraw's shape system
|
||||
|
||||
### File System Access
|
||||
|
||||
The integration uses the modern File System Access API when available, with graceful fallback to demo data for browsers that don't support it.
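
A condensed sketch of how that loading path can look (the real `ObsidianImporter` may differ; `window.showDirectoryPicker` is the Chromium-only entry point whose absence triggers the demo-data fallback):

```typescript
// Recursively collect markdown files from a user-picked vault directory.
async function loadVaultFiles(): Promise<{ path: string; content: string }[]> {
  if (!('showDirectoryPicker' in window)) {
    throw new Error('File System Access API not supported; use demo data');
  }
  const root = await (window as any).showDirectoryPicker();
  const notes: { path: string; content: string }[] = [];

  async function walk(dir: any, prefix: string): Promise<void> {
    for await (const [name, handle] of dir.entries()) {
      if (handle.kind === 'directory') {
        await walk(handle, `${prefix}${name}/`);
      } else if (name.toLowerCase().endsWith('.md')) {
        const file = await handle.getFile();
        notes.push({ path: `${prefix}${name}`, content: await file.text() });
      }
    }
  }

  await walk(root, '');
  return notes;
}
```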
|
||||
|
||||
## Browser Compatibility
|
||||
|
||||
- **File System Access API**: Chrome 86+, Edge 86+, Opera 72+
|
||||
- **Fallback Mode**: All modern browsers (uses demo data)
|
||||
- **Canvas Rendering**: All browsers supported by TLDraw
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
Potential improvements for future versions:
|
||||
- Real-time vault synchronization
|
||||
- Bidirectional editing (edit obs_notes on canvas, sync back to vault)
|
||||
- Advanced search with regex support
|
||||
- ObsNote linking and backlink visualization
|
||||
- Custom obs_note templates
|
||||
- Export canvas content back to Obsidian
|
||||
- Support for Obsidian plugins and custom CSS
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Vault Won't Load
|
||||
- Ensure you're using a supported browser
|
||||
- Check that the selected directory contains markdown files
|
||||
- Verify you have read permissions for the directory
|
||||
|
||||
### ObsNotes Not Displaying Correctly
|
||||
- Check that the markdown files are properly formatted
|
||||
- Ensure the files have `.md` extensions
|
||||
- Verify the obs_note content isn't corrupted
|
||||
|
||||
### Performance Issues
|
||||
- Large vaults may take time to load initially
|
||||
- Consider filtering by tags to reduce the number of displayed obs_notes
|
||||
- Use search to quickly find specific obs_notes
|
||||
|
||||
## Contributing
|
||||
|
||||
To extend the Obsidian integration:
|
||||
1. Add new features to the `ObsidianImporter` class
|
||||
2. Extend the `NoteShape` for new display options
|
||||
3. Update the `ObsidianVaultBrowser` for new UI features
|
||||
4. Add corresponding CSS styles for new components
|
||||
# Transcription Tool for Canvas
|
||||
|
||||
The Transcription Tool is a powerful feature that allows you to transcribe audio from participants in your Canvas sessions using the Web Speech API. This tool provides real-time speech-to-text conversion, making it easy to capture and document conversations, presentations, and discussions.
|
||||
|
||||
## Features
|
||||
|
||||
### 🎤 Real-time Transcription
|
||||
- Live speech-to-text conversion using the Web Speech API
|
||||
- Support for multiple languages including English, Spanish, French, German, and more
|
||||
- Continuous recording with interim and final results
|
||||
|
||||
### 🌐 Multi-language Support
|
||||
- **English (US/UK)**: Primary language support
|
||||
- **European Languages**: Spanish, French, German, Italian, Portuguese
|
||||
- **Asian Languages**: Japanese, Korean, Chinese (Simplified)
|
||||
- Easy language switching during recording sessions
|
||||
|
||||
### 👥 Participant Management
|
||||
- Automatic participant detection and tracking
|
||||
- Individual transcript tracking for each speaker
|
||||
- Visual indicators for speaking status
|
||||
|
||||
### 📝 Transcript Management
|
||||
- Real-time transcript display with auto-scroll
|
||||
- Clear transcript functionality
|
||||
- Download transcripts as text files
|
||||
- Persistent storage within the Canvas session
|
||||
|
||||
### ⚙️ Advanced Controls
|
||||
- Auto-scroll toggle for better reading experience
|
||||
- Recording start/stop controls
|
||||
- Error handling and status indicators
|
||||
- Microphone permission management
|
||||
|
||||
## How to Use
|
||||
|
||||
### 1. Adding the Tool to Your Canvas
|
||||
|
||||
1. In your Canvas session, look for the **Transcribe** tool in the toolbar
|
||||
2. Click on the Transcribe tool icon
|
||||
3. Click and drag on the canvas to create a transcription widget
|
||||
4. The widget will appear with default dimensions (400x300 pixels)
|
||||
|
||||
### 2. Starting a Recording Session
|
||||
|
||||
1. **Select Language**: Choose your preferred language from the dropdown menu
|
||||
2. **Enable Auto-scroll**: Check the auto-scroll checkbox for automatic scrolling
|
||||
3. **Start Recording**: Click the "🎤 Start Recording" button
|
||||
4. **Grant Permissions**: Allow microphone access when prompted by your browser
|
||||
|
||||
### 3. During Recording
|
||||
|
||||
- **Live Transcription**: See real-time text as people speak
|
||||
- **Participant Tracking**: Monitor who is speaking
|
||||
- **Status Indicators**: Red dot shows active recording
|
||||
- **Auto-scroll**: Transcript automatically scrolls to show latest content
|
||||
|
||||
### 4. Managing Your Transcript
|
||||
|
||||
- **Stop Recording**: Click "⏹️ Stop Recording" to end the session
|
||||
- **Clear Transcript**: Use "🗑️ Clear" to reset the transcript
|
||||
- **Download**: Click "💾 Download" to save as a text file (see the sketch below)
|
||||
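The download button needs nothing more than a `Blob` and a temporary anchor element. The sketch below shows the standard browser pattern; the function name is an assumption, not the tool's actual code.

```typescript
// Save the current transcript as a plain-text file in the browser.
function downloadTranscript(transcript: string, filename = 'transcript.txt'): void {
  const blob = new Blob([transcript], { type: 'text/plain' });
  const url = URL.createObjectURL(blob);
  const link = document.createElement('a');
  link.href = url;
  link.download = filename;
  link.click();
  URL.revokeObjectURL(url); // release the object URL once the download has started
}
```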
|
||||
## Browser Compatibility
|
||||
|
||||
### ✅ Supported Browsers
|
||||
- **Chrome/Chromium**: Full support with `webkitSpeechRecognition`
|
||||
- **Edge (Chromium)**: Full support
|
||||
- **Safari**: Limited support (may require additional setup)
|
||||
|
||||
### ❌ Unsupported Browsers
|
||||
- **Firefox**: No native support for Web Speech API
|
||||
- **Internet Explorer**: No support
|
||||
|
||||
### 🔧 Recommended Setup
|
||||
For the best experience, use **Chrome** or **Chromium-based browsers** with:
|
||||
- Microphone access enabled
|
||||
- HTTPS connection (required for microphone access)
|
||||
- Stable internet connection
|
||||
|
||||
## Technical Details
|
||||
|
||||
### Web Speech API Integration
|
||||
The tool uses the Web Speech API's `SpeechRecognition` interface (a minimal setup sketch follows this list):
|
||||
- **Continuous Mode**: Enables ongoing transcription
|
||||
- **Interim Results**: Shows partial results in real-time
|
||||
- **Language Detection**: Automatically adjusts to selected language
|
||||
- **Error Handling**: Graceful fallback for unsupported features
|
||||
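As a reference for how these options map onto the API, here is a minimal `SpeechRecognition` setup; the property and event names are standard Web Speech API members, but the surrounding wiring is illustrative rather than the tool's actual code.

```typescript
// Minimal SpeechRecognition setup (Chrome exposes it as webkitSpeechRecognition).
const SpeechRecognitionImpl =
  (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition;

const recognition = new SpeechRecognitionImpl();
recognition.continuous = true;      // keep listening across utterances
recognition.interimResults = true;  // emit partial (interim) results
recognition.lang = 'en-US';         // selected language

recognition.onresult = (event: any) => {
  for (let i = event.resultIndex; i < event.results.length; i++) {
    const text = event.results[i][0].transcript;
    const isFinal = event.results[i].isFinal;
    console.log(isFinal ? `final: ${text}` : `interim: ${text}`);
  }
};

recognition.onerror = (event: any) => console.warn('Speech recognition error:', event.error);

recognition.start(); // call recognition.stop() to end the session
```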
|
||||
### Audio Processing
|
||||
- **Microphone Access**: Secure microphone permission handling
|
||||
- **Audio Stream Management**: Proper cleanup of audio resources
|
||||
- **Quality Optimization**: Optimized for voice recognition
|
||||
|
||||
### Data Persistence
|
||||
- **Session Storage**: Transcripts persist during the Canvas session
|
||||
- **Shape Properties**: All settings and data stored in the Canvas shape
|
||||
- **Real-time Updates**: Changes sync across all participants
|
||||
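To illustrate what "stored in the Canvas shape" can look like in practice, here is a hypothetical props interface for the transcription widget; the field names are assumptions and may not match the project's actual shape definition.

```typescript
// Hypothetical transcription shape props; the real shape definition may differ.
interface TranscriptionShapeProps {
  w: number;                 // widget width (default 400)
  h: number;                 // widget height (default 300)
  language: string;          // selected recognition language, e.g. 'en-US'
  autoScroll: boolean;       // auto-scroll toggle
  isRecording: boolean;      // drives the red recording indicator
  segments: {
    speaker: string;         // participant name or id
    text: string;            // transcribed text
    timestamp: number;       // ms since epoch
    isFinal: boolean;        // interim vs final result
  }[];
}
```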
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
#### "Speech recognition not supported in this browser"
|
||||
- **Solution**: Use Chrome or a Chromium-based browser
|
||||
- **Alternative**: Check if you're using the latest browser version
|
||||
|
||||
#### "Unable to access microphone"
|
||||
- **Solution**: Check browser permissions for microphone access
|
||||
- **Alternative**: Ensure you're on an HTTPS connection
|
||||
|
||||
#### Poor transcription quality
|
||||
- **Solutions**:
|
||||
- Speak clearly and at a moderate pace
|
||||
- Reduce background noise
|
||||
- Ensure good microphone positioning
|
||||
- Check internet connection stability
|
||||
|
||||
#### Language not working correctly
|
||||
- **Solution**: Verify the selected language matches the spoken language
|
||||
- **Alternative**: Try restarting the recording session
|
||||
|
||||
### Performance Tips
|
||||
|
||||
1. **Close unnecessary tabs** to free up system resources
|
||||
2. **Use a good quality microphone** for better accuracy
|
||||
3. **Minimize background noise** in your environment
|
||||
4. **Speak at a natural pace** - not too fast or slow
|
||||
5. **Ensure stable internet connection** for optimal performance
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
### Planned Features
|
||||
- **Speaker Identification**: Advanced voice recognition for multiple speakers
|
||||
- **Export Formats**: Support for PDF, Word, and other document formats
|
||||
- **Real-time Translation**: Multi-language translation capabilities
|
||||
- **Voice Commands**: Canvas control through voice commands
|
||||
- **Cloud Storage**: Automatic transcript backup and sharing
|
||||
|
||||
### Integration Possibilities
|
||||
- **Daily.co Integration**: Enhanced participant detection from video sessions
|
||||
- **AI Enhancement**: Improved accuracy using machine learning
|
||||
- **Collaborative Editing**: Real-time transcript editing by multiple users
|
||||
- **Search and Indexing**: Full-text search within transcripts
|
||||
|
||||
## Support and Feedback
|
||||
|
||||
If you encounter issues or have suggestions for improvements:
|
||||
|
||||
1. **Check Browser Compatibility**: Ensure you're using a supported browser
|
||||
2. **Review Permissions**: Verify microphone access is granted
|
||||
3. **Check Network**: Ensure stable internet connection
|
||||
4. **Report Issues**: Contact the development team with detailed error information
|
||||
|
||||
## Privacy and Security
|
||||
|
||||
### Data Handling
|
||||
- **In-Browser API**: Speech recognition runs through the browser's Web Speech API; some browsers (notably Chrome) may send audio to the vendor's speech service for processing
|
||||
- **No Cloud Storage**: Transcripts are not automatically uploaded to external services
|
||||
- **Session Privacy**: Data is only shared within your Canvas session
|
||||
- **User Control**: You control when and what to record
|
||||
|
||||
### Best Practices
|
||||
- **Inform Participants**: Let others know when recording
|
||||
- **Respect Privacy**: Don't record sensitive or confidential information
|
||||
- **Secure Sharing**: Be careful when sharing transcript files
|
||||
- **Regular Cleanup**: Clear transcripts when no longer needed
|
||||
|
||||
---
|
||||
|
||||
*The Transcription Tool is designed to enhance collaboration and documentation in Canvas sessions. Use it responsibly and respect the privacy of all participants.*
|
||||
|
|
@ -0,0 +1,304 @@
|
|||
# WebCryptoAPI Authentication Implementation
|
||||
|
||||
This document describes the complete WebCryptoAPI authentication system implemented in this project.
|
||||
|
||||
## Overview
|
||||
|
||||
The WebCryptoAPI authentication system provides cryptographic authentication using ECDSA P-256 key pairs, challenge-response authentication, and secure key storage. This is the primary authentication mechanism for the application.
|
||||
|
||||
## Architecture
|
||||
|
||||
### Core Components
|
||||
|
||||
1. **Crypto Module** (`src/lib/auth/crypto.ts`)
|
||||
- WebCryptoAPI wrapper functions
|
||||
- Key pair generation (ECDSA P-256)
|
||||
- Public key export/import
|
||||
- Data signing and verification
|
||||
- User credential storage
|
||||
|
||||
2. **CryptoAuthService** (`src/lib/auth/cryptoAuthService.ts`)
|
||||
- High-level authentication service
|
||||
- Challenge-response authentication
|
||||
- User registration and login
|
||||
- Credential verification
|
||||
|
||||
3. **AuthService** (`src/lib/auth/authService.ts`)
|
||||
- Simplified authentication service
|
||||
- Session management
|
||||
- Integration with CryptoAuthService
|
||||
|
||||
4. **UI Components**
|
||||
- `CryptID.tsx` - Cryptographic authentication UI
|
||||
- `CryptoDebug.tsx` - Debug component for verification
|
||||
- `CryptoTest.tsx` - Test component for verification
|
||||
|
||||
## Features
|
||||
|
||||
### ✅ Implemented
|
||||
|
||||
- **ECDSA P-256 Key Pairs**: Secure cryptographic key generation
|
||||
- **Challenge-Response Authentication**: Prevents replay attacks
|
||||
- **Public Key Infrastructure**: Store and verify public keys
|
||||
- **Browser Support Detection**: Checks for WebCryptoAPI availability
|
||||
- **Secure Context Validation**: Verifies the app is running in a secure context (HTTPS or localhost)
|
||||
- **Modern UI**: Responsive design with dark mode support
|
||||
- **Comprehensive Testing**: Test component for verification
|
||||
|
||||
### 🔧 Technical Details
|
||||
|
||||
#### Key Generation
|
||||
```typescript
|
||||
const keyPair = await crypto.generateKeyPair();
|
||||
// Returns CryptoKeyPair with public and private keys
|
||||
```
|
||||
|
||||
#### Public Key Export/Import
|
||||
```typescript
|
||||
const publicKeyBase64 = await crypto.exportPublicKey(keyPair.publicKey);
|
||||
const importedKey = await crypto.importPublicKey(publicKeyBase64);
|
||||
```
|
||||
|
||||
#### Data Signing and Verification
|
||||
```typescript
|
||||
const signature = await crypto.signData(privateKey, data);
|
||||
const isValid = await crypto.verifySignature(publicKey, signature, data);
|
||||
```
|
||||
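For readers unfamiliar with the underlying browser API, the wrapper calls above correspond roughly to the following standard `SubtleCrypto` operations; this is a sketch of the general pattern, not the project's `crypto.ts` source.

```typescript
// ECDSA P-256 key generation, Base64 public-key export, signing, and verification.
const keyPair = await window.crypto.subtle.generateKey(
  { name: 'ECDSA', namedCurve: 'P-256' },
  true,                       // extractable, so the public key can be exported
  ['sign', 'verify']
);

// Export the public key in SPKI format and Base64-encode it for storage.
const spki = await window.crypto.subtle.exportKey('spki', keyPair.publicKey);
const publicKeyBase64 = btoa(String.fromCharCode(...new Uint8Array(spki)));

// Sign and verify arbitrary data.
const data = new TextEncoder().encode('challenge-to-sign');
const signature = await window.crypto.subtle.sign(
  { name: 'ECDSA', hash: 'SHA-256' },
  keyPair.privateKey,
  data
);
const isValid = await window.crypto.subtle.verify(
  { name: 'ECDSA', hash: 'SHA-256' },
  keyPair.publicKey,
  signature,
  data
);
```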
|
||||
#### Challenge-Response Authentication
|
||||
```typescript
|
||||
// Generate challenge
|
||||
const challenge = `${username}:${timestamp}:${random}`;
|
||||
|
||||
// Sign challenge during registration
|
||||
const signature = await crypto.signData(privateKey, challenge);
|
||||
|
||||
// Verify during login
|
||||
const isValid = await crypto.verifySignature(publicKey, signature, challenge);
|
||||
```
|
||||
|
||||
## Browser Requirements
|
||||
|
||||
### Minimum Requirements
|
||||
- **WebCryptoAPI Support**: `window.crypto.subtle`
|
||||
- **Secure Context**: HTTPS or localhost
|
||||
- **Modern Browser**: Chrome 37+, Firefox 34+, Safari 11+, Edge 12+
|
||||
|
||||
### Feature Detection
|
||||
```typescript
|
||||
const hasWebCrypto = typeof window.crypto !== 'undefined' &&
|
||||
typeof window.crypto.subtle !== 'undefined';
|
||||
const isSecure = window.isSecureContext;
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### ✅ Implemented Security Measures
|
||||
|
||||
1. **Secure Context Requirement**: Only works over HTTPS
|
||||
2. **ECDSA P-256**: Industry-standard elliptic curve
|
||||
3. **Challenge-Response**: Prevents replay attacks
|
||||
4. **Key Storage**: Public keys stored in localStorage (see Security Notes below)
|
||||
5. **Input Validation**: Username format validation
|
||||
6. **Error Handling**: Comprehensive error management
|
||||
|
||||
### ⚠️ Security Notes
|
||||
|
||||
1. **Private Key Storage**: Currently uses localStorage for demo purposes
|
||||
- In production, consider using Web Crypto API's non-extractable keys
|
||||
- Consider hardware security modules (HSM)
|
||||
- Implement proper key derivation
|
||||
|
||||
2. **Session Management**:
|
||||
- Uses localStorage for session persistence
|
||||
- Consider implementing JWT tokens for server-side verification
|
||||
- Add session expiration and refresh logic
|
||||
|
||||
3. **Network Security**:
|
||||
- All crypto operations happen client-side
|
||||
- No private keys transmitted over network
|
||||
- Consider adding server-side signature verification
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Authentication Flow
|
||||
|
||||
```typescript
|
||||
import { CryptoAuthService } from './lib/auth/cryptoAuthService';
|
||||
|
||||
// Register a new user
|
||||
const registerResult = await CryptoAuthService.register('username');
|
||||
if (registerResult.success) {
|
||||
console.log('User registered successfully');
|
||||
}
|
||||
|
||||
// Login with existing user
|
||||
const loginResult = await CryptoAuthService.login('username');
|
||||
if (loginResult.success) {
|
||||
console.log('User authenticated successfully');
|
||||
}
|
||||
```
|
||||
|
||||
### Integration with React Context
|
||||
|
||||
```typescript
|
||||
import { useAuth } from './context/AuthContext';
|
||||
|
||||
const { login, register } = useAuth();
|
||||
|
||||
// AuthService automatically uses crypto auth
|
||||
const success = await login('username');
|
||||
```
|
||||
|
||||
### Using the CryptID Component
|
||||
|
||||
```typescript
|
||||
import CryptID from './components/auth/CryptID';
|
||||
|
||||
// Render the authentication component
|
||||
<CryptID
|
||||
onSuccess={() => console.log('Login successful')}
|
||||
onCancel={() => console.log('Login cancelled')}
|
||||
/>
|
||||
```
|
||||
|
||||
### Testing the Implementation
|
||||
|
||||
```typescript
|
||||
import CryptoTest from './components/auth/CryptoTest';
|
||||
|
||||
// Render the test component to verify functionality
|
||||
<CryptoTest />
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
src/
|
||||
├── lib/
|
||||
│ ├── auth/
|
||||
│ │ ├── crypto.ts # WebCryptoAPI wrapper
|
||||
│ │ ├── cryptoAuthService.ts # High-level auth service
|
||||
│ │ ├── authService.ts # Simplified auth service
|
||||
│ │ ├── sessionPersistence.ts # Session storage utilities
|
||||
│ │ └── types.ts # TypeScript types
|
||||
│ └── utils/
|
||||
│ └── browser.ts # Browser support detection
|
||||
├── components/
|
||||
│ └── auth/
|
||||
│ ├── CryptID.tsx # Main crypto auth UI
|
||||
│ ├── CryptoDebug.tsx # Debug component
|
||||
│ └── CryptoTest.tsx # Test component
|
||||
├── context/
|
||||
│ └── AuthContext.tsx # React context for auth state
|
||||
└── css/
|
||||
└── crypto-auth.css # Styles for crypto components
|
||||
```
|
||||
|
||||
## Dependencies
|
||||
|
||||
### Required Packages
|
||||
- `one-webcrypto`: WebCryptoAPI polyfill (^1.0.3)
|
||||
|
||||
### Browser APIs Used
|
||||
- `window.crypto.subtle`: WebCryptoAPI
|
||||
- `window.localStorage`: Key and session storage
|
||||
- `window.isSecureContext`: Security context check
|
||||
|
||||
## Storage
|
||||
|
||||
### localStorage Keys Used
|
||||
- `registeredUsers`: Array of registered usernames
|
||||
- `${username}_publicKey`: User's public key (Base64)
|
||||
- `${username}_authData`: Authentication data (challenge, signature, timestamp)
|
||||
- `session`: Current user session data
|
||||
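As an illustration of how these keys fit together, the helper below reads a user's stored credentials back out of `localStorage`. The function name is an assumption and it assumes the list and auth data are stored as JSON; the key names match the list above.

```typescript
// Read a registered user's stored credentials using the documented key names.
function loadStoredCredentials(username: string) {
  const registered: string[] = JSON.parse(localStorage.getItem('registeredUsers') ?? '[]');
  return {
    isRegistered: registered.includes(username),
    publicKeyBase64: localStorage.getItem(`${username}_publicKey`),
    authData: JSON.parse(localStorage.getItem(`${username}_authData`) ?? 'null'),
    session: JSON.parse(localStorage.getItem('session') ?? 'null'),
  };
}
```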
|
||||
## Testing
|
||||
|
||||
### Manual Testing
|
||||
1. Navigate to the application
|
||||
2. Use the `CryptoTest` component to run automated tests
|
||||
3. Verify all test cases pass
|
||||
4. Test on different browsers and devices
|
||||
|
||||
### Test Cases
|
||||
- [x] Browser support detection
|
||||
- [x] Secure context validation
|
||||
- [x] Key pair generation
|
||||
- [x] Public key export/import
|
||||
- [x] Data signing and verification
|
||||
- [x] User registration
|
||||
- [x] User login
|
||||
- [x] Credential verification
|
||||
- [x] Session persistence
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **"Browser not supported"**
|
||||
- Ensure you're using a modern browser
|
||||
- Check if WebCryptoAPI is available
|
||||
- Verify HTTPS or localhost
|
||||
|
||||
2. **"Secure context required"**
|
||||
- Access the application over HTTPS
|
||||
- For development, use localhost
|
||||
|
||||
3. **"Key generation failed"**
|
||||
- Check browser console for errors
|
||||
- Verify WebCryptoAPI permissions
|
||||
- Try refreshing the page
|
||||
|
||||
4. **"Authentication failed"**
|
||||
- Verify user exists in localStorage
|
||||
- Check stored credentials
|
||||
- Clear browser data and retry
|
||||
|
||||
### Debug Mode
|
||||
|
||||
Enable debug logging by opening the browser console:
|
||||
```typescript
|
||||
localStorage.setItem('debug_crypto', 'true');
|
||||
```
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
### Planned Improvements
|
||||
1. **Enhanced Key Storage**: Use Web Crypto API's non-extractable keys
|
||||
2. **Server-Side Verification**: Add server-side signature verification
|
||||
3. **Multi-Factor Authentication**: Add additional authentication factors
|
||||
4. **Key Rotation**: Implement automatic key rotation
|
||||
5. **Hardware Security**: Support for hardware security modules
|
||||
|
||||
### Advanced Features
|
||||
1. **Zero-Knowledge Proofs**: Implement ZKP for enhanced privacy
|
||||
2. **Threshold Cryptography**: Distributed key management
|
||||
3. **Post-Quantum Cryptography**: Prepare for quantum threats
|
||||
4. **Biometric Integration**: Add biometric authentication
|
||||
|
||||
## Integration with Automerge Sync
|
||||
|
||||
The authentication system works seamlessly with the Automerge-based real-time collaboration:
|
||||
|
||||
- **User Identification**: Each user is identified by their username in Automerge
|
||||
- **Session Management**: Sessions persist across page reloads via localStorage
|
||||
- **Collaboration**: Authenticated users can join shared canvas rooms
|
||||
- **Privacy**: Only authenticated users can access canvas data
|
||||
|
||||
## Contributing
|
||||
|
||||
When contributing to the WebCryptoAPI authentication system:
|
||||
|
||||
1. **Security First**: All changes must maintain security standards
|
||||
2. **Test Thoroughly**: Run the test suite before submitting
|
||||
3. **Document Changes**: Update this documentation
|
||||
4. **Browser Compatibility**: Test on multiple browsers
|
||||
5. **Performance**: Ensure crypto operations don't block UI
|
||||
|
||||
## References
|
||||
|
||||
- [WebCryptoAPI Specification](https://www.w3.org/TR/WebCryptoAPI/)
|
||||
- [ECDSA Algorithm](https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm)
|
||||
- [P-256 Curve](https://en.wikipedia.org/wiki/NIST_Curve_P-256)
|
||||
- [Challenge-Response Authentication](https://en.wikipedia.org/wiki/Challenge%E2%80%93response_authentication)
|
||||
|
|
@ -0,0 +1,125 @@
|
|||
# GitHub Integration Setup for Quartz Sync
|
||||
|
||||
## Quick Setup Guide
|
||||
|
||||
### 1. Create GitHub Personal Access Token
|
||||
|
||||
1. Go to: https://github.com/settings/tokens
|
||||
2. Click "Generate new token" → "Generate new token (classic)"
|
||||
3. Configure:
|
||||
- **Note:** "Canvas Website Quartz Sync"
|
||||
- **Expiration:** 90 days (or your preference)
|
||||
- **Scopes:**
|
||||
- ✅ `repo` (Full control of private repositories)
|
||||
- ✅ `workflow` (Update GitHub Action workflows)
|
||||
4. Click "Generate token" and **copy it immediately**
|
||||
|
||||
### 2. Set Up Your Quartz Repository
|
||||
|
||||
For the Jeff-Emmett/quartz repository, you can either:
|
||||
|
||||
**Option A: Use the existing Jeff-Emmett/quartz repository**
|
||||
- Fork the repository to your GitHub account
|
||||
- Clone your fork locally
|
||||
- Set up the environment variables to point to your fork
|
||||
|
||||
**Option B: Create a new Quartz repository**
|
||||
```bash
|
||||
# Create a new Quartz site
|
||||
git clone https://github.com/jackyzha0/quartz.git your-quartz-site
|
||||
cd your-quartz-site
|
||||
npm install
|
||||
npx quartz create
|
||||
|
||||
# Push to GitHub
|
||||
git add .
|
||||
git commit -m "Initial Quartz setup"
|
||||
git remote add origin https://github.com/your-username/your-quartz-repo.git
|
||||
git push -u origin main
|
||||
```
|
||||
|
||||
### 3. Configure Environment Variables
|
||||
|
||||
Create a `.env.local` file in your project root:
|
||||
|
||||
```bash
|
||||
# GitHub Integration for Quartz Sync
|
||||
NEXT_PUBLIC_GITHUB_TOKEN=your_github_token_here
|
||||
NEXT_PUBLIC_QUARTZ_REPO=Jeff-Emmett/quartz
|
||||
NEXT_PUBLIC_QUARTZ_BRANCH=main
|
||||
```
|
||||
|
||||
### 4. Enable GitHub Pages
|
||||
|
||||
1. Go to your repository → Settings → Pages
|
||||
2. Source: "GitHub Actions"
|
||||
3. This will automatically deploy your Quartz site when you push changes
|
||||
|
||||
### 5. Test the Integration
|
||||
|
||||
1. Start your development server: `npm run dev`
|
||||
2. Import some Obsidian notes or create new ones
|
||||
3. Edit a note and click "Sync Updates"
|
||||
4. Check your GitHub repository - you should see new/updated files in the `content/` directory
|
||||
5. Your Quartz site should automatically rebuild and show the changes
|
||||
|
||||
## How It Works
|
||||
|
||||
1. **When you sync a note** (a request-level sketch follows this list):
|
||||
- The system creates/updates a Markdown file in your GitHub repository
|
||||
- File is placed in the `content/` directory with proper frontmatter
|
||||
- GitHub Actions automatically rebuilds and deploys your Quartz site
|
||||
|
||||
2. **File structure in your repository:**
|
||||
```
|
||||
your-quartz-repo/
|
||||
├── content/
|
||||
│ ├── note-1.md
|
||||
│ ├── note-2.md
|
||||
│ └── ...
|
||||
├── .github/workflows/
|
||||
│ └── quartz-sync.yml
|
||||
└── ...
|
||||
```
|
||||
|
||||
3. **Automatic deployment:**
|
||||
- Changes trigger GitHub Actions workflow
|
||||
- Quartz site rebuilds automatically
|
||||
- Changes appear on your live site within minutes
|
||||
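At the HTTP level, creating or updating a file in the repository goes through GitHub's Contents API. The sketch below shows the general shape of such a request; the function name and commit message are assumptions, not the project's actual sync code.

```typescript
// Push a note to the Quartz repo's content/ directory via the GitHub Contents API.
async function syncNoteToQuartz(slug: string, markdown: string): Promise<void> {
  const repo = process.env.NEXT_PUBLIC_QUARTZ_REPO;          // e.g. "Jeff-Emmett/quartz"
  const branch = process.env.NEXT_PUBLIC_QUARTZ_BRANCH ?? 'main';
  const token = process.env.NEXT_PUBLIC_GITHUB_TOKEN;
  const url = `https://api.github.com/repos/${repo}/contents/content/${slug}.md`;
  const headers = { Authorization: `Bearer ${token}`, Accept: 'application/vnd.github+json' };

  // If the file already exists, its current sha is required to update it.
  const existing = await fetch(`${url}?ref=${branch}`, { headers });
  const sha = existing.ok ? (await existing.json()).sha : undefined;

  const res = await fetch(url, {
    method: 'PUT',
    headers,
    body: JSON.stringify({
      message: `Sync note: ${slug}`,
      content: btoa(unescape(encodeURIComponent(markdown))), // UTF-8-safe Base64
      branch,
      sha,
    }),
  });
  if (!res.ok) throw new Error(`GitHub API error: ${res.status}`);
}
```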
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **"GitHub API error: 401 Unauthorized"**
|
||||
- Check your GitHub token is correct
|
||||
- Verify the token has `repo` permissions
|
||||
|
||||
2. **"Repository not found"**
|
||||
- Check the repository name format: `username/repo-name`
|
||||
- Ensure the repository exists and is accessible
|
||||
|
||||
3. **"Sync successful but no changes on site"**
|
||||
- Check GitHub Actions tab for workflow status
|
||||
- Verify GitHub Pages is enabled
|
||||
- Wait a few minutes for the build to complete
|
||||
|
||||
### Debug Mode
|
||||
|
||||
Check the browser console for detailed sync logs:
|
||||
- Look for "✅ Successfully synced to Quartz!" messages
|
||||
- Check for any error messages in red
|
||||
|
||||
## Security Notes
|
||||
|
||||
- Never commit your `.env.local` file to version control
|
||||
- Use fine-grained tokens with minimal required permissions
|
||||
- Regularly rotate your GitHub tokens
|
||||
|
||||
## Next Steps
|
||||
|
||||
Once set up, you can:
|
||||
- Edit notes directly in the canvas
|
||||
- Sync changes to your Quartz site
|
||||
- Share your live Quartz site with others
|
||||
- Use GitHub's version control for your notes
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
|
||||
<head>
|
||||
<title>Jeff Emmett</title>
|
||||
<meta charset="UTF-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
|
||||
<meta http-equiv="Permissions-Policy" content="midi=*, microphone=*, camera=*, autoplay=*">
|
||||
<link rel="preconnect" href="https://fonts.googleapis.com" />
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
|
||||
<link
|
||||
href="https://fonts.googleapis.com/css2?family=Recursive:slnt,wght,CASL,CRSV,MONO@-15..0,300..1000,0..1,0..1,0..1&display=swap"
|
||||
rel="stylesheet">
|
||||
|
||||
<!-- Social Meta Tags -->
|
||||
<meta name="description"
|
||||
content="My research investigates the intersection of computing, human-system interfaces, and emancipatory politics. I am interested in the potential of computing as a medium for thought, as a tool for collective action, and as a means of emancipation.">
|
||||
|
||||
<meta property="og:url" content="https://jeffemmett.com">
|
||||
<meta property="og:type" content="website">
|
||||
<meta property="og:title" content="Jeff Emmett">
|
||||
<meta property="og:description"
|
||||
content="My research doesn't investigate the intersection of computing, human-system interfaces, and emancipatory politics. I am interested in the potential of computing as a medium for thought, as a tool for collective action, and as a means of emancipation.">
|
||||
<meta property="og:image" content="/website-embed.png">
|
||||
|
||||
<meta name="twitter:card" content="summary_large_image">
|
||||
<meta property="twitter:domain" content="jeffemmett.com">
|
||||
<meta property="twitter:url" content="https://jeffemmett.com">
|
||||
<meta name="twitter:title" content="Jeff Emmett">
|
||||
<meta name="twitter:description"
|
||||
content="My research doesn't investigate the intersection of computing, human-system interfaces, and emancipatory politics. I am interested in the potential of computing as a medium for thought, as a tool for collective action, and as a means of emancipation.">
|
||||
<meta name="twitter:image" content="/website-embed.png">
|
||||
|
||||
<!-- Analytics -->
|
||||
<script data-goatcounter="https://jeff.goatcounter.com/count" async src="//gc.zgo.at/count.js"></script>
|
||||
<meta name="mobile-web-app-capable" content="yes">
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div id="root"></div>
|
||||
<script type="module" src="/src/App.tsx"></script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
node_modules
|
||||
packages/*/node_modules
|
||||
packages/*/dist
|
||||
*.log
|
||||
.git
|
||||
.gitignore
|
||||
README.md
|
||||
infrastructure/
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
# Dependencies
|
||||
node_modules/
|
||||
package-lock.json
|
||||
|
||||
# Build outputs
|
||||
dist/
|
||||
*.tsbuildinfo
|
||||
|
||||
# Logs
|
||||
logs/
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pm2.log
|
||||
|
||||
# Environment variables
|
||||
.env
|
||||
.env.local
|
||||
.env.*.local
|
||||
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# PM2
|
||||
ecosystem.config.js
|
||||
.pm2/
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
# mulTmux Server Dockerfile
|
||||
FROM node:20-slim
|
||||
|
||||
# Install tmux, curl (used by the container healthcheck), and build dependencies for node-pty
|
||||
RUN apt-get update && apt-get install -y \
|
||||
tmux \
|
||||
python3 \
|
||||
make \
|
||||
    g++ \
    curl \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy workspace root files
|
||||
COPY package.json ./
|
||||
COPY tsconfig.json ./
|
||||
|
||||
# Copy packages
|
||||
COPY packages/server ./packages/server
|
||||
COPY packages/cli ./packages/cli
|
||||
|
||||
# Install dependencies (including node-pty native compilation)
|
||||
RUN npm install --workspaces
|
||||
|
||||
# Build TypeScript
|
||||
RUN npm run build
|
||||
|
||||
# Expose port
|
||||
EXPOSE 3002
|
||||
|
||||
# Run the server
|
||||
CMD ["node", "packages/server/dist/index.js"]
|
||||
|
|
@ -0,0 +1,240 @@
|
|||
# mulTmux
|
||||
|
||||
A collaborative terminal tool that lets multiple users interact with the same tmux session in real-time.
|
||||
|
||||
## Features
|
||||
|
||||
- **Real-time Collaboration**: Multiple users can connect to the same terminal session
|
||||
- **tmux Backend**: Leverages tmux for robust terminal multiplexing
|
||||
- **Token-based Auth**: Secure invite links with expiration
|
||||
- **Presence Indicators**: See who's connected to your session
|
||||
- **Low Resource Usage**: ~200-300MB RAM for typical usage
|
||||
- **Easy Deployment**: Works alongside existing services on your server
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────┐ ┌──────────────────┐
|
||||
│ Client │ ──── WebSocket ────────> │ Server │
|
||||
│ (CLI) │ (token auth) │ │
|
||||
└─────────────┘ │ ┌────────────┐ │
|
||||
│ │ Node.js │ │
|
||||
┌─────────────┐ │ │ Backend │ │
|
||||
│ Client 2 │ ──── Invite Link ──────> │ └─────┬──────┘ │
|
||||
│ (CLI) │ │ │ │
|
||||
└─────────────┘ │ ┌─────▼──────┐ │
|
||||
│ │ tmux │ │
|
||||
│ │ Sessions │ │
|
||||
│ └────────────┘ │
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
### Server Setup
|
||||
|
||||
1. **Deploy to your AI server:**
|
||||
```bash
|
||||
cd multmux
|
||||
chmod +x infrastructure/deploy.sh
|
||||
./infrastructure/deploy.sh
|
||||
```
|
||||
|
||||
This will:
|
||||
- Install tmux if needed
|
||||
- Build the server
|
||||
- Set up PM2 for process management
|
||||
- Start the server
|
||||
|
||||
2. **(Optional) Set up nginx reverse proxy:**
|
||||
```bash
|
||||
sudo cp infrastructure/nginx.conf /etc/nginx/sites-available/multmux
|
||||
sudo ln -s /etc/nginx/sites-available/multmux /etc/nginx/sites-enabled/
|
||||
# Edit the file to set your domain
|
||||
sudo nano /etc/nginx/sites-available/multmux
|
||||
sudo nginx -t
|
||||
sudo systemctl reload nginx
|
||||
```
|
||||
|
||||
### CLI Installation
|
||||
|
||||
**On your local machine:**
|
||||
```bash
|
||||
cd multmux/packages/cli
|
||||
npm install
|
||||
npm run build
|
||||
npm link # Installs 'multmux' command globally
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Create a Session
|
||||
|
||||
```bash
|
||||
multmux create my-project --repo /path/to/repo
|
||||
```
|
||||
|
||||
This outputs an invite command like:
|
||||
```
|
||||
multmux join a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6
|
||||
```
|
||||
|
||||
### Join a Session
|
||||
|
||||
```bash
|
||||
multmux join a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6
|
||||
```
|
||||
|
||||
### List Active Sessions
|
||||
|
||||
```bash
|
||||
multmux list
|
||||
```
|
||||
|
||||
### Using a Remote Server
|
||||
|
||||
If your server is on a different machine:
|
||||
|
||||
```bash
|
||||
# Create session
|
||||
multmux create my-project --server http://your-server:3000
|
||||
|
||||
# Join session
|
||||
multmux join <token> --server ws://your-server:3001
|
||||
```
|
||||
|
||||
## CLI Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `multmux create <name>` | Create a new collaborative session |
|
||||
| `multmux join <token>` | Join an existing session |
|
||||
| `multmux list` | List all active sessions |
|
||||
|
||||
### Options
|
||||
|
||||
**create:**
|
||||
- `-s, --server <url>` - Server URL (default: http://localhost:3000)
|
||||
- `-r, --repo <path>` - Repository path to cd into
|
||||
|
||||
**join:**
|
||||
- `-s, --server <url>` - WebSocket server URL (default: ws://localhost:3001)
|
||||
|
||||
**list:**
|
||||
- `-s, --server <url>` - Server URL (default: http://localhost:3000)
|
||||
|
||||
## Server Management
|
||||
|
||||
### PM2 Commands
|
||||
|
||||
```bash
|
||||
pm2 status # Check server status
|
||||
pm2 logs multmux-server # View server logs
|
||||
pm2 restart multmux-server # Restart server
|
||||
pm2 stop multmux-server # Stop server
|
||||
```
|
||||
|
||||
### Resource Usage
|
||||
|
||||
- **Idle**: ~100-150MB RAM
|
||||
- **Per session**: ~5-10MB RAM
|
||||
- **Per user**: ~1-2MB RAM
|
||||
- **Typical usage**: 200-300MB RAM total
|
||||
|
||||
## API Reference
|
||||
|
||||
### HTTP API (default: port 3000)
|
||||
|
||||
| Endpoint | Method | Description |
|
||||
|----------|--------|-------------|
|
||||
| `/api/sessions` | POST | Create a new session |
|
||||
| `/api/sessions` | GET | List active sessions |
|
||||
| `/api/sessions/:id` | GET | Get session info |
|
||||
| `/api/sessions/:id/tokens` | POST | Generate new invite token |
|
||||
| `/api/health` | GET | Health check |
|
||||
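For example, creating a session from any HTTP client is a single POST; the sketch below uses the global `fetch` (Node 18+ or a browser) and the fields returned by the routes above.

```typescript
// Create a session and print the invite token.
const res = await fetch('http://localhost:3000/api/sessions', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ name: 'my-project', repoPath: '/path/to/repo' }),
});
const { session, token, inviteUrl } = await res.json();
console.log(session.id, token, inviteUrl); // inviteUrl is "multmux join <token>"
```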
|
||||
### WebSocket (default: port 3001)
|
||||
|
||||
Connect with: `ws://localhost:3001?token=<your-token>`
|
||||
|
||||
**Message Types** (a minimal client sketch follows this list):
|
||||
- `output` - Terminal output from server
|
||||
- `input` - User input to terminal
|
||||
- `resize` - Terminal resize event
|
||||
- `presence` - User join/leave notifications
|
||||
- `joined` - Connection confirmation
|
||||
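A minimal client that speaks this protocol with the `ws` package looks like the sketch below; the bundled CLI's `WebSocketClient` is the full implementation.

```typescript
import WebSocket from 'ws';

// Connect with an invite token and wire up the core message types.
const ws = new WebSocket('ws://localhost:3001?token=YOUR_TOKEN');

ws.on('open', () => {
  // Keystrokes are sent as 'input' messages.
  ws.send(JSON.stringify({ type: 'input', data: 'ls\r', timestamp: Date.now() }));
});

ws.on('message', (raw) => {
  const msg = JSON.parse(raw.toString());
  if (msg.type === 'joined') console.log(`joined session ${msg.sessionName}`);
  if (msg.type === 'output') process.stdout.write(msg.data);           // terminal output
  if (msg.type === 'presence') console.log('\n[presence]', msg.data);  // join/leave events
});
```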
|
||||
## Security
|
||||
|
||||
- **Token Expiration**: Invite tokens expire after 60 minutes (configurable)
|
||||
- **Session Isolation**: Each session runs in its own tmux instance
|
||||
- **Input Validation**: All terminal input is validated
|
||||
- **No Persistence**: Sessions are destroyed when all users leave
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Server won't start
|
||||
|
||||
Check if ports are available:
|
||||
```bash
|
||||
netstat -tlnp | grep -E '3000|3001'
|
||||
```
|
||||
|
||||
### Can't connect to server
|
||||
|
||||
1. Check server is running: `pm2 status`
|
||||
2. Check logs: `pm2 logs multmux-server`
|
||||
3. Verify firewall allows ports 3000 and 3001
|
||||
|
||||
### Terminal not responding
|
||||
|
||||
1. Check WebSocket connection in browser console
|
||||
2. Verify token hasn't expired
|
||||
3. Restart session: `pm2 restart multmux-server`
|
||||
|
||||
## Development
|
||||
|
||||
### Project Structure
|
||||
|
||||
```
|
||||
multmux/
|
||||
├── packages/
|
||||
│ ├── server/ # Backend server
|
||||
│ │ ├── src/
|
||||
│ │ │ ├── managers/ # Session & token management
|
||||
│ │ │ ├── websocket/ # WebSocket handler
|
||||
│ │ │ └── api/ # HTTP routes
|
||||
│ └── cli/ # CLI client
|
||||
│ ├── src/
|
||||
│ │ ├── commands/ # CLI commands
|
||||
│ │ ├── connection/ # WebSocket client
|
||||
│ │ └── ui/ # Terminal UI
|
||||
└── infrastructure/ # Deployment scripts
|
||||
```
|
||||
|
||||
### Running in Development
|
||||
|
||||
**Terminal 1 - Server:**
|
||||
```bash
|
||||
npm run dev:server
|
||||
```
|
||||
|
||||
**Terminal 2 - CLI:**
|
||||
```bash
|
||||
cd packages/cli
|
||||
npm run dev -- create test-session
|
||||
```
|
||||
|
||||
### Building
|
||||
|
||||
```bash
|
||||
npm run build # Builds both packages
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions welcome! Please open an issue or PR.
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
version: '3.8'
|
||||
|
||||
services:
|
||||
multmux:
|
||||
build: .
|
||||
container_name: multmux-server
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- NODE_ENV=production
|
||||
- PORT=3002
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
# HTTP router
|
||||
- "traefik.http.routers.multmux.rule=Host(`terminal.jeffemmett.com`)"
|
||||
- "traefik.http.routers.multmux.entrypoints=web"
|
||||
- "traefik.http.services.multmux.loadbalancer.server.port=3002"
|
||||
# WebSocket support - Traefik handles this automatically for HTTP/1.1 upgrades
|
||||
# Enable sticky sessions for WebSocket connections
|
||||
- "traefik.http.services.multmux.loadbalancer.sticky.cookie=true"
|
||||
- "traefik.http.services.multmux.loadbalancer.sticky.cookie.name=multmux_session"
|
||||
networks:
|
||||
- traefik-public
|
||||
# Health check
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:3002/api/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 10s
|
||||
|
||||
networks:
|
||||
traefik-public:
|
||||
external: true
|
||||
|
|
@ -0,0 +1,91 @@
|
|||
#!/bin/bash
|
||||
|
||||
# mulTmux Deployment Script for AI Server
|
||||
# This script sets up mulTmux on your existing droplet
|
||||
|
||||
set -e
|
||||
|
||||
echo "🚀 mulTmux Deployment Script"
|
||||
echo "============================"
|
||||
echo ""
|
||||
|
||||
# Check if tmux is installed
|
||||
if ! command -v tmux &> /dev/null; then
|
||||
echo "📦 Installing tmux..."
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y tmux
|
||||
else
|
||||
echo "✅ tmux is already installed"
|
||||
fi
|
||||
|
||||
# Check if Node.js is installed
|
||||
if ! command -v node &> /dev/null; then
|
||||
echo "📦 Installing Node.js..."
|
||||
curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash -
|
||||
sudo apt-get install -y nodejs
|
||||
else
|
||||
echo "✅ Node.js is already installed ($(node --version))"
|
||||
fi
|
||||
|
||||
# Check if npm is installed
|
||||
if ! command -v npm &> /dev/null; then
|
||||
echo "❌ npm is not installed. Please install npm first."
|
||||
exit 1
|
||||
else
|
||||
echo "✅ npm is already installed ($(npm --version))"
|
||||
fi
|
||||
|
||||
# Build the server
|
||||
echo ""
|
||||
echo "🔨 Building mulTmux..."
|
||||
cd "$(dirname "$0")/.."
|
||||
npm install
|
||||
npm run build
|
||||
|
||||
echo ""
|
||||
echo "📝 Setting up PM2 for process management..."
|
||||
if ! command -v pm2 &> /dev/null; then
|
||||
sudo npm install -g pm2
|
||||
fi
|
||||
|
||||
# Create PM2 ecosystem file
|
||||
cat > ecosystem.config.js << EOF
|
||||
module.exports = {
|
||||
apps: [{
|
||||
name: 'multmux-server',
|
||||
script: './packages/server/dist/index.js',
|
||||
instances: 1,
|
||||
autorestart: true,
|
||||
watch: false,
|
||||
max_memory_restart: '500M',
|
||||
env: {
|
||||
NODE_ENV: 'production',
|
||||
PORT: 3000,
|
||||
WS_PORT: 3001
|
||||
}
|
||||
}]
|
||||
};
|
||||
EOF
|
||||
|
||||
echo ""
|
||||
echo "🚀 Starting mulTmux server with PM2..."
|
||||
pm2 start ecosystem.config.js
|
||||
pm2 save
|
||||
pm2 startup | tail -n 1 | bash || true
|
||||
|
||||
echo ""
|
||||
echo "✅ mulTmux deployed successfully!"
|
||||
echo ""
|
||||
echo "Server is running on:"
|
||||
echo " HTTP API: http://localhost:3000"
|
||||
echo " WebSocket: ws://localhost:3001"
|
||||
echo ""
|
||||
echo "Useful PM2 commands:"
|
||||
echo " pm2 status - Check server status"
|
||||
echo " pm2 logs multmux-server - View logs"
|
||||
echo " pm2 restart multmux-server - Restart server"
|
||||
echo " pm2 stop multmux-server - Stop server"
|
||||
echo ""
|
||||
echo "To install the CLI globally:"
|
||||
echo " cd packages/cli && npm link"
|
||||
echo ""
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
# nginx configuration for mulTmux
|
||||
# Place this in /etc/nginx/sites-available/multmux
|
||||
# Then: sudo ln -s /etc/nginx/sites-available/multmux /etc/nginx/sites-enabled/
|
||||
|
||||
upstream multmux_api {
|
||||
server localhost:3000;
|
||||
}
|
||||
|
||||
upstream multmux_ws {
|
||||
server localhost:3001;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name your-server-domain.com; # Change this to your domain or IP
|
||||
|
||||
# HTTP API
|
||||
location /api {
|
||||
proxy_pass http://multmux_api;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection 'upgrade';
|
||||
proxy_set_header Host $host;
|
||||
proxy_cache_bypass $http_upgrade;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
# WebSocket
|
||||
location /ws {
|
||||
proxy_pass http://multmux_ws;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_read_timeout 86400;
|
||||
}
|
||||
}
|
||||
|
||||
# Optional: SSL configuration (if using Let's Encrypt)
|
||||
# server {
|
||||
# listen 443 ssl http2;
|
||||
# server_name your-server-domain.com;
|
||||
#
|
||||
# ssl_certificate /etc/letsencrypt/live/your-server-domain.com/fullchain.pem;
|
||||
# ssl_certificate_key /etc/letsencrypt/live/your-server-domain.com/privkey.pem;
|
||||
#
|
||||
# # Same location blocks as above...
|
||||
# }
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
{
|
||||
"name": "multmux",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"description": "Collaborative terminal tool with tmux backend",
|
||||
"workspaces": [
|
||||
"packages/*"
|
||||
],
|
||||
"scripts": {
|
||||
"build": "npm run build -ws",
|
||||
"dev:server": "npm run dev -w @multmux/server",
|
||||
"dev:cli": "npm run dev -w @multmux/cli",
|
||||
"start:server": "npm run start -w @multmux/server"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.0.0",
|
||||
"typescript": "^5.0.0"
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
{
|
||||
"name": "@multmux/cli",
|
||||
"version": "0.1.0",
|
||||
"description": "mulTmux CLI - collaborative terminal client",
|
||||
"main": "dist/index.js",
|
||||
"bin": {
|
||||
"multmux": "./dist/index.js"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"dev": "tsx src/index.ts",
|
||||
"start": "node dist/index.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"commander": "^11.1.0",
|
||||
"ws": "^8.16.0",
|
||||
"blessed": "^0.1.81",
|
||||
"chalk": "^4.1.2",
|
||||
"ora": "^5.4.1",
|
||||
"node-fetch": "^2.7.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/ws": "^8.5.10",
|
||||
"@types/node": "^20.0.0",
|
||||
"@types/blessed": "^0.1.25",
|
||||
"@types/node-fetch": "^2.6.9",
|
||||
"tsx": "^4.7.0",
|
||||
"typescript": "^5.0.0"
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
import fetch from 'node-fetch';
|
||||
import chalk from 'chalk';
|
||||
import ora from 'ora';
|
||||
|
||||
export async function createSession(
|
||||
name: string,
|
||||
options: { server?: string; repo?: string }
|
||||
): Promise<void> {
|
||||
const serverUrl = options.server || 'http://localhost:3000';
|
||||
const spinner = ora('Creating session...').start();
|
||||
|
||||
try {
|
||||
const response = await fetch(`${serverUrl}/api/sessions`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
name,
|
||||
repoPath: options.repo,
|
||||
}),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to create session: ${response.statusText}`);
|
||||
}
|
||||
|
||||
const data: any = await response.json();
|
||||
|
||||
spinner.succeed('Session created!');
|
||||
|
||||
console.log('');
|
||||
console.log(chalk.bold('Session Details:'));
|
||||
console.log(` Name: ${chalk.cyan(data.session.name)}`);
|
||||
console.log(` ID: ${chalk.gray(data.session.id)}`);
|
||||
console.log(` Created: ${new Date(data.session.createdAt).toLocaleString()}`);
|
||||
console.log('');
|
||||
console.log(chalk.bold('To join this session:'));
|
||||
console.log(chalk.green(` ${data.inviteUrl}`));
|
||||
console.log('');
|
||||
console.log(chalk.bold('Or share this token:'));
|
||||
console.log(` ${chalk.yellow(data.token)}`);
|
||||
console.log('');
|
||||
console.log(chalk.dim('Token expires in 60 minutes'));
|
||||
} catch (error) {
|
||||
spinner.fail('Failed to create session');
|
||||
console.error(chalk.red((error as Error).message));
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
import chalk from 'chalk';
|
||||
import ora from 'ora';
|
||||
import { WebSocketClient } from '../connection/WebSocketClient';
|
||||
import { TerminalUI } from '../ui/Terminal';
|
||||
|
||||
export async function joinSession(
|
||||
token: string,
|
||||
options: { server?: string }
|
||||
): Promise<void> {
|
||||
const serverUrl = options.server || 'ws://localhost:3001';
|
||||
const spinner = ora('Connecting to session...').start();
|
||||
|
||||
try {
|
||||
const client = new WebSocketClient(serverUrl, token);
|
||||
|
||||
// Wait for connection
|
||||
await client.connect();
|
||||
spinner.succeed('Connected!');
|
||||
|
||||
// Wait a moment for the 'joined' event
|
||||
await new Promise((resolve) => {
|
||||
client.once('joined', resolve);
|
||||
setTimeout(resolve, 1000); // Fallback timeout
|
||||
});
|
||||
|
||||
console.log(chalk.green('\nJoined session! Press ESC or Ctrl-C to exit.\n'));
|
||||
|
||||
// Create terminal UI
|
||||
const ui = new TerminalUI(client);
|
||||
|
||||
// Handle errors
|
||||
client.on('error', (error: Error) => {
|
||||
console.error(chalk.red('\nConnection error:'), error.message);
|
||||
});
|
||||
|
||||
client.on('reconnect-failed', () => {
|
||||
console.error(chalk.red('\nFailed to reconnect. Exiting...'));
|
||||
process.exit(1);
|
||||
});
|
||||
} catch (error) {
|
||||
spinner.fail('Failed to connect');
|
||||
console.error(chalk.red((error as Error).message));
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
import fetch from 'node-fetch';
|
||||
import chalk from 'chalk';
|
||||
import ora from 'ora';
|
||||
|
||||
export async function listSessions(options: { server?: string }): Promise<void> {
|
||||
const serverUrl = options.server || 'http://localhost:3000';
|
||||
const spinner = ora('Fetching sessions...').start();
|
||||
|
||||
try {
|
||||
const response = await fetch(`${serverUrl}/api/sessions`);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to fetch sessions: ${response.statusText}`);
|
||||
}
|
||||
|
||||
const data: any = await response.json();
|
||||
spinner.stop();
|
||||
|
||||
if (data.sessions.length === 0) {
|
||||
console.log(chalk.yellow('No active sessions found.'));
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(chalk.bold(`\nActive Sessions (${data.sessions.length}):\n`));
|
||||
|
||||
data.sessions.forEach((session: any) => {
|
||||
console.log(chalk.cyan(` ${session.name}`));
|
||||
console.log(` ID: ${chalk.gray(session.id)}`);
|
||||
console.log(` Clients: ${session.activeClients}`);
|
||||
console.log(` Created: ${new Date(session.createdAt).toLocaleString()}`);
|
||||
console.log('');
|
||||
});
|
||||
} catch (error) {
|
||||
spinner.fail('Failed to fetch sessions');
|
||||
console.error(chalk.red((error as Error).message));
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,120 @@
|
|||
import WebSocket from 'ws';
|
||||
import { EventEmitter } from 'events';
|
||||
|
||||
export interface TerminalMessage {
|
||||
type: 'output' | 'input' | 'resize' | 'join' | 'leave' | 'presence' | 'joined' | 'error';
|
||||
data?: any;
|
||||
clientId?: string;
|
||||
timestamp?: number;
|
||||
sessionId?: string;
|
||||
sessionName?: string;
|
||||
message?: string;
|
||||
}
|
||||
|
||||
export class WebSocketClient extends EventEmitter {
|
||||
private ws: WebSocket | null = null;
|
||||
private reconnectAttempts = 0;
|
||||
private maxReconnectAttempts = 5;
|
||||
|
||||
constructor(private url: string, private token: string) {
|
||||
super();
|
||||
}
|
||||
|
||||
connect(): Promise<void> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const wsUrl = `${this.url}?token=${this.token}`;
|
||||
this.ws = new WebSocket(wsUrl);
|
||||
|
||||
this.ws.on('open', () => {
|
||||
this.reconnectAttempts = 0;
|
||||
this.emit('connected');
|
||||
resolve();
|
||||
});
|
||||
|
||||
this.ws.on('message', (data) => {
|
||||
try {
|
||||
const message: TerminalMessage = JSON.parse(data.toString());
|
||||
this.handleMessage(message);
|
||||
} catch (error) {
|
||||
console.error('Failed to parse message:', error);
|
||||
}
|
||||
});
|
||||
|
||||
this.ws.on('close', () => {
|
||||
this.emit('disconnected');
|
||||
this.attemptReconnect();
|
||||
});
|
||||
|
||||
this.ws.on('error', (error) => {
|
||||
this.emit('error', error);
|
||||
reject(error);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
private handleMessage(message: TerminalMessage): void {
|
||||
switch (message.type) {
|
||||
case 'output':
|
||||
this.emit('output', message.data);
|
||||
break;
|
||||
case 'joined':
|
||||
this.emit('joined', {
|
||||
sessionId: message.sessionId,
|
||||
sessionName: message.sessionName,
|
||||
clientId: message.clientId,
|
||||
});
|
||||
break;
|
||||
case 'presence':
|
||||
this.emit('presence', message.data);
|
||||
break;
|
||||
case 'error':
|
||||
this.emit('error', new Error(message.message || 'Unknown error'));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
sendInput(data: string): void {
|
||||
if (this.ws && this.ws.readyState === WebSocket.OPEN) {
|
||||
this.ws.send(
|
||||
JSON.stringify({
|
||||
type: 'input',
|
||||
data,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
resize(cols: number, rows: number): void {
|
||||
if (this.ws && this.ws.readyState === WebSocket.OPEN) {
|
||||
this.ws.send(
|
||||
JSON.stringify({
|
||||
type: 'resize',
|
||||
data: { cols, rows },
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
disconnect(): void {
|
||||
if (this.ws) {
|
||||
this.ws.close();
|
||||
this.ws = null;
|
||||
}
|
||||
}
|
||||
|
||||
private attemptReconnect(): void {
|
||||
if (this.reconnectAttempts < this.maxReconnectAttempts) {
|
||||
this.reconnectAttempts++;
|
||||
setTimeout(() => {
|
||||
this.emit('reconnecting', this.reconnectAttempts);
|
||||
this.connect().catch(() => {
|
||||
// Reconnection failed, will retry
|
||||
});
|
||||
}, 1000 * this.reconnectAttempts);
|
||||
} else {
|
||||
this.emit('reconnect-failed');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
#!/usr/bin/env node
|
||||
|
||||
import { Command } from 'commander';
|
||||
import { createSession } from './commands/create';
|
||||
import { joinSession } from './commands/join';
|
||||
import { listSessions } from './commands/list';
|
||||
|
||||
const program = new Command();
|
||||
|
||||
program
|
||||
.name('multmux')
|
||||
.description('Collaborative terminal tool with tmux backend')
|
||||
.version('0.1.0');
|
||||
|
||||
program
|
||||
.command('create <name>')
|
||||
.description('Create a new collaborative session')
|
||||
.option('-s, --server <url>', 'Server URL', 'http://localhost:3000')
|
||||
.option('-r, --repo <path>', 'Repository path to use')
|
||||
.action(createSession);
|
||||
|
||||
program
|
||||
.command('join <token>')
|
||||
.description('Join an existing session with a token')
|
||||
.option('-s, --server <url>', 'WebSocket server URL', 'ws://localhost:3001')
|
||||
.action(joinSession);
|
||||
|
||||
program
|
||||
.command('list')
|
||||
.description('List active sessions')
|
||||
.option('-s, --server <url>', 'Server URL', 'http://localhost:3000')
|
||||
.action(listSessions);
|
||||
|
||||
program.parse();
|
||||
|
|
@ -0,0 +1,154 @@
|
|||
import blessed from 'blessed';
|
||||
import { WebSocketClient } from '../connection/WebSocketClient';
|
||||
|
||||
export class TerminalUI {
|
||||
private screen: blessed.Widgets.Screen;
|
||||
private terminal: blessed.Widgets.BoxElement;
|
||||
private statusBar: blessed.Widgets.BoxElement;
|
||||
private buffer: string = '';
|
||||
|
||||
constructor(private client: WebSocketClient) {
|
||||
// Create screen
|
||||
this.screen = blessed.screen({
|
||||
smartCSR: true,
|
||||
title: 'mulTmux',
|
||||
});
|
||||
|
||||
// Status bar
|
||||
this.statusBar = blessed.box({
|
||||
top: 0,
|
||||
left: 0,
|
||||
width: '100%',
|
||||
height: 1,
|
||||
style: {
|
||||
fg: 'white',
|
||||
bg: 'blue',
|
||||
},
|
||||
content: ' mulTmux - Connecting...',
|
||||
});
|
||||
|
||||
// Terminal output
|
||||
this.terminal = blessed.box({
|
||||
top: 1,
|
||||
left: 0,
|
||||
width: '100%',
|
||||
height: '100%-1',
|
||||
scrollable: true,
|
||||
alwaysScroll: true,
|
||||
scrollbar: {
|
||||
style: {
|
||||
bg: 'blue',
|
||||
},
|
||||
},
|
||||
keys: true,
|
||||
vi: true,
|
||||
mouse: true,
|
||||
content: '',
|
||||
});
|
||||
|
||||
this.screen.append(this.statusBar);
|
||||
this.screen.append(this.terminal);
|
||||
|
||||
// Focus terminal
|
||||
this.terminal.focus();
|
||||
|
||||
// Setup event handlers
|
||||
this.setupEventHandlers();
|
||||
|
||||
// Render
|
||||
this.screen.render();
|
||||
}
|
||||
|
||||
private setupEventHandlers(): void {
|
||||
// Handle terminal output from server
|
||||
this.client.on('output', (data: string) => {
|
||||
this.buffer += data;
|
||||
this.terminal.setContent(this.buffer);
|
||||
this.terminal.setScrollPerc(100);
|
||||
this.screen.render();
|
||||
});
|
||||
|
||||
// Handle connection events
|
||||
this.client.on('connected', () => {
|
||||
this.updateStatus('Connected', 'green');
|
||||
});
|
||||
|
||||
this.client.on('joined', (info: any) => {
|
||||
this.updateStatus(`Session: ${info.sessionName} (${info.clientId.slice(0, 8)})`, 'green');
|
||||
});
|
||||
|
||||
this.client.on('disconnected', () => {
|
||||
this.updateStatus('Disconnected', 'red');
|
||||
});
|
||||
|
||||
this.client.on('reconnecting', (attempt: number) => {
|
||||
this.updateStatus(`Reconnecting (${attempt}/5)...`, 'yellow');
|
||||
});
|
||||
|
||||
this.client.on('presence', (data: any) => {
|
||||
if (data.action === 'join') {
|
||||
this.showNotification(`User joined (${data.totalClients} online)`);
|
||||
} else if (data.action === 'leave') {
|
||||
this.showNotification(`User left (${data.totalClients} online)`);
|
||||
}
|
||||
});
|
||||
|
||||
// Handle keyboard input
|
||||
this.screen.on('keypress', (ch: string, key: any) => {
|
||||
if (key.name === 'escape' || (key.ctrl && key.name === 'c')) {
|
||||
this.close();
|
||||
return;
|
||||
}
|
||||
|
||||
// Send input to server
|
||||
if (ch) {
|
||||
this.client.sendInput(ch);
|
||||
} else if (key.name) {
|
||||
// Handle special keys
|
||||
const specialKeys: { [key: string]: string } = {
|
||||
enter: '\r',
|
||||
backspace: '\x7f',
|
||||
tab: '\t',
|
||||
up: '\x1b[A',
|
||||
down: '\x1b[B',
|
||||
right: '\x1b[C',
|
||||
left: '\x1b[D',
|
||||
};
|
||||
|
||||
if (specialKeys[key.name]) {
|
||||
this.client.sendInput(specialKeys[key.name]);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Handle resize
|
||||
this.screen.on('resize', () => {
|
||||
const { width, height } = this.terminal;
|
||||
this.client.resize(width as number, (height as number) - 1);
|
||||
});
|
||||
|
||||
// Quit on Ctrl-C
|
||||
this.screen.key(['C-c'], () => {
|
||||
this.close();
|
||||
});
|
||||
}
|
||||
|
||||
private updateStatus(text: string, color: string = 'blue'): void {
|
||||
this.statusBar.style.bg = color;
|
||||
this.statusBar.setContent(` mulTmux - ${text}`);
|
||||
this.screen.render();
|
||||
}
|
||||
|
||||
private showNotification(text: string): void {
|
||||
// Append notification to buffer
|
||||
this.buffer += `\n[mulTmux] ${text}\n`;
|
||||
this.terminal.setContent(this.buffer);
|
||||
this.screen.render();
|
||||
}
|
||||
|
||||
close(): void {
|
||||
this.client.disconnect();
|
||||
this.screen.destroy();
|
||||
process.exit(0);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"extends": "../../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src"
|
||||
},
|
||||
"include": ["src/**/*"]
|
||||
}
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
{
|
||||
"name": "@multmux/server",
|
||||
"version": "0.1.0",
|
||||
"description": "mulTmux server - collaborative terminal backend",
|
||||
"main": "dist/index.js",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"dev": "tsx watch src/index.ts",
|
||||
"start": "node dist/index.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"express": "^4.18.0",
|
||||
"ws": "^8.16.0",
|
||||
"node-pty": "^1.0.0",
|
||||
"nanoid": "^3.3.7",
|
||||
"cors": "^2.8.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/express": "^4.17.21",
|
||||
"@types/ws": "^8.5.10",
|
||||
"@types/node": "^20.0.0",
|
||||
"@types/cors": "^2.8.17",
|
||||
"tsx": "^4.7.0",
|
||||
"typescript": "^5.0.0"
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,116 @@
|
|||
import { Router } from 'express';
|
||||
import { SessionManager } from '../managers/SessionManager';
|
||||
import { TokenManager } from '../managers/TokenManager';
|
||||
|
||||
export function createRouter(
|
||||
sessionManager: SessionManager,
|
||||
tokenManager: TokenManager
|
||||
): Router {
|
||||
const router = Router();
|
||||
|
||||
// Create a new session
|
||||
router.post('/sessions', async (req, res) => {
|
||||
try {
|
||||
const { name, repoPath } = req.body;
|
||||
|
||||
if (!name || typeof name !== 'string') {
|
||||
return res.status(400).json({ error: 'Session name is required' });
|
||||
}
|
||||
|
||||
const session = await sessionManager.createSession(name, repoPath);
|
||||
const token = tokenManager.generateToken(session.id, 60, 'write');
|
||||
|
||||
res.json({
|
||||
session: {
|
||||
id: session.id,
|
||||
name: session.name,
|
||||
createdAt: session.createdAt,
|
||||
},
|
||||
token,
|
||||
inviteUrl: `multmux join ${token}`,
|
||||
});
|
||||
} catch (error) {
|
||||
console.error('Failed to create session:', error);
|
||||
res.status(500).json({ error: 'Failed to create session' });
|
||||
}
|
||||
});
|
||||
|
||||
// List active sessions
|
||||
router.get('/sessions', (req, res) => {
|
||||
const sessions = sessionManager.listSessions();
|
||||
res.json({
|
||||
sessions: sessions.map((s) => ({
|
||||
id: s.id,
|
||||
name: s.name,
|
||||
createdAt: s.createdAt,
|
||||
activeClients: s.clients.size,
|
||||
})),
|
||||
});
|
||||
});
|
||||
|
||||
// Get session info
|
||||
router.get('/sessions/:id', (req, res) => {
|
||||
const session = sessionManager.getSession(req.params.id);
|
||||
|
||||
if (!session) {
|
||||
return res.status(404).json({ error: 'Session not found' });
|
||||
}
|
||||
|
||||
res.json({
|
||||
id: session.id,
|
||||
name: session.name,
|
||||
createdAt: session.createdAt,
|
||||
activeClients: session.clients.size,
|
||||
});
|
||||
});
|
||||
|
||||
// Join an existing session (generates a new token and returns session info)
|
||||
router.post('/sessions/:id/join', (req, res) => {
|
||||
const session = sessionManager.getSession(req.params.id);
|
||||
|
||||
if (!session) {
|
||||
return res.status(404).json({ error: 'Session not found' });
|
||||
}
|
||||
|
||||
// Generate a new token for this joining client
|
||||
const token = tokenManager.generateToken(session.id, 60, 'write');
|
||||
|
||||
res.json({
|
||||
id: session.id,
|
||||
name: session.name,
|
||||
token,
|
||||
createdAt: session.createdAt,
|
||||
activeClients: session.clients.size,
|
||||
});
|
||||
});
|
||||
|
||||
// Generate new invite token for existing session
|
||||
router.post('/sessions/:id/tokens', (req, res) => {
|
||||
const session = sessionManager.getSession(req.params.id);
|
||||
|
||||
if (!session) {
|
||||
return res.status(404).json({ error: 'Session not found' });
|
||||
}
|
||||
|
||||
const { expiresInMinutes = 60, permissions = 'write' } = req.body;
|
||||
const token = tokenManager.generateToken(session.id, expiresInMinutes, permissions);
|
||||
|
||||
res.json({
|
||||
token,
|
||||
inviteUrl: `multmux join ${token}`,
|
||||
expiresInMinutes,
|
||||
permissions,
|
||||
});
|
||||
});
|
||||
|
||||
// Health check
|
||||
router.get('/health', (req, res) => {
|
||||
res.json({
|
||||
status: 'ok',
|
||||
activeSessions: sessionManager.listSessions().length,
|
||||
activeTokens: tokenManager.getActiveTokens(),
|
||||
});
|
||||
});
|
||||
|
||||
return router;
|
||||
}
|
||||
|
|
@@ -0,0 +1,55 @@

import express from 'express';
import { createServer } from 'http';
import { WebSocketServer } from 'ws';
import cors from 'cors';
import { SessionManager } from './managers/SessionManager';
import { TokenManager } from './managers/TokenManager';
import { TerminalHandler } from './websocket/TerminalHandler';
import { createRouter } from './api/routes';

const PORT = Number(process.env.PORT) || 3002;

async function main() {
  // Initialize managers
  const sessionManager = new SessionManager();
  const tokenManager = new TokenManager();
  const terminalHandler = new TerminalHandler(sessionManager, tokenManager);

  // HTTP API server
  const app = express();
  app.use(cors());
  app.use(express.json());
  app.use('/api', createRouter(sessionManager, tokenManager));

  // Create HTTP server to share with WebSocket
  const server = createServer(app);

  // WebSocket server on same port, handles upgrade requests
  const wss = new WebSocketServer({ server, path: '/ws' });

  wss.on('connection', (ws, req) => {
    // Extract token from query string
    const url = new URL(req.url || '', `http://localhost:${PORT}`);
    const token = url.searchParams.get('token');

    if (!token) {
      ws.send(JSON.stringify({ type: 'error', message: 'Token required' }));
      ws.close();
      return;
    }

    terminalHandler.handleConnection(ws, token);
  });

  server.listen(PORT, () => {
    console.log('');
    console.log('mulTmux server is ready!');
    console.log(`API: http://localhost:${PORT}/api`);
    console.log(`WebSocket: ws://localhost:${PORT}/ws`);
  });
}

main().catch((error) => {
  console.error('Failed to start server:', error);
  process.exit(1);
});
@@ -0,0 +1,114 @@

import { spawn } from 'child_process';
import * as pty from 'node-pty';
import { Session } from '../types';
import { nanoid } from 'nanoid';

export class SessionManager {
  private sessions: Map<string, Session> = new Map();
  private terminals: Map<string, pty.IPty> = new Map();

  async createSession(name: string, repoPath?: string): Promise<Session> {
    const id = nanoid(16);
    const tmuxSessionName = `multmux-${id}`;

    const session: Session = {
      id,
      name,
      createdAt: new Date(),
      tmuxSessionName,
      clients: new Set(),
      repoPath,
    };

    this.sessions.set(id, session);

    // Create tmux session
    await this.createTmuxSession(tmuxSessionName, repoPath);

    // Attach to tmux session with pty
    const terminal = pty.spawn('tmux', ['attach-session', '-t', tmuxSessionName], {
      name: 'xterm-256color',
      cols: 80,
      rows: 24,
      cwd: repoPath || process.cwd(),
      env: process.env as { [key: string]: string },
    });

    this.terminals.set(id, terminal);

    return session;
  }

  private async createTmuxSession(name: string, cwd?: string): Promise<void> {
    return new Promise((resolve, reject) => {
      const args = ['new-session', '-d', '-s', name];
      if (cwd) {
        args.push('-c', cwd);
      }

      const proc = spawn('tmux', args);

      proc.on('exit', (code) => {
        if (code === 0) {
          resolve();
        } else {
          reject(new Error(`Failed to create tmux session: exit code ${code}`));
        }
      });
    });
  }

  getSession(id: string): Session | undefined {
    return this.sessions.get(id);
  }

  getTerminal(sessionId: string): pty.IPty | undefined {
    return this.terminals.get(sessionId);
  }

  addClient(sessionId: string, clientId: string): void {
    const session = this.sessions.get(sessionId);
    if (session) {
      session.clients.add(clientId);
    }
  }

  removeClient(sessionId: string, clientId: string): void {
    const session = this.sessions.get(sessionId);
    if (session) {
      session.clients.delete(clientId);

      // Clean up session if no clients left
      if (session.clients.size === 0) {
        this.destroySession(sessionId);
      }
    }
  }

  private async destroySession(sessionId: string): Promise<void> {
    const session = this.sessions.get(sessionId);
    if (!session) return;

    const terminal = this.terminals.get(sessionId);
    if (terminal) {
      terminal.kill();
      this.terminals.delete(sessionId);
    }

    // Kill tmux session
    spawn('tmux', ['kill-session', '-t', session.tmuxSessionName]);

    this.sessions.delete(sessionId);
  }

  listSessions(): Session[] {
    return Array.from(this.sessions.values());
  }

  resizeTerminal(sessionId: string, cols: number, rows: number): void {
    const terminal = this.terminals.get(sessionId);
    if (terminal) {
      terminal.resize(cols, rows);
    }
  }
}
@@ -0,0 +1,50 @@

import { nanoid } from 'nanoid';
import { SessionToken } from '../types';

export class TokenManager {
  private tokens: Map<string, SessionToken> = new Map();

  generateToken(
    sessionId: string,
    expiresInMinutes: number = 60,
    permissions: 'read' | 'write' = 'write'
  ): string {
    const token = nanoid(32);
    const expiresAt = new Date(Date.now() + expiresInMinutes * 60 * 1000);

    this.tokens.set(token, {
      token,
      sessionId,
      expiresAt,
      permissions,
    });

    // Clean up expired token after expiration
    setTimeout(() => this.tokens.delete(token), expiresInMinutes * 60 * 1000);

    return token;
  }

  validateToken(token: string): SessionToken | null {
    const sessionToken = this.tokens.get(token);

    if (!sessionToken) {
      return null;
    }

    if (sessionToken.expiresAt < new Date()) {
      this.tokens.delete(token);
      return null;
    }

    return sessionToken;
  }

  revokeToken(token: string): void {
    this.tokens.delete(token);
  }

  getActiveTokens(): number {
    return this.tokens.size;
  }
}
@@ -0,0 +1,29 @@

export interface Session {
  id: string;
  name: string;
  createdAt: Date;
  tmuxSessionName: string;
  clients: Set<string>;
  repoPath?: string;
}

export interface SessionToken {
  token: string;
  sessionId: string;
  expiresAt: Date;
  permissions: 'read' | 'write';
}

export interface ClientConnection {
  id: string;
  sessionId: string;
  username?: string;
  permissions: 'read' | 'write';
}

export interface TerminalMessage {
  type: 'output' | 'input' | 'resize' | 'join' | 'leave' | 'presence';
  data: any;
  clientId?: string;
  timestamp: number;
}
@@ -0,0 +1,175 @@

import { WebSocket } from 'ws';
import { nanoid } from 'nanoid';
import { SessionManager } from '../managers/SessionManager';
import { TokenManager } from '../managers/TokenManager';
import { TerminalMessage, ClientConnection } from '../types';

export class TerminalHandler {
  private clients: Map<string, { ws: WebSocket; connection: ClientConnection }> = new Map();

  constructor(
    private sessionManager: SessionManager,
    private tokenManager: TokenManager
  ) {}

  handleConnection(ws: WebSocket, token: string): void {
    // Validate token
    const sessionToken = this.tokenManager.validateToken(token);
    if (!sessionToken) {
      ws.send(JSON.stringify({ type: 'error', message: 'Invalid or expired token' }));
      ws.close();
      return;
    }

    // Verify session exists
    const session = this.sessionManager.getSession(sessionToken.sessionId);
    if (!session) {
      ws.send(JSON.stringify({ type: 'error', message: 'Session not found' }));
      ws.close();
      return;
    }

    const clientId = nanoid(16);
    const connection: ClientConnection = {
      id: clientId,
      sessionId: sessionToken.sessionId,
      permissions: sessionToken.permissions,
    };

    this.clients.set(clientId, { ws, connection });
    this.sessionManager.addClient(sessionToken.sessionId, clientId);

    // Attach terminal output to WebSocket
    const terminal = this.sessionManager.getTerminal(sessionToken.sessionId);
    if (terminal) {
      const onData = (data: string) => {
        const message: TerminalMessage = {
          type: 'output',
          data,
          timestamp: Date.now(),
        };
        ws.send(JSON.stringify(message));
      };

      const dataListener = terminal.onData(onData);

      // Clean up on disconnect
      ws.on('close', () => {
        dataListener.dispose();
        this.handleDisconnect(clientId);
      });
    }

    // Send join confirmation
    ws.send(
      JSON.stringify({
        type: 'joined',
        sessionId: session.id,
        sessionName: session.name,
        clientId,
      })
    );

    // Broadcast presence
    this.broadcastToSession(sessionToken.sessionId, {
      type: 'presence',
      data: {
        action: 'join',
        clientId,
        totalClients: session.clients.size,
      },
      timestamp: Date.now(),
    });

    // Handle incoming messages
    ws.on('message', (data) => {
      this.handleMessage(clientId, data.toString());
    });
  }

  private handleMessage(clientId: string, rawMessage: string): void {
    const client = this.clients.get(clientId);
    if (!client) return;

    try {
      const message: TerminalMessage = JSON.parse(rawMessage);

      switch (message.type) {
        case 'input':
          this.handleInput(client.connection, message.data);
          break;
        case 'resize':
          this.handleResize(client.connection, message.data);
          break;
      }
    } catch (error) {
      console.error('Failed to parse message:', error);
    }
  }

  private handleInput(connection: ClientConnection, data: string): void {
    if (connection.permissions !== 'write') {
      return; // Read-only clients can't send input
    }

    const terminal = this.sessionManager.getTerminal(connection.sessionId);
    if (terminal) {
      terminal.write(data);
    }

    // Broadcast input to other clients for cursor tracking
    this.broadcastToSession(
      connection.sessionId,
      {
        type: 'input',
        data,
        clientId: connection.id,
        timestamp: Date.now(),
      },
      connection.id // Exclude sender
    );
  }

  private handleResize(connection: ClientConnection, data: { cols: number; rows: number }): void {
    this.sessionManager.resizeTerminal(connection.sessionId, data.cols, data.rows);
  }

  private handleDisconnect(clientId: string): void {
    const client = this.clients.get(clientId);
    if (!client) return;

    this.sessionManager.removeClient(client.connection.sessionId, clientId);
    this.clients.delete(clientId);

    // Broadcast leave
    const session = this.sessionManager.getSession(client.connection.sessionId);
    if (session) {
      this.broadcastToSession(client.connection.sessionId, {
        type: 'presence',
        data: {
          action: 'leave',
          clientId,
          totalClients: session.clients.size,
        },
        timestamp: Date.now(),
      });
    }
  }

  private broadcastToSession(
    sessionId: string,
    message: TerminalMessage,
    excludeClientId?: string
  ): void {
    const session = this.sessionManager.getSession(sessionId);
    if (!session) return;

    const messageStr = JSON.stringify(message);

    for (const [clientId, client] of this.clients.entries()) {
      if (client.connection.sessionId === sessionId && clientId !== excludeClientId) {
        client.ws.send(messageStr);
      }
    }
  }
}
@@ -0,0 +1,8 @@

{
  "extends": "../../tsconfig.json",
  "compilerOptions": {
    "outDir": "./dist",
    "rootDir": "./src"
  },
  "include": ["src/**/*"]
}