rspace-online/server/local-first/migration/run-migration.ts

/**
 * Run all PG → Automerge migrations for real (no dry run).
 *
 * Usage (inside rspace container):
 *   bun run server/local-first/migration/run-migration.ts [space]
 *
 * Default space: "demo". Creates disk backups in /data/docs-backup/.
 * Idempotent: skips docs that already exist in the SyncServer.
 */
import postgres from 'postgres';
import * as Automerge from '@automerge/automerge';
import { mkdirSync, writeFileSync } from 'node:fs';
import { dirname } from 'node:path';
import {
  migrateModule,
  allMigrations,
  type MigrationResult,
} from './pg-to-automerge';
import { syncServer } from '../../sync-instance';
import { loadAllDocs, docIdToPath } from '../doc-persistence';
const DATABASE_URL = process.env.DATABASE_URL;
if (!DATABASE_URL) {
  throw new Error('DATABASE_URL environment variable is required');
}
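// One-shot script: keep the pool small and let idle connections close quickly.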
const sql = postgres(DATABASE_URL, { max: 5, idle_timeout: 10 });
// Wrap postgres.js in a pg-compatible pool.query() interface
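// (postgres.js takes tagged templates by default; sql.unsafe() runs a plain
// query string with pg-style $1 placeholders, matching the pg Pool contract.)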
const pool = {
  async query(text: string, params?: any[]) {
    const result = params
      ? await sql.unsafe(text, params)
      : await sql.unsafe(text);
    return { rows: Array.from(result) };
  },
};
const space = process.argv[2] || 'demo';
const BACKUP_DIR = '/data/docs-backup';
async function main() {
  console.log(`\n=== PG → AUTOMERGE MIGRATION (space: "${space}") ===\n`);
  // Load any existing docs so idempotency checks work
  await loadAllDocs(syncServer);
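  // (migrateModule skips any doc ID already present in the SyncServer, so
  // rerunning this script is safe; see "Idempotent" in the header comment.)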
  const results: MigrationResult[] = [];
  for (const migration of allMigrations) {
    const result = await migrateModule(migration, pool, space, syncServer, {
      dryRun: false,
      backupDir: BACKUP_DIR,
    });
    results.push(result);
    console.log('');
  }
  // Flush all docs to /data/docs/ (setDoc doesn't trigger onDocChange,
  // so debounced saves won't fire; we save explicitly here).
  console.log('[Migration] Saving all docs to /data/docs/...');
  let saved = 0;
  for (const docId of syncServer.getDocIds()) {
    const doc = syncServer.getDoc(docId);
    if (!doc) continue;
    try {
      const filePath = docIdToPath(docId);
      mkdirSync(dirname(filePath), { recursive: true });
      // Automerge.save() serializes the doc, history included, to a compact
      // binary that Automerge.load() can restore on the next server boot.
      const binary = Automerge.save(doc);
      writeFileSync(filePath, binary);
      saved++;
    } catch (e) {
      console.error(`[Migration] Failed to save ${docId}:`, e);
    }
  }
  console.log(`[Migration] Saved ${saved} docs to disk.`);
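  // Summary table: one row per migration module, then totals.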
  console.log('\n=== SUMMARY ===\n');
  console.log(
    `${'Module'.padEnd(12)} ${'Created'.padStart(8)} ${'Skipped'.padStart(8)} ${'Rows'.padStart(6)} ${'Errors'.padStart(7)} ${'Time'.padStart(8)}`
  );
  console.log('-'.repeat(52));
  let totalCreated = 0;
  let totalSkipped = 0;
  let totalRows = 0;
  let totalErrors = 0;
  let totalMs = 0;
  for (const r of results) {
    console.log(
      `${r.module.padEnd(12)} ${String(r.docsCreated).padStart(8)} ${String(r.docsSkipped).padStart(8)} ${String(r.rowsMigrated).padStart(6)} ${String(r.errors.length).padStart(7)} ${(r.durationMs + 'ms').padStart(8)}`
    );
    totalCreated += r.docsCreated;
    totalSkipped += r.docsSkipped;
    totalRows += r.rowsMigrated;
    totalErrors += r.errors.length;
    totalMs += r.durationMs;
  }
  console.log('-'.repeat(52));
  console.log(
    `${'TOTAL'.padEnd(12)} ${String(totalCreated).padStart(8)} ${String(totalSkipped).padStart(8)} ${String(totalRows).padStart(6)} ${String(totalErrors).padStart(7)} ${(totalMs + 'ms').padStart(8)}`
  );
  if (totalErrors > 0) {
    console.log('\n=== ERRORS ===\n');
    for (const r of results) {
      for (const e of r.errors) {
        console.error(`[${r.module}] ${e}`);
      }
    }
  }
  console.log(`\nBackups: ${BACKUP_DIR}/`);
  console.log(`Persistent: /data/docs/`);
  console.log(`Total docs in SyncServer: ${syncServer.getDocIds().length}`);
  await sql.end();
}
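// Surface any unhandled failure with a non-zero exit code.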
main().catch((e) => {
  console.error('Fatal:', e);
  process.exit(1);
});