feat: add Phase 1 provisioning API (Hono/Bun)
Self-service API for provisioning Postiz instances at <slug>.rsocials.online. - Hono server on port 3001 with API key auth - SQLite instance registry with provision logging - Template engine (TypeScript port of generate.sh) - Docker compose deployer with health checks - Sablier config auto-management (add/remove routing) - Cloudflare tunnel hostname auto-management - Resource monitor (/proc/meminfo, max 12 instances) - Secret generation (JWT + Postgres password) Routes: POST/GET/DELETE /v1/spaces, GET /health Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
parent
8ef5c678c2
commit
dc78c119b3
|
|
@ -43,3 +43,5 @@ infisical/.env
|
|||
# typescript
|
||||
*.tsbuildinfo
|
||||
next-env.d.ts
|
||||
api/node_modules/
|
||||
api/data/
|
||||
|
|
|
|||
|
|
@ -0,0 +1,18 @@
|
|||
# rSocials provisioning API — runtime image.
# Bun on Alpine keeps the image small; Bun executes TypeScript directly, so
# there is no separate build/transpile stage.
FROM oven/bun:1-alpine

WORKDIR /app

# Install dependencies before copying sources so the layer cache survives
# source-only edits. Falls back to a plain install when the lockfile is
# missing or out of date (stderr from the frozen attempt is discarded).
COPY package.json bun.lock* ./
RUN bun install --frozen-lockfile 2>/dev/null || bun install

COPY tsconfig.json ./
COPY src/ ./src/

# The SQLite instance registry lives under /data; mount a volume here to
# persist it across container restarts.
RUN mkdir -p /data

ENV PORT=3001
ENV DATABASE_PATH=/data/instances.db

EXPOSE 3001

CMD ["bun", "run", "src/index.ts"]
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
{
|
||||
"lockfileVersion": 1,
|
||||
"configVersion": 1,
|
||||
"workspaces": {
|
||||
"": {
|
||||
"name": "rsocials-api",
|
||||
"dependencies": {
|
||||
"hono": "^4.7.0",
|
||||
"js-yaml": "^4.1.0",
|
||||
"nanoid": "^5.1.0",
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/bun": "^1.3.9",
|
||||
"@types/js-yaml": "^4.0.9",
|
||||
"typescript": "^5.7.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
"packages": {
|
||||
"@types/bun": ["@types/bun@1.3.9", "", { "dependencies": { "bun-types": "1.3.9" } }, "sha512-KQ571yULOdWJiMH+RIWIOZ7B2RXQGpL1YQrBtLIV3FqDcCu6FsbFUBwhdKUlCKUpS3PJDsHlJ1QKlpxoVR+xtw=="],
|
||||
|
||||
"@types/js-yaml": ["@types/js-yaml@4.0.9", "", {}, "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg=="],
|
||||
|
||||
"@types/node": ["@types/node@25.3.0", "", { "dependencies": { "undici-types": "~7.18.0" } }, "sha512-4K3bqJpXpqfg2XKGK9bpDTc6xO/xoUP/RBWS7AtRMug6zZFaRekiLzjVtAoZMquxoAbzBvy5nxQ7veS5eYzf8A=="],
|
||||
|
||||
"argparse": ["argparse@2.0.1", "", {}, "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="],
|
||||
|
||||
"bun-types": ["bun-types@1.3.9", "", { "dependencies": { "@types/node": "*" } }, "sha512-+UBWWOakIP4Tswh0Bt0QD0alpTY8cb5hvgiYeWCMet9YukHbzuruIEeXC2D7nMJPB12kbh8C7XJykSexEqGKJg=="],
|
||||
|
||||
"hono": ["hono@4.12.2", "", {}, "sha512-gJnaDHXKDayjt8ue0n8Gs0A007yKXj4Xzb8+cNjZeYsSzzwKc0Lr+OZgYwVfB0pHfUs17EPoLvrOsEaJ9mj+Tg=="],
|
||||
|
||||
"js-yaml": ["js-yaml@4.1.1", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="],
|
||||
|
||||
"nanoid": ["nanoid@5.1.6", "", { "bin": { "nanoid": "bin/nanoid.js" } }, "sha512-c7+7RQ+dMB5dPwwCp4ee1/iV/q2P6aK1mTZcfr1BTuVlyW9hJYiMPybJCcnBlQtuSmTIWNeazm/zqNoZSSElBg=="],
|
||||
|
||||
"typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="],
|
||||
|
||||
"undici-types": ["undici-types@7.18.2", "", {}, "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w=="],
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
{
|
||||
"name": "rsocials-api",
|
||||
"version": "0.1.0",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "bun run --watch src/index.ts",
|
||||
"start": "bun run src/index.ts",
|
||||
"typecheck": "bun run tsc --noEmit"
|
||||
},
|
||||
"dependencies": {
|
||||
"hono": "^4.7.0",
|
||||
"js-yaml": "^4.1.0",
|
||||
"nanoid": "^5.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/bun": "^1.3.9",
|
||||
"@types/js-yaml": "^4.0.9",
|
||||
"typescript": "^5.7.0"
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,126 @@
|
|||
import { Database } from "bun:sqlite";
|
||||
import type { Instance, ProvisionLog } from "../types.js";
|
||||
|
||||
/**
 * SQLite-backed registry of provisioned Postiz instances plus an append-only
 * provision log. All methods are synchronous (bun:sqlite).
 *
 * Column names are snake_case in SQL and camelCase on the `Instance` type;
 * `rowToInstance` performs the mapping.
 */
export class InstanceStore {
  private db: Database;

  constructor(db: Database) {
    this.db = db;
  }

  /**
   * Inserts a new instance row. `createdAt`/`updatedAt` come from the table
   * defaults, so the freshly inserted row is re-read to return a complete
   * `Instance` (the `!` is safe: the row was just inserted).
   */
  create(instance: Omit<Instance, "createdAt" | "updatedAt">): Instance {
    const stmt = this.db.prepare(`
      INSERT INTO instances (id, slug, display_name, primary_domain, fallback_domain, owner, status, compose_path)
      VALUES ($id, $slug, $displayName, $primaryDomain, $fallbackDomain, $owner, $status, $composePath)
    `);
    stmt.run({
      $id: instance.id,
      $slug: instance.slug,
      $displayName: instance.displayName,
      $primaryDomain: instance.primaryDomain,
      $fallbackDomain: instance.fallbackDomain,
      $owner: instance.owner,
      $status: instance.status,
      $composePath: instance.composePath,
    });
    return this.getById(instance.id)!;
  }

  /** Looks up one instance by primary key; null when absent. */
  getById(id: string): Instance | null {
    const row = this.db
      .prepare("SELECT * FROM instances WHERE id = ?")
      .get(id) as Record<string, unknown> | null;
    return row ? this.rowToInstance(row) : null;
  }

  /** Looks up one instance by its unique slug; null when absent. */
  getBySlug(slug: string): Instance | null {
    const row = this.db
      .prepare("SELECT * FROM instances WHERE slug = ?")
      .get(slug) as Record<string, unknown> | null;
    return row ? this.rowToInstance(row) : null;
  }

  /**
   * Lists instances newest-first, optionally filtered to one owner.
   * Includes destroyed/failed instances (no status filter).
   */
  list(owner?: string): Instance[] {
    if (owner) {
      const rows = this.db
        .prepare(
          "SELECT * FROM instances WHERE owner = ? ORDER BY created_at DESC"
        )
        .all(owner) as Record<string, unknown>[];
      return rows.map((r) => this.rowToInstance(r));
    }
    const rows = this.db
      .prepare("SELECT * FROM instances ORDER BY created_at DESC")
      .all() as Record<string, unknown>[];
    return rows.map((r) => this.rowToInstance(r));
  }

  /** Sets the lifecycle status and bumps updated_at. No-op if id is unknown. */
  updateStatus(id: string, status: string): void {
    this.db
      .prepare(
        "UPDATE instances SET status = ?, updated_at = datetime('now') WHERE id = ?"
      )
      .run(status, id);
  }

  /** Counts live instances (everything except 'destroyed' and 'failed'). */
  countTotal(): number {
    const row = this.db
      .prepare(
        "SELECT COUNT(*) as count FROM instances WHERE status NOT IN ('destroyed', 'failed')"
      )
      .get() as { count: number };
    return row.count;
  }

  /** Counts live instances belonging to one owner (same status filter as countTotal). */
  countByOwner(owner: string): number {
    const row = this.db
      .prepare(
        "SELECT COUNT(*) as count FROM instances WHERE owner = ? AND status NOT IN ('destroyed', 'failed')"
      )
      .get(owner) as { count: number };
    return row.count;
  }

  /**
   * Hard-deletes an instance and its log entries. Log rows are removed first
   * because provision_log.instance_id references instances(id).
   */
  delete(id: string): void {
    this.db.prepare("DELETE FROM provision_log WHERE instance_id = ?").run(id);
    this.db.prepare("DELETE FROM instances WHERE id = ?").run(id);
  }

  /** Appends a provision-log entry; `detail` is optional free text. */
  addLog(instanceId: string, action: string, detail?: string): void {
    this.db
      .prepare(
        "INSERT INTO provision_log (instance_id, action, detail) VALUES (?, ?, ?)"
      )
      .run(instanceId, action, detail ?? null);
  }

  /** Returns an instance's provision log, oldest entry first. */
  getLogs(instanceId: string): ProvisionLog[] {
    const rows = this.db
      .prepare(
        "SELECT * FROM provision_log WHERE instance_id = ? ORDER BY created_at ASC"
      )
      .all(instanceId) as Record<string, unknown>[];
    return rows.map((r) => ({
      id: r.id as number,
      instanceId: r.instance_id as string,
      action: r.action as string,
      detail: r.detail as string | null,
      createdAt: r.created_at as string,
    }));
  }

  /** Maps a snake_case SQL row to the camelCase Instance shape. */
  private rowToInstance(row: Record<string, unknown>): Instance {
    return {
      id: row.id as string,
      slug: row.slug as string,
      displayName: row.display_name as string,
      primaryDomain: row.primary_domain as string,
      fallbackDomain: row.fallback_domain as string,
      owner: row.owner as string,
      status: row.status as Instance["status"],
      composePath: row.compose_path as string | null,
      createdAt: row.created_at as string,
      updatedAt: row.updated_at as string,
    };
  }
}
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
import { Database } from "bun:sqlite";
|
||||
|
||||
/**
 * Opens (or creates) the SQLite registry at `path` and applies the schema.
 * Idempotent: all DDL uses IF NOT EXISTS, so it is safe on every startup.
 *
 * @param path file path for the database (e.g. /data/instances.db)
 * @returns the open Database handle, ready for InstanceStore
 */
export function initDb(path: string): Database {
  const db = new Database(path, { create: true });
  // WAL improves concurrent read behavior; FKs enforce provision_log → instances.
  db.exec("PRAGMA journal_mode = WAL");
  db.exec("PRAGMA foreign_keys = ON");

  db.exec(`
    CREATE TABLE IF NOT EXISTS instances (
      id TEXT PRIMARY KEY,
      slug TEXT UNIQUE NOT NULL,
      display_name TEXT NOT NULL,
      primary_domain TEXT NOT NULL,
      fallback_domain TEXT NOT NULL,
      owner TEXT NOT NULL,
      status TEXT NOT NULL DEFAULT 'provisioning',
      compose_path TEXT,
      created_at TEXT DEFAULT (datetime('now')),
      updated_at TEXT DEFAULT (datetime('now'))
    );

    CREATE TABLE IF NOT EXISTS provision_log (
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      instance_id TEXT NOT NULL REFERENCES instances(id),
      action TEXT NOT NULL,
      detail TEXT,
      created_at TEXT DEFAULT (datetime('now'))
    );

    CREATE INDEX IF NOT EXISTS idx_instances_slug ON instances(slug);
    CREATE INDEX IF NOT EXISTS idx_instances_owner ON instances(owner);
    CREATE INDEX IF NOT EXISTS idx_provision_log_instance ON provision_log(instance_id);
  `);

  return db;
}
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
import { Hono } from "hono";
|
||||
import { logger } from "hono/logger";
|
||||
import { cors } from "hono/cors";
|
||||
import { initDb } from "./db/schema.js";
|
||||
import { InstanceStore } from "./db/queries.js";
|
||||
import { apiKeyAuth } from "./middleware/auth.js";
|
||||
import { healthRoutes } from "./routes/health.js";
|
||||
import { spacesRoutes } from "./routes/spaces.js";
|
||||
import { provisionRoutes } from "./routes/provision.js";
|
||||
|
||||
const PORT = parseInt(process.env.PORT ?? "3001", 10);
const DB_PATH = process.env.DATABASE_PATH ?? "./data/instances.db";

// Initialize database (creates file + schema on first run)
const db = initDb(DB_PATH);
const store = new InstanceStore(db);

// Create app
const app = new Hono();

// Global middleware (request logging + permissive CORS on every route)
app.use("*", logger());
app.use("*", cors());

// Public routes — /health is unauthenticated by design
app.route("/health", healthRoutes(store));

// Protected routes — everything under /v1/* requires the X-API-Key header.
// Both routers are mounted at /v1/spaces: provisionRoutes handles POST/DELETE,
// spacesRoutes handles GET; Hono dispatches by method + path across both.
app.use("/v1/*", apiKeyAuth);
app.route("/v1/spaces", provisionRoutes(store));
app.route("/v1/spaces", spacesRoutes(store));

// 404 handler
app.notFound((c) => c.json({ error: "Not found" }, 404));

// Error handler — last resort; details go to the server log, not the client
app.onError((err, c) => {
  console.error("Unhandled error:", err);
  return c.json({ error: "Internal server error" }, 500);
});

console.log(`rSocials API starting on port ${PORT}`);

// Bun's HTTP server entry point: exporting { port, fetch } starts the server.
export default {
  port: PORT,
  fetch: app.fetch,
};
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
import type { Context, Next } from "hono";
|
||||
|
||||
const ADMIN_API_KEY = process.env.ADMIN_API_KEY;
|
||||
|
||||
export async function apiKeyAuth(c: Context, next: Next) {
|
||||
if (!ADMIN_API_KEY) {
|
||||
return c.json({ error: "Server misconfigured: no API key set" }, 500);
|
||||
}
|
||||
|
||||
const key = c.req.header("X-API-Key");
|
||||
if (!key) {
|
||||
return c.json({ error: "Missing X-API-Key header" }, 401);
|
||||
}
|
||||
|
||||
if (key !== ADMIN_API_KEY) {
|
||||
return c.json({ error: "Invalid API key" }, 403);
|
||||
}
|
||||
|
||||
// Store owner identity (API key holder = admin for now, SIWE in Phase 2)
|
||||
c.set("owner", "admin");
|
||||
await next();
|
||||
}
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
import { Hono } from "hono";
|
||||
import type { InstanceStore } from "../db/queries.js";
|
||||
import { checkResources } from "../services/resource-monitor.js";
|
||||
|
||||
export function healthRoutes(store: InstanceStore) {
|
||||
const app = new Hono();
|
||||
|
||||
app.get("/", (c) => {
|
||||
const instanceCount = store.countTotal();
|
||||
const resources = checkResources(instanceCount);
|
||||
|
||||
return c.json({
|
||||
status: "ok",
|
||||
version: "0.1.0",
|
||||
instances: instanceCount,
|
||||
resources: {
|
||||
totalMemMB: resources.totalMemMB,
|
||||
availMemMB: resources.availMemMB,
|
||||
canProvision: resources.canProvision,
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
return app;
|
||||
}
|
||||
|
|
@ -0,0 +1,186 @@
|
|||
import { Hono } from "hono";
|
||||
import { nanoid } from "nanoid";
|
||||
import type { InstanceStore } from "../db/queries.js";
|
||||
import type { ProvisionRequest, SpaceConfig } from "../types.js";
|
||||
import { checkResources } from "../services/resource-monitor.js";
|
||||
import { deploySpace, teardownSpace, checkContainerHealth } from "../services/docker-deployer.js";
|
||||
import { addSablierEntry, removeSablierEntry } from "../services/sablier-config.js";
|
||||
import { addTunnelHostnames, removeTunnelHostnames, restartCloudflared } from "../services/tunnel-config.js";
|
||||
|
||||
// Slug shape: 3–30 chars total, lowercase alphanumeric; hyphens allowed only
// in the interior (first and last char must be alphanumeric).
const SLUG_RE = /^[a-z0-9][a-z0-9-]{1,28}[a-z0-9]$/;

// Subdomains that must never be claimed by self-service provisioning
// (infrastructure names plus existing/branded spaces).
const RESERVED_SLUGS = new Set([
  "www", "api", "admin", "mail", "app", "staging", "test", "dev",
  "socials", "cc", "crypto-commons", "votc", "p2pf", "bcrg",
  "bondingcurve", "p2pfoundation",
]);

// Per-owner cap on live instances; overridable via env.
const MAX_PER_OWNER = parseInt(process.env.MAX_PER_OWNER ?? "3", 10);
|
||||
|
||||
/**
 * Mutating routes for spaces: POST / provisions a new instance, DELETE /:slug
 * tears one down. Both validate synchronously, record the state change, then
 * hand the long-running work to a background task and return immediately;
 * progress is tracked via instance status and the provision log.
 */
export function provisionRoutes(store: InstanceStore) {
  const app = new Hono();

  // Provision a new space
  app.post("/", async (c) => {
    const body = await c.req.json<ProvisionRequest>();
    // 'owner' is set by apiKeyAuth. NOTE(review): the `as never` cast works
    // around Hono's untyped context-variable map — consider a typed
    // ContextVariableMap when SIWE identity lands in Phase 2.
    const owner = (c.get("owner" as never) as string) || "admin";

    // 1. Validate slug
    if (!body.slug || !SLUG_RE.test(body.slug)) {
      return c.json(
        { error: "Invalid slug: must be 3-30 chars, lowercase alphanumeric + hyphens" },
        400
      );
    }

    if (RESERVED_SLUGS.has(body.slug)) {
      return c.json({ error: `Slug '${body.slug}' is reserved` }, 400);
    }

    if (!body.displayName || body.displayName.length > 100) {
      return c.json({ error: "displayName required (max 100 chars)" }, 400);
    }

    // Check if slug already taken (uniqueness also enforced by the DB)
    if (store.getBySlug(body.slug)) {
      return c.json({ error: `Slug '${body.slug}' already in use` }, 409);
    }

    // 2. Check limits: global instance cap + host memory, then per-owner cap
    const instanceCount = store.countTotal();
    const resources = checkResources(instanceCount);
    if (!resources.canProvision) {
      return c.json({ error: resources.reason }, 503);
    }

    const ownerCount = store.countByOwner(owner);
    if (ownerCount >= MAX_PER_OWNER) {
      return c.json(
        { error: `Owner limit reached (${MAX_PER_OWNER} instances max)` },
        429
      );
    }

    // 3. Create instance record in 'provisioning' state.
    // fallbackDomain is always the rsocials.online subdomain; primaryDomain
    // may be a caller-supplied custom domain (defaults to the same value).
    const id = nanoid(12);
    const primaryDomain = body.primaryDomain ?? `${body.slug}.rsocials.online`;
    const fallbackDomain = `${body.slug}.rsocials.online`;

    const instance = store.create({
      id,
      slug: body.slug,
      displayName: body.displayName,
      primaryDomain,
      fallbackDomain,
      owner,
      status: "provisioning",
      composePath: null,
    });

    store.addLog(id, "provision_start", `Owner: ${owner}`);

    // Run provisioning in background (don't block the response).
    // Deliberately not awaited: runProvisioning catches its own errors and
    // records failure in the provision log / instance status.
    runProvisioning(store, instance.id, {
      slug: body.slug,
      displayName: body.displayName,
      primaryDomain,
      fallbackDomain,
      emailFrom: body.emailFrom ?? "noreply@rmail.online",
      postiz: {
        disableRegistration: body.disableRegistration ?? false,
        emailFromName: body.displayName,
      },
    });

    return c.json({ instance, message: "Provisioning started" }, 201);
  });

  // Teardown a space
  app.delete("/:slug", async (c) => {
    const slug = c.req.param("slug");
    const instance = store.getBySlug(slug);
    if (!instance) {
      return c.json({ error: "Instance not found" }, 404);
    }

    if (instance.status === "destroyed") {
      return c.json({ error: "Instance already destroyed" }, 400);
    }

    store.updateStatus(instance.id, "teardown");
    store.addLog(instance.id, "teardown_start");

    // Run teardown in background (fire-and-forget; errors are logged inside)
    runTeardown(store, instance);

    return c.json({ message: "Teardown started", instance });
  });

  return app;
}
|
||||
|
||||
/**
 * Background provisioning pipeline for one space. Step numbers continue the
 * sequence started in the POST handler (1–3 happen there).
 *
 * On any thrown error the instance is marked 'failed' and the message is
 * written to the provision log; this function never rethrows, so it is safe
 * to call without awaiting.
 */
async function runProvisioning(
  store: InstanceStore,
  instanceId: string,
  config: SpaceConfig
) {
  try {
    // 5. Generate compose + deploy
    store.addLog(instanceId, "deploy_start", "Generating compose and deploying");
    const result = await deploySpace(config);
    store.addLog(instanceId, "deploy_complete", `Compose: ${result.composePath}`);

    // 8. Wait for health (polls the container's running state, up to 120s)
    store.addLog(instanceId, "health_check", "Waiting for container to be healthy");
    const healthy = await checkContainerHealth(config.slug);
    if (!healthy) {
      // Containers are left in place for manual inspection; only the record
      // is marked failed.
      store.addLog(instanceId, "health_timeout", "Container did not become healthy in 120s");
      store.updateStatus(instanceId, "failed");
      return;
    }
    store.addLog(instanceId, "health_ok", "Container is running");

    // 9. Update Sablier config so Traefik can route (and scale-to-zero) the space
    store.addLog(instanceId, "sablier_config", "Adding Sablier routing");
    addSablierEntry(config.slug, config.displayName, config.primaryDomain, config.fallbackDomain);
    store.addLog(instanceId, "sablier_ok");

    // 10. Update tunnel config + restart cloudflared to pick up the hostnames
    store.addLog(instanceId, "tunnel_config", "Adding tunnel hostnames");
    addTunnelHostnames(config.primaryDomain, config.fallbackDomain);
    await restartCloudflared();
    store.addLog(instanceId, "tunnel_ok");

    // 11. Mark active
    store.updateStatus(instanceId, "active");
    store.addLog(instanceId, "provision_complete", `Live at https://${config.primaryDomain}`);
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    store.addLog(instanceId, "provision_error", msg);
    store.updateStatus(instanceId, "failed");
  }
}
|
||||
|
||||
async function runTeardown(store: InstanceStore, instance: ReturnType<InstanceStore["getBySlug"]> & {}) {
|
||||
try {
|
||||
// Remove Sablier config
|
||||
store.addLog(instance.id, "sablier_remove");
|
||||
removeSablierEntry(instance.slug);
|
||||
|
||||
// Remove tunnel hostnames
|
||||
store.addLog(instance.id, "tunnel_remove");
|
||||
removeTunnelHostnames(instance.primaryDomain, instance.fallbackDomain);
|
||||
await restartCloudflared();
|
||||
|
||||
// Tear down containers
|
||||
store.addLog(instance.id, "docker_down");
|
||||
await teardownSpace(instance.slug);
|
||||
|
||||
store.updateStatus(instance.id, "destroyed");
|
||||
store.addLog(instance.id, "teardown_complete");
|
||||
} catch (err) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
store.addLog(instance.id, "teardown_error", msg);
|
||||
store.updateStatus(instance.id, "failed");
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
import { Hono } from "hono";
|
||||
import type { InstanceStore } from "../db/queries.js";
|
||||
|
||||
export function spacesRoutes(store: InstanceStore) {
|
||||
const app = new Hono();
|
||||
|
||||
// List all instances
|
||||
app.get("/", (c) => {
|
||||
const owner = c.req.query("owner");
|
||||
const instances = store.list(owner);
|
||||
return c.json({ instances });
|
||||
});
|
||||
|
||||
// Get instance details
|
||||
app.get("/:slug", (c) => {
|
||||
const slug = c.req.param("slug");
|
||||
const instance = store.getBySlug(slug);
|
||||
if (!instance) {
|
||||
return c.json({ error: "Instance not found" }, 404);
|
||||
}
|
||||
|
||||
const logs = store.getLogs(instance.id);
|
||||
return c.json({ instance, logs });
|
||||
});
|
||||
|
||||
return app;
|
||||
}
|
||||
|
|
@ -0,0 +1,114 @@
|
|||
import { writeFileSync, existsSync, mkdirSync } from "fs";
|
||||
import { join } from "path";
|
||||
import { generateComposeFile } from "./template-engine.js";
|
||||
import { generateSecrets } from "./secret-generator.js";
|
||||
import type { SpaceConfig } from "../types.js";
|
||||
|
||||
// Where generated compose/env files are written. Defaults to a sibling
// directory of the API checkout; override with GENERATED_DIR in production.
const GENERATED_DIR = process.env.GENERATED_DIR ?? "../generated";

/** Paths of the artifacts written by deploySpace for one space. */
export interface DeployResult {
  composePath: string;
  envPath: string;
}
|
||||
|
||||
/**
 * Renders the compose + env files for a space and brings the stack up with
 * `docker compose up -d --build` under project name `postiz-<slug>`.
 *
 * The env file is written mode 0600 because it contains the generated
 * Postgres password.
 *
 * @throws Error when docker compose exits non-zero (stderr included).
 */
export async function deploySpace(config: SpaceConfig): Promise<DeployResult> {
  if (!existsSync(GENERATED_DIR)) {
    mkdirSync(GENERATED_DIR, { recursive: true });
  }

  const composePath = join(GENERATED_DIR, `docker-compose.space-${config.slug}.yml`);
  const envPath = join(GENERATED_DIR, `env.space-${config.slug}`);

  // Generate compose file from the template
  const composeContent = generateComposeFile(config);
  writeFileSync(composePath, composeContent);

  // Generate .env with secrets.
  // NOTE(review): generateSecrets() also returns jwtSecret, but only
  // postgresPassword is written here — confirm whether JWT_SECRET should be
  // added to this env file or is injected via Infisical instead.
  // NOTE(review): the INFISICAL_* lines are written literally as `${VAR}` —
  // verify docker compose interpolates these from the host environment.
  const secrets = generateSecrets();
  const envContent = [
    `# Auto-generated for ${config.slug}`,
    `INFISICAL_CLIENT_ID=\${INFISICAL_CLIENT_ID}`,
    `INFISICAL_CLIENT_SECRET=\${INFISICAL_CLIENT_SECRET}`,
    `POSTGRES_PASSWORD=${secrets.postgresPassword}`,
  ].join("\n");
  writeFileSync(envPath, envContent, { mode: 0o600 });

  // Deploy with docker compose
  const proc = Bun.spawn(
    [
      "docker",
      "compose",
      "-f",
      composePath,
      "--env-file",
      envPath,
      "-p",
      `postiz-${config.slug}`,
      "up",
      "-d",
      "--build",
    ],
    { stdout: "pipe", stderr: "pipe" }
  );

  const exitCode = await proc.exited;
  if (exitCode !== 0) {
    const stderr = await new Response(proc.stderr).text();
    throw new Error(`docker compose failed (exit ${exitCode}): ${stderr}`);
  }

  return { composePath, envPath };
}
|
||||
|
||||
export async function teardownSpace(slug: string): Promise<void> {
|
||||
const composePath = join(GENERATED_DIR, `docker-compose.space-${slug}.yml`);
|
||||
const envPath = join(GENERATED_DIR, `env.space-${slug}`);
|
||||
|
||||
if (!existsSync(composePath)) {
|
||||
throw new Error(`Compose file not found: ${composePath}`);
|
||||
}
|
||||
|
||||
const proc = Bun.spawn(
|
||||
[
|
||||
"docker",
|
||||
"compose",
|
||||
"-f",
|
||||
composePath,
|
||||
"--env-file",
|
||||
envPath,
|
||||
"-p",
|
||||
`postiz-${slug}`,
|
||||
"down",
|
||||
"-v",
|
||||
],
|
||||
{ stdout: "pipe", stderr: "pipe" }
|
||||
);
|
||||
|
||||
const exitCode = await proc.exited;
|
||||
if (exitCode !== 0) {
|
||||
const stderr = await new Response(proc.stderr).text();
|
||||
throw new Error(`docker compose down failed (exit ${exitCode}): ${stderr}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Polls `docker inspect` every 3s until the container `postiz-<slug>` reports
 * State.Running == true, or the timeout elapses.
 *
 * NOTE(review): despite the name, this checks the *running* state, not the
 * Docker HEALTHCHECK status (.State.Health) — a container can be running but
 * unhealthy. Confirm whether .State.Health.Status should be used when the
 * image defines a healthcheck.
 *
 * While the container does not exist yet, `docker inspect` fails and prints
 * to stderr (piped and discarded); the empty stdout simply fails the check
 * and the loop retries.
 *
 * @param slug space slug; container name is `postiz-<slug>`
 * @param timeoutMs total time to wait (default 120s)
 * @returns true once running; false on timeout
 */
export async function checkContainerHealth(
  slug: string,
  timeoutMs = 120_000
): Promise<boolean> {
  const containerName = `postiz-${slug}`;
  const start = Date.now();

  while (Date.now() - start < timeoutMs) {
    const proc = Bun.spawn(
      ["docker", "inspect", "--format", "{{.State.Running}}", containerName],
      { stdout: "pipe", stderr: "pipe" }
    );
    const output = await new Response(proc.stdout).text();
    if (output.trim() === "true") {
      return true;
    }
    await Bun.sleep(3000);
  }

  return false;
}
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
import { readFileSync } from "fs";
|
||||
import type { ResourceCheck } from "../types.js";
|
||||
|
||||
const MAX_INSTANCES = parseInt(process.env.MAX_TOTAL_INSTANCES ?? "12", 10);
|
||||
const INSTANCE_MEM_MB = 2500; // ~2.5GB per Postiz stack
|
||||
const MIN_HEADROOM_MB = 4000; // Keep 4GB free for system + other services
|
||||
|
||||
export function checkResources(currentInstanceCount: number): ResourceCheck {
|
||||
let totalMemMB = 0;
|
||||
let availMemMB = 0;
|
||||
|
||||
try {
|
||||
const meminfo = readFileSync("/proc/meminfo", "utf-8");
|
||||
const totalMatch = meminfo.match(/MemTotal:\s+(\d+)\s+kB/);
|
||||
const availMatch = meminfo.match(/MemAvailable:\s+(\d+)\s+kB/);
|
||||
|
||||
if (totalMatch) totalMemMB = Math.floor(parseInt(totalMatch[1]) / 1024);
|
||||
if (availMatch) availMemMB = Math.floor(parseInt(availMatch[1]) / 1024);
|
||||
} catch {
|
||||
// Fallback if /proc/meminfo not accessible (e.g., macOS dev)
|
||||
totalMemMB = 65536;
|
||||
availMemMB = 20000;
|
||||
}
|
||||
|
||||
if (currentInstanceCount >= MAX_INSTANCES) {
|
||||
return {
|
||||
totalMemMB,
|
||||
availMemMB,
|
||||
instanceCount: currentInstanceCount,
|
||||
canProvision: false,
|
||||
reason: `Maximum instances reached (${MAX_INSTANCES})`,
|
||||
};
|
||||
}
|
||||
|
||||
if (availMemMB < INSTANCE_MEM_MB + MIN_HEADROOM_MB) {
|
||||
return {
|
||||
totalMemMB,
|
||||
availMemMB,
|
||||
instanceCount: currentInstanceCount,
|
||||
canProvision: false,
|
||||
reason: `Insufficient memory (${availMemMB}MB available, need ${INSTANCE_MEM_MB + MIN_HEADROOM_MB}MB)`,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
totalMemMB,
|
||||
availMemMB,
|
||||
instanceCount: currentInstanceCount,
|
||||
canProvision: true,
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,79 @@
|
|||
import { readFileSync, writeFileSync, existsSync } from "fs";
|
||||
import yaml from "js-yaml";
|
||||
|
||||
// Traefik dynamic-config file managed by this service; overridable via env.
const SABLIER_CONFIG_PATH =
  process.env.SABLIER_CONFIG_PATH ?? "/root/traefik/config/postiz-sablier.yml";

// Shape of the Traefik dynamic configuration we read/write. Entry values are
// left as `unknown` — this module only adds/removes whole entries by key.
interface SablierConfig {
  http: {
    middlewares: Record<string, unknown>;
    routers: Record<string, unknown>;
    services: Record<string, unknown>;
  };
}
|
||||
|
||||
/**
 * Loads the Sablier/Traefik YAML config, returning an empty skeleton when the
 * file is missing or parses to null/undefined (e.g. an empty file).
 */
function loadConfig(): SablierConfig {
  if (!existsSync(SABLIER_CONFIG_PATH)) {
    return { http: { middlewares: {}, routers: {}, services: {} } };
  }
  const content = readFileSync(SABLIER_CONFIG_PATH, "utf-8");
  return (yaml.load(content) as SablierConfig) ?? {
    http: { middlewares: {}, routers: {}, services: {} },
  };
}
|
||||
|
||||
/** Serializes the config back to YAML in place (Traefik watches this file). */
function saveConfig(config: SablierConfig): void {
  writeFileSync(SABLIER_CONFIG_PATH, yaml.dump(config, { lineWidth: 120 }));
}
|
||||
|
||||
/**
 * Registers a space with Traefik + Sablier: a scale-to-zero middleware, a
 * router matching both domains, and a service pointing at the Postiz
 * container (`postiz-<slug>:5000`).
 *
 * Entries are keyed by `postiz-<slug>` (middleware key prefixed `sablier-`),
 * so re-adding the same slug overwrites rather than duplicates.
 */
export function addSablierEntry(
  slug: string,
  displayName: string,
  primaryDomain: string,
  fallbackDomain: string
): void {
  const config = loadConfig();
  const key = `postiz-${slug}`;

  // Sablier middleware: wakes the container group on demand, shows a themed
  // waiting page, and keeps the session alive for 30 minutes.
  config.http.middlewares[`sablier-${key}`] = {
    plugin: {
      sablier: {
        sablierUrl: "http://sablier:10000",
        group: key,
        sessionDuration: "30m",
        dynamic: {
          displayName,
          theme: "hacker-terminal",
          refreshFrequency: "5s",
          showDetails: true,
        },
      },
    },
  };

  // Router: both the custom and fallback hostnames route through the
  // Sablier middleware to the space's service.
  config.http.routers[key] = {
    rule: `Host(\`${primaryDomain}\`) || Host(\`${fallbackDomain}\`)`,
    entryPoints: ["web", "websecure"],
    middlewares: [`sablier-${key}`],
    service: key,
  };

  // Service: Postiz container resolved by Docker DNS on port 5000.
  config.http.services[key] = {
    loadBalancer: {
      servers: [{ url: `http://${key}:5000` }],
    },
  };

  saveConfig(config);
}
|
||||
|
||||
export function removeSablierEntry(slug: string): void {
|
||||
const config = loadConfig();
|
||||
const key = `postiz-${slug}`;
|
||||
|
||||
delete config.http.middlewares[`sablier-${key}`];
|
||||
delete config.http.routers[key];
|
||||
delete config.http.services[key];
|
||||
|
||||
saveConfig(config);
|
||||
}
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
import { randomBytes } from "crypto";
|
||||
|
||||
export interface GeneratedSecrets {
|
||||
jwtSecret: string;
|
||||
postgresPassword: string;
|
||||
}
|
||||
|
||||
export function generateSecrets(): GeneratedSecrets {
|
||||
return {
|
||||
jwtSecret: randomBytes(32).toString("base64url"),
|
||||
postgresPassword: randomBytes(24).toString("base64url"),
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
import { readFileSync } from "fs";
|
||||
import type { SpaceConfig } from "../types.js";
|
||||
|
||||
// Compose template with {{PLACEHOLDER}} markers, relative to the API cwd by
// default; override with TEMPLATE_PATH in production.
const TEMPLATE_PATH =
  process.env.TEMPLATE_PATH ?? "../docker-compose.template.yml";
|
||||
|
||||
/**
 * Renders the docker-compose file for a space by substituting
 * {{PLACEHOLDER}} markers in the template (TypeScript port of generate.sh).
 *
 * Single-value markers are replaced via replaceAll; the four multi-line
 * blocks (OAuth env, Traefik/Sablier labels) are replaced once each.
 *
 * NOTE(review): the indentation embedded in the multi-line template literals
 * must match the YAML nesting of the template file — confirm against
 * docker-compose.template.yml.
 *
 * @returns the rendered compose file contents as a string
 */
export function generateComposeFile(config: SpaceConfig): string {
  let template = readFileSync(TEMPLATE_PATH, "utf-8");

  const infisicalSlug = config.infisicalSlug ?? `postiz-${config.slug}`;
  const emailFromName = config.postiz?.emailFromName ?? "rSocials";
  const disableReg = config.postiz?.disableRegistration ?? false;

  // Build OAuth block (Pocket ID generic OAuth; client credentials come from
  // Infisical, not this file)
  const oauthBlock = ` POSTIZ_GENERIC_OAUTH: 'true'
NEXT_PUBLIC_POSTIZ_OAUTH_DISPLAY_NAME: 'Pocket ID'
NEXT_PUBLIC_POSTIZ_OAUTH_LOGO_URL: 'https://raw.githubusercontent.com/pocket-id/pocket-id/refs/heads/main/frontend/static/img/static-logo.svg'
POSTIZ_OAUTH_URL: 'https://auth.jeffemmett.com'
POSTIZ_OAUTH_AUTH_URL: 'https://auth.jeffemmett.com/authorize'
POSTIZ_OAUTH_TOKEN_URL: 'https://auth.jeffemmett.com/api/oidc/token'
POSTIZ_OAUTH_USERINFO_URL: 'https://auth.jeffemmett.com/api/oidc/userinfo'
POSTIZ_OAUTH_SCOPE: 'openid profile email'
# POSTIZ_OAUTH_CLIENT_ID + CLIENT_SECRET from Infisical`;

  // Build Sablier labels (traefik.enable=false: routing is done via the file
  // provider in sablier-config.ts, not container labels)
  const traefikLabels = ` - "traefik.enable=false"
- "sablier.enable=true"
- "sablier.group=postiz-${config.slug}"
- "traefik.http.routers.postiz-${config.slug}.rule=Host(\`${config.primaryDomain}\`) || Host(\`${config.fallbackDomain}\`)"
- "traefik.http.routers.postiz-${config.slug}.entrypoints=web,websecure"
- "traefik.http.services.postiz-${config.slug}.loadbalancer.server.port=5000"`;

  // DB and Redis join the same Sablier group so the whole stack sleeps/wakes
  // together.
  const sablierDb = ` labels:
- "sablier.enable=true"
- "sablier.group=postiz-${config.slug}"`;

  const sablierRedis = sablierDb;

  // Substitutions (single-value markers)
  const replacements: Record<string, string> = {
    "{{SPACE_NAME}}": config.displayName,
    "{{SPACE_SLUG}}": config.slug,
    "{{PRIMARY_DOMAIN}}": config.primaryDomain,
    "{{FALLBACK_DOMAIN}}": config.fallbackDomain,
    "{{INFISICAL_SLUG}}": infisicalSlug,
    "{{POSTIZ_IMAGE}}": "ghcr.io/gitroomhq/postiz-app:latest",
    "{{POSTIZ_PORT}}": "5000",
    "{{POSTGRES_IMAGE}}": "postgres:17-alpine",
    "{{REDIS_IMAGE}}": "redis:7.2",
    "{{TEMPORAL_IMAGE}}": "temporalio/auto-setup:1.28.1",
    "{{TEMPORAL_PG_IMAGE}}": "postgres:16",
    "{{EMAIL_PROVIDER}}": "nodemailer",
    "{{EMAIL_FROM_NAME}}": emailFromName,
    "{{EMAIL_FROM}}": config.emailFrom,
    "{{EMAIL_HOST}}": "mailcowdockerized-postfix-mailcow-1",
    "{{EMAIL_PORT}}": "587",
    "{{EMAIL_SECURE}}": "false",
    "{{EMAIL_USER}}": "noreply@rmail.online",
    "{{STORAGE_PROVIDER}}": "local",
    "{{UPLOAD_DIR}}": "/uploads",
    "{{DISABLE_REG}}": String(disableReg),
    "{{IS_GENERAL}}": "true",
    "{{API_LIMIT}}": "30",
  };

  for (const [placeholder, value] of Object.entries(replacements)) {
    template = template.replaceAll(placeholder, value);
  }

  // Replace multi-line blocks (each marker appears once in the template)
  template = template.replace("{{OAUTH_BLOCK}}", oauthBlock);
  template = template.replace("{{TRAEFIK_LABELS}}", traefikLabels);
  template = template.replace("{{SABLIER_LABELS_DB}}", sablierDb);
  template = template.replace("{{SABLIER_LABELS_REDIS}}", sablierRedis);

  return template;
}
|
||||
|
|
@ -0,0 +1,72 @@
|
|||
import { readFileSync, writeFileSync } from "fs";
|
||||
import yaml from "js-yaml";
|
||||
|
||||
// Location of cloudflared's config.yml on the host; overridable via env
// for tests or non-default deployments.
const TUNNEL_CONFIG_PATH =
  process.env.TUNNEL_CONFIG_PATH ?? "/root/cloudflared/config.yml";

/**
 * Minimal shape of cloudflared's config.yml that this module reads/writes.
 * Unknown top-level keys are preserved by js-yaml round-tripping only if
 * declared here — NOTE(review): confirm the real config has no extra keys.
 */
interface TunnelConfig {
  tunnel?: string;
  "credentials-file"?: string;
  // Ordered ingress rules; cloudflared matches top-down and requires the
  // final entry to be a catch-all (no hostname).
  ingress: Array<{
    hostname?: string;
    service: string;
  }>;
}
|
||||
|
||||
function loadConfig(): TunnelConfig {
|
||||
const content = readFileSync(TUNNEL_CONFIG_PATH, "utf-8");
|
||||
return yaml.load(content) as TunnelConfig;
|
||||
}
|
||||
|
||||
function saveConfig(config: TunnelConfig): void {
|
||||
writeFileSync(TUNNEL_CONFIG_PATH, yaml.dump(config, { lineWidth: 120 }));
|
||||
}
|
||||
|
||||
export function addTunnelHostnames(
|
||||
primaryDomain: string,
|
||||
fallbackDomain: string
|
||||
): void {
|
||||
const config = loadConfig();
|
||||
|
||||
// Insert before the catch-all rule (last entry with no hostname)
|
||||
const catchAllIdx = config.ingress.findIndex((e) => !e.hostname);
|
||||
const insertIdx = catchAllIdx >= 0 ? catchAllIdx : config.ingress.length;
|
||||
|
||||
// Check if already exists
|
||||
const exists = config.ingress.some(
|
||||
(e) => e.hostname === primaryDomain || e.hostname === fallbackDomain
|
||||
);
|
||||
if (exists) return;
|
||||
|
||||
config.ingress.splice(
|
||||
insertIdx,
|
||||
0,
|
||||
{ hostname: primaryDomain, service: "http://localhost:80" },
|
||||
{ hostname: fallbackDomain, service: "http://localhost:80" }
|
||||
);
|
||||
|
||||
saveConfig(config);
|
||||
}
|
||||
|
||||
export function removeTunnelHostnames(
|
||||
primaryDomain: string,
|
||||
fallbackDomain: string
|
||||
): void {
|
||||
const config = loadConfig();
|
||||
config.ingress = config.ingress.filter(
|
||||
(e) => e.hostname !== primaryDomain && e.hostname !== fallbackDomain
|
||||
);
|
||||
saveConfig(config);
|
||||
}
|
||||
|
||||
export async function restartCloudflared(): Promise<void> {
|
||||
const proc = Bun.spawn(["docker", "restart", "cloudflared"], {
|
||||
stdout: "pipe",
|
||||
stderr: "pipe",
|
||||
});
|
||||
const exitCode = await proc.exited;
|
||||
if (exitCode !== 0) {
|
||||
const stderr = await new Response(proc.stderr).text();
|
||||
throw new Error(`Failed to restart cloudflared: ${stderr}`);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
/**
 * Full configuration used to render a space's docker-compose template.
 */
export interface SpaceConfig {
  // URL-safe identifier; interpolated into container/router names
  // (e.g. "postiz-<slug>") and Sablier group labels.
  slug: string;
  // Human-readable space name ({{SPACE_NAME}} in the template).
  displayName: string;
  primaryDomain: string;
  fallbackDomain: string;
  // From-address for outbound mail sent by this instance.
  emailFrom: string;
  // Infisical project slug — presumably defaults to `slug` when omitted;
  // TODO confirm against the template engine's fallback.
  infisicalSlug?: string;
  // Postiz-specific overrides; all optional.
  postiz?: {
    disableRegistration?: boolean;
    emailFromName?: string;
  };
}
|
||||
|
||||
/**
 * A provisioned (or formerly provisioned) space as stored in the
 * instance registry.
 */
export interface Instance {
  id: string;
  slug: string;
  displayName: string;
  primaryDomain: string;
  fallbackDomain: string;
  owner: string;
  status: InstanceStatus;
  // Path to the generated compose file; null until one has been written.
  composePath: string | null;
  // Timestamps stored as strings — presumably ISO-8601 from the DB layer;
  // TODO confirm against the SQLite schema.
  createdAt: string;
  updatedAt: string;
}
|
||||
|
||||
/**
 * Lifecycle states of an instance, from initial provisioning through
 * teardown. Terminal state is "destroyed"; "failed" marks an unsuccessful
 * provision attempt.
 */
export type InstanceStatus =
  | "provisioning"
  | "active"
  | "failed"
  | "teardown"
  | "destroyed";
||||
|
||||
/**
 * Client-supplied payload for provisioning a new space.
 * Only slug and displayName are required; the remaining fields fall back
 * to server-side defaults — presumably primaryDomain defaults to
 * <slug>.rsocials.online; TODO confirm against the route handler.
 */
export interface ProvisionRequest {
  slug: string;
  displayName: string;
  primaryDomain?: string;
  emailFrom?: string;
  disableRegistration?: boolean;
}
|
||||
|
||||
/**
 * One audit entry recording a provisioning-related action taken against
 * an instance.
 */
export interface ProvisionLog {
  // Numeric row id (presumably autoincrement — confirm against schema).
  id: number;
  instanceId: string;
  action: string;
  // Optional free-form detail for the action; null when none recorded.
  detail: string | null;
  createdAt: string;
}
|
||||
|
||||
/**
 * Result of a host-capacity check performed before provisioning.
 */
export interface ResourceCheck {
  totalMemMB: number;
  availMemMB: number;
  // Number of instances currently registered.
  instanceCount: number;
  // True when memory and instance-count limits permit another provision.
  canProvision: boolean;
  // Human-readable explanation, set when canProvision is false.
  reason?: string;
}
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
{
|
||||
"compilerOptions": {
|
||||
"target": "ESNext",
|
||||
"module": "ESNext",
|
||||
"moduleResolution": "bundler",
|
||||
"strict": true,
|
||||
"esModuleInterop": true,
|
||||
"skipLibCheck": true,
|
||||
"outDir": "dist",
|
||||
"rootDir": "src",
|
||||
"types": ["@types/bun"]
|
||||
},
|
||||
"include": ["src"]
|
||||
}
|
||||
|
|
@ -1,9 +1,10 @@
|
|||
---
|
||||
id: TASK-2
|
||||
title: Multi-tenant provisioning platform
|
||||
status: To Do
|
||||
status: In Progress
|
||||
assignee: []
|
||||
created_date: '2026-02-24 03:54'
|
||||
updated_date: '2026-02-25 05:12'
|
||||
labels: []
|
||||
dependencies: []
|
||||
priority: medium
|
||||
|
|
@ -14,3 +15,9 @@ priority: medium
|
|||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Self-service API for communities to provision their own Postiz instance at <space>.rsocials.online. CRDT token gating, x402 micropayment metering, Sablier hibernation. Full plan at ~/.claude/plans/greedy-skipping-dahl.md. Phases: 1) Provisioning API Core, 2) CRDT Token Gating, 3) Landing Page + Provision UI, 4) x402 Usage Metering, 5) Sablier + Hardening.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Starting Phase 1: Provisioning API Core (Hono/Bun)
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
|
|
|||
|
|
@ -1,10 +1,10 @@
|
|||
---
|
||||
id: TASK-6
|
||||
title: Remove plaintext .env files from server
|
||||
status: In Progress
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-02-25 05:02'
|
||||
updated_date: '2026-02-25 05:04'
|
||||
updated_date: '2026-02-25 05:11'
|
||||
labels:
|
||||
- security
|
||||
- infisical
|
||||
|
|
@ -21,7 +21,19 @@ Now that all secrets are stored in Infisical, remove the plaintext .env files fr
|
|||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 All Postiz spaces pull secrets from Infisical at container startup
|
||||
- [x] #1 All Postiz spaces pull secrets from Infisical at container startup
|
||||
- [ ] #2 No plaintext .env files with secrets remain on server
|
||||
- [ ] #3 Containers use entrypoint wrapper or infisical run for secret injection
|
||||
- [x] #3 Containers use entrypoint wrapper or infisical run for secret injection
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
AC #2 (remove .env files from server) requires deploying the new compose files on netcup-full. The generated compose files and .env templates are ready in generated/.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Template updated to use Infisical entrypoint wrapper. Compose files no longer contain secrets — only INFISICAL_CLIENT_ID, INFISICAL_CLIENT_SECRET, and POSTGRES_PASSWORD in .env (3 values). All other secrets (JWT_SECRET, EMAIL_PASS, OAuth creds, social API keys) injected at runtime from Infisical. Added missing EMAIL_PASS and POSTGRES_PASSWORD to all 3 Postiz Infisical projects. Server-side deployment: replace existing compose files with generated ones + create minimal .env per space.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
|
|
|
|||
|
|
@ -1,9 +1,10 @@
|
|||
---
|
||||
id: TASK-7
|
||||
title: Clean up duplicate rsocials-online Infisical project
|
||||
status: To Do
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-02-25 05:02'
|
||||
updated_date: '2026-02-25 05:12'
|
||||
labels:
|
||||
- infisical
|
||||
- cleanup
|
||||
|
|
@ -16,3 +17,9 @@ priority: low
|
|||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
There's a pre-existing rsocials-online project in Infisical (slug: rsocials) that the app container points to, plus a newer rsocials-app project created during migration. Consolidate into one project and update container config to match.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Deleted duplicate rsocials-app project (30bb7fcf) from Infisical. The pre-existing rsocials project (slug: rsocials) contains the real app secrets and is what the container references. Total Infisical projects: 16 (was 17).
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
|
|
|
|||
Loading…
Reference in New Issue