Merge remote-tracking branch 'origin/main'
CI/CD / deploy (push) Failing after 12s Details

# Conflicts:
#	backlog/config.yml
This commit is contained in:
Jeff Emmett 2026-04-01 10:27:51 -07:00
commit 517da4da14
17 changed files with 846 additions and 26 deletions

View File

@ -1,10 +1,13 @@
# Backlog Aggregator Dockerfile # Backlog Aggregator Dockerfile with Gitea Scanner
# Multi-project real-time task aggregation server # Multi-project real-time task aggregation server
FROM oven/bun:1 AS base FROM oven/bun:1 AS base
WORKDIR /app WORKDIR /app
# Install dependencies # Install dependencies including git and cron for Gitea scanning
RUN apt-get update && apt-get install -y git cron openssh-client && rm -rf /var/lib/apt/lists/*
# Install npm dependencies
FROM base AS install FROM base AS install
RUN mkdir -p /temp/dev RUN mkdir -p /temp/dev
COPY package.json bun.lock* bunfig.toml /temp/dev/ COPY package.json bun.lock* bunfig.toml /temp/dev/
@ -18,6 +21,17 @@ COPY . .
# Build CSS (needed for components) # Build CSS (needed for components)
RUN bun run build:css || true RUN bun run build:css || true
# Make entrypoint executable
RUN chmod +x /app/entrypoint.sh || true
# Create cron job for daily Gitea sync (runs at 2 AM and 2 PM)
RUN echo "0 2,14 * * * cd /app && bun run src/aggregator/gitea-scanner.ts --verbose >> /var/log/gitea-scanner.log 2>&1" > /etc/cron.d/gitea-scanner \
&& chmod 0644 /etc/cron.d/gitea-scanner \
&& crontab /etc/cron.d/gitea-scanner
# Create log file
RUN touch /var/log/gitea-scanner.log
# Expose port # Expose port
EXPOSE 6420 EXPOSE 6420
@ -25,5 +39,5 @@ EXPOSE 6420
ENV NODE_ENV=production ENV NODE_ENV=production
ENV PORT=6420 ENV PORT=6420
# Run the aggregator server # Use entrypoint script
CMD ["bun", "src/aggregator/index.ts", "--port", "6420", "--paths", "/projects"] ENTRYPOINT ["/app/entrypoint.sh"]

View File

@ -1,15 +1,7 @@
project_name: Backlog.md project_name: "Backlog.md"
default_status: To Do default_status: "To Do"
statuses: statuses: ["To Do", "In Progress", "Done"]
- To Do labels: []
- In Progress
- Done
labels:
- dev-ops
- bug-fix
- feature
- enhancement
milestones: []
date_format: yyyy-mm-dd date_format: yyyy-mm-dd
max_column_width: 20 max_column_width: 20
auto_open_browser: true auto_open_browser: true
@ -20,4 +12,4 @@ zero_padded_ids: 3
bypass_git_hooks: false bypass_git_hooks: false
check_active_branches: true check_active_branches: true
active_branch_days: 60 active_branch_days: 60
onStatusChange: 'python3 /home/jeffe/Github/dev-ops/scripts/backlog-notify.py' task_prefix: "task"

View File

@ -0,0 +1,73 @@
---
id: task-009
title: 'NLA Oracle - Base Sepolia deployment and integration'
status: In Progress
assignee: []
created_date: '2026-03-15 07:00'
labels:
- blockchain
- ai
- infrastructure
dependencies: []
priority: high
---
## Description
<!-- SECTION:DESCRIPTION:BEGIN -->
Deploy and operate a Natural Language Agreements (NLA) oracle on Base Sepolia. The oracle uses AI (Anthropic Claude) to arbitrate blockchain escrows defined in plain English. Contracts are pre-deployed by Alkahest on Base Sepolia. The oracle container (`nla-oracle`) is running and listening for arbitration requests.
**Oracle address**: `0x2d2E0a49B733E3CBB2B6C04C417aa5E24cd2A70F`
**Repo**: `/opt/apps/natural-language-agreements`
**Container**: `nla-oracle`
**Network**: Base Sepolia (chain 84532)
<!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria
<!-- AC:BEGIN -->
- [x] #1 Oracle container deployed and running on Base Sepolia
- [x] #2 Secrets stored in Infisical (private key, Anthropic API key)
- [x] #3 Oracle starts from current block (avoids public RPC getLogs limit)
- [ ] #4 Fund oracle wallet with Base Sepolia ETH for gas
- [ ] #5 Create and test a demo escrow end-to-end
- [ ] #6 Explore integration with DefectFi / CRDT escrow project
- [ ] #7 Consider upgrading to paid RPC (Alchemy) for production reliability
- [ ] #8 Optionally add OpenAI provider as fallback alongside Anthropic
<!-- AC:END -->
## Notes
### Completed (2026-03-15)
- Cloned `arkhai-io/natural-language-agreements` to `/opt/apps/natural-language-agreements`
- Created Dockerfile (bun-based single-stage) and docker-compose.yml
- Patched oracle to use `fromBlock: currentBlock` to avoid public RPC 10k block limit
- Stored secrets in Infisical `/ai/` folder (`NLA_ORACLE_PRIVATE_KEY`, `ANTHROPIC_API_KEY`)
- Container `nla-oracle` running, polling every 10s on Base Sepolia
- Created demo script at `demo/demo.ts` — Content Creation Bounty use case
- Alice locks 1000 BOUNTY tokens, demands a blog post about regenerative economics
- Bob submits a blog post, oracle (Claude) evaluates and arbitrates
### Blocking: Fund 3 wallets with Base Sepolia ETH
| Wallet | Address | Needed |
|--------|---------|--------|
| Oracle | `0x2d2E0a49B733E3CBB2B6C04C417aa5E24cd2A70F` | ~0.005 ETH |
| Alice (demo) | `0x2E9d13530C3880edB42f2b18fC0Bdc7d527872Ae` | ~0.01 ETH |
| Bob (demo) | `0x739C83BEbdDb8A68c0a76f2F59E40729A7F184F9` | ~0.005 ETH |
Faucets: [Alchemy](https://www.alchemy.com/faucets/base-sepolia) or [Coinbase](https://portal.cdp.coinbase.com/products/faucet)
### Technical details
- Alkahest TrustedOracleArbiter: `0x3664b11BcCCeCA27C21BBAB43548961eD14d4D6D`
- ERC20EscrowObligation: `0x1Fe964348Ec42D9Bb1A072503ce8b4744266FF43`
- Public RPC `https://sepolia.base.org` has 10k block getLogs limit — handled via `fromBlock: currentBlock`
- Polling interval: 10s
- Demo state file: `demo/.demo-state.json`
### Run demo after funding
```bash
cd /opt/apps/natural-language-agreements
bun run demo/demo.ts create # deploy token + create escrow
bun run demo/demo.ts fulfill # Bob submits blog post
docker logs -f nla-oracle # watch Claude evaluate
bun run demo/demo.ts collect # Bob collects tokens
```

View File

@ -0,0 +1,123 @@
---
id: task-010
title: 'Netcup RS 8000 infrastructure hardening and maintenance'
status: Done
assignee: []
created_date: '2026-03-15 07:30'
labels:
- dev-ops
- enhancement
dependencies: []
priority: high
---
## Description
<!-- SECTION:DESCRIPTION:BEGIN -->
High-priority infrastructure tasks for the Netcup RS 8000 production server (20 cores, 64GB RAM, 3TB) running 40+ live services. Covers security hardening, storage cleanup, monitoring, and reliability improvements.
<!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria
<!-- AC:BEGIN -->
- [x] #1 Audit and rotate stale secrets (Infisical + KeePass) — identify unused or old credentials
- [x] #2 Review and harden Traefik config — audited, fixes in task-011 (requires host access)
- [x] #3 Storage cleanup — pruned ~330GB (74%→62% disk), removed 15 dead containers
- [x] #4 Set up automated Docker image pruning — script at /opt/apps/dev-ops/docker-weekly-prune.sh, cron in task-011
- [x] #5 Health check dashboard — audited (179 monitors, 46 unmonitored containers), gaps in task-014
- [x] #6 Backup verification — audited: NO automated backups anywhere, remediation in task-013
- [x] #7 Review container resource limits — added limits to 7 top consumers (postiz x3, p2pwiki-db, elasticsearch, gitea, immich_postgres)
- [x] #8 Update base images — audited, 5 critical/10 high upgrades needed, tracked in task-012
- [x] #13 Upgrade p2pwiki-db MariaDB 10.6→10.11 (backup + upgrade + mariadb-upgrade complete)
- [x] #9 Fix p2p-db CPU (174%→0.02%) — added missing wp_options index, cleaned 15k duplicate rows
- [x] #10 Fix p2pwiki CPU (50%→15%) — blocked Applebot hammering Special/API pages via .htaccess
- [x] #11 Remove junk containers — stopped funny_mirzakhani (cat /dev/urandom), payment-safe-mcp (9149 restarts)
- [x] #12 Vault-migration audit — 20 of 26 secrets confirmed stale, 2 active, 4 unclear. Deletion pending via Infisical UI
<!-- AC:END -->
## Audit Results (2026-03-15)
### Secrets Audit — 121 secrets, 18 folders
- **HIGH**: `vault-migration` folder has 26 likely stale secrets (Pusher, Holochain, Obsidian, old Cloudflare tokens, test Stripe keys)
- **HIGH**: 6+ duplicate secrets across folders (Syncthing x6, GitHub x3, Cloudflare x3, RunPod x2)
- **MED**: Test/dev keys in prod (Duffel test, Stripe test), 2 orphaned root-level secrets
- **Action**: Audit each vault-migration key, consolidate duplicates, remove test keys
### Container Health — 303 containers, 161 without health checks
- **FIXED**: Stopped `funny_mirzakhani` (junk `cat /dev/urandom` container)
- **FIXED**: Stopped `payment-safe-mcp` (9,149 restart loop, no logs)
- **FIXED**: Removed 15 crashed/init containers
- **URGENT**: `p2p-db` at 78-132% CPU (MariaDB, investigate queries)
- **URGENT**: 293 of 303 containers have ZERO resource limits
- Top memory hogs without limits: postiz x3 (~2GB each), p2pwiki-db (1.8GB), gitea (1.7GB)
- `erpnext-queue-long` crashed 7 days ago — needs restart
### Storage — 2.1 TB used / 3.0 TB (74%)
- **IN PROGRESS**: Docker prune running (build cache ~347GB, dangling images ~50-80GB)
- 115 dangling volumes (~2.5GB)
- 20+ stopped rspace services sitting 2 weeks
- `payment-infra` rebuild loop generating constant dangling images
### Traefik Security — 3 critical, 3 medium (requires HOST access)
- **C1**: No TLS minimum version (defaults to TLS 1.0)
- **C2**: No capability drops on Traefik container
- **C3**: Ports 80/443 on 0.0.0.0 — bypasses Cloudflare
- **M1**: No rate limiting middleware
- **M2**: `insecureSkipVerify` on pentagi transport
- **M3**: No default Content-Security-Policy header
## Host Commands Needed
### Traefik TLS hardening (run on host)
```bash
# Create TLS options file
cat > /root/traefik/config/tls-options.yml << 'EOF'
tls:
options:
default:
minVersion: VersionTLS12
cipherSuites:
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
EOF
```
### Traefik rate limiting (run on host)
```bash
cat > /root/traefik/config/rate-limit.yml << 'EOF'
http:
middlewares:
rate-limit:
rateLimit:
average: 100
burst: 200
period: 1s
EOF
```
### Traefik container hardening (edit docker-compose on host)
Add to Traefik service:
```yaml
cap_drop: [ALL]
cap_add: [NET_BIND_SERVICE]
security_opt: [no-new-privileges:true]
read_only: true
tmpfs: [/tmp]
deploy:
resources:
limits:
memory: 512M
cpus: '2.0'
```
### Restrict ports to localhost (edit docker-compose on host)
```yaml
ports:
- "127.0.0.1:80:80"
- "127.0.0.1:443:443"
```
Then restart Traefik: `cd /root/traefik && docker compose up -d`

View File

@ -0,0 +1,69 @@
---
id: task-011
title: 'Host-level Traefik hardening and cron setup'
status: To Do
assignee: []
created_date: '2026-03-15 08:00'
labels:
- dev-ops
- enhancement
dependencies:
- task-010
priority: high
---
## Description
<!-- SECTION:DESCRIPTION:BEGIN -->
Security hardening tasks that require host-level access on Netcup RS 8000. Cannot be done from the claude-dev container. Scripts are pre-built at `/opt/apps/dev-ops/`.
<!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria
<!-- AC:BEGIN -->
- [ ] #1 Run Traefik hardening script: `bash /opt/apps/dev-ops/traefik-hardening.sh`
- [ ] #2 Edit Traefik docker-compose: add `cap_drop: [ALL]`, `cap_add: [NET_BIND_SERVICE]`, `security_opt: [no-new-privileges:true]`, `read_only: true`
- [ ] #3 Restrict Traefik ports to localhost: `127.0.0.1:80:80` and `127.0.0.1:443:443`
- [ ] #4 Add resource limits to Traefik: `memory: 512M`, `cpus: 2.0`
- [ ] #5 Add CSP `frame-ancestors 'self'` to security-headers.yml
- [ ] #6 Remove `insecureSkipVerify` from pentagi transport config
- [ ] #7 Add weekly Docker prune cron: `23 4 * * 0 /opt/apps/dev-ops/docker-weekly-prune.sh >> /var/log/docker-prune.log 2>&1`
- [ ] #8 Restart Traefik and verify: `cd /root/traefik && docker compose up -d`
<!-- AC:END -->
## Notes
### Quick start
```bash
# 1. Run the pre-built script (creates TLS + rate limit configs)
bash /opt/apps/dev-ops/traefik-hardening.sh
# 2. Edit Traefik compose (location: /root/traefik/docker-compose.yml)
# Add to traefik service:
# cap_drop: [ALL]
# cap_add: [NET_BIND_SERVICE]
# security_opt: [no-new-privileges:true]
# read_only: true
# tmpfs: [/tmp]
# deploy:
# resources:
# limits:
# memory: 512M
# cpus: '2.0'
# Change ports to:
# - '127.0.0.1:80:80'
# - '127.0.0.1:443:443'
# 3. Add weekly prune cron
crontab -e
# Add: 23 4 * * 0 /opt/apps/dev-ops/docker-weekly-prune.sh >> /var/log/docker-prune.log 2>&1
# 4. Restart
cd /root/traefik && docker compose up -d
# 5. Verify
curl -I https://jeffemmett.com
```
### Risk
- Restricting ports to 127.0.0.1 means ONLY Cloudflare tunnel traffic reaches Traefik. If cloudflared goes down, all external access is lost until it recovers. This is the intended behavior (no direct IP access).
- `read_only` on Traefik may need a tmpfs for `/data` if ACME cert storage is inside the container.

View File

@ -0,0 +1,92 @@
---
id: task-012
title: 'Update critical container base images'
status: To Do
assignee: []
created_date: '2026-03-15 08:30'
labels:
- dev-ops
- enhancement
dependencies:
- task-010
priority: high
---
## Description
<!-- SECTION:DESCRIPTION:BEGIN -->
Multiple containers are running severely outdated base images with known vulnerabilities. Prioritized upgrade list from infrastructure audit on 2026-03-15. Many compose files are on host-only paths (not accessible from claude-dev container).
<!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria
<!-- AC:BEGIN -->
- [ ] #1 CRITICAL: Upgrade p2pwiki-elasticsearch from 7.10.2 to opensearch:2.x (5 years old, Log4Shell era)
- [ ] #2 CRITICAL: Upgrade WordPress stack (5 containers) from 6.4-php8.2 to 6.7-php8.3 (compose at /opt/p2pfoundation/)
- [ ] #3 CRITICAL: Upgrade Gitea from 1.21 to 1.23 (compose at /root/gitea/)
- [ ] #4 HIGH: Upgrade ERPNext Redis from 6.2-alpine to 7-alpine (compose at /opt/erpnext/)
- [ ] #5 HIGH: Upgrade MediaWiki 1.40/1.41 to 1.42 LTS (compose at /opt/websites/p2pwiki/ and /opt/p2pfoundation/)
- [ ] #6 HIGH: Upgrade ERPNext MariaDB from 10.6 to 10.11 (compose at /opt/erpnext/)
- [ ] #7 HIGH: Upgrade p2pwiki-db MariaDB from 10.6 to 10.11 (compose at /opt/websites/p2pwiki/)
- [ ] #8 MEDIUM: Upgrade Qdrant from 1.7.4 to 1.13 (compose at /root/semantic-search/)
- [ ] #9 MEDIUM: Plan Traefik v2 to v3 migration (compose at /root/traefik/)
- [ ] #10 MEDIUM: Re-pull stale :latest images (ollama, n8n, syncthing, *arr stack, jellyfin)
<!-- AC:END -->
## Notes
### Upgrade procedures
**Redis 6.2 → 7 (drop-in):**
```bash
# In /opt/erpnext/docker-compose.yml, change:
# image: redis:6.2-alpine → image: redis:7-alpine
cd /opt/erpnext && docker compose up -d redis-cache redis-queue
```
**Gitea 1.21 → 1.23:**
```bash
# Backup first!
docker exec gitea-db pg_dumpall -U gitea > /tmp/gitea-backup.sql
# In /root/gitea/docker-compose.yml, change:
# image: gitea/gitea:1.21 → image: gitea/gitea:1.23
cd /root/gitea && docker compose up -d
# Gitea handles DB migrations automatically on startup
```
**WordPress 6.4 → 6.7:**
```bash
# Backup databases first
docker exec p2p-db mariadb-dump -u root -pp2p_secure_root_2025 --all-databases > /tmp/p2p-db-backup.sql
# In /opt/p2pfoundation/docker-compose.yml, change:
# image: wordpress:6.4-php8.2-apache → image: wordpress:6.7-php8.3-apache
cd /opt/p2pfoundation && docker compose up -d
# WP handles DB upgrades via wp-admin/upgrade.php
```
**MariaDB 10.6 → 10.11:**
```bash
# Backup first, then change image tag
# MariaDB handles upgrades automatically, but run mariadb-upgrade after
docker exec <container> mariadb-upgrade -u root -p<password>
```
**Elasticsearch 7.10 → OpenSearch 2.x:**
This is the most complex upgrade — requires:
1. Full index backup/snapshot
2. Config migration (different env vars, plugins)
3. MediaWiki CirrusSearch extension config update
4. Reindex all content
Plan as a dedicated maintenance window.
**Traefik v2 → v3:**
Requires config migration (middleware syntax changes, entrypoint format). Use `traefik migration v2-to-v3` tool. Plan as dedicated task.
### Accessible from claude-dev container
- `/opt/websites/p2pwiki/` — p2pwiki compose (MariaDB, MediaWiki, Elasticsearch)
### Requires host access
- `/opt/erpnext/` — ERPNext (Redis, MariaDB)
- `/root/gitea/` — Gitea
- `/opt/p2pfoundation/` — WordPress stack, p2p-db
- `/root/semantic-search/` — Qdrant
- `/root/traefik/` — Traefik

View File

@ -0,0 +1,48 @@
---
id: task-013
title: 'Automated database backups and offsite storage'
status: Done
assignee: []
created_date: '2026-03-15 09:00'
labels:
- dev-ops
- enhancement
dependencies: []
priority: high
---
## Description
<!-- SECTION:DESCRIPTION:BEGIN -->
No critical database has automated backups. No data leaves this single server. A disk failure or compromise would lose all source code (Gitea), business data (ERPNext), photos (Immich), wiki/blog content (p2p-db), and email (Mailcow). This is the highest priority infrastructure gap.
<!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria
<!-- AC:BEGIN -->
- [x] #1 Automated daily pg_dump for Gitea-DB (Postgres) — 3.1MB
- [x] #2 Automated daily mysqldump for ERPNext MariaDB — 2.3MB
- [x] #3 Automated daily mysqldump for p2p-db (MariaDB) — 707MB
- [x] #4 Automated daily pg_dump for Immich Postgres — 620MB
- [x] #5 Automated daily Mailcow backup (mariadb-dump) — 1.1MB
- [x] #6 Off-server storage — rclone sync to Cloudflare R2 (plex-media/db-backups/)
- [x] #7 Backup retention policy (7 days local, synced to R2)
- [x] #8 Backup monitoring — Uptime Kuma push monitor (ID 199, 48h heartbeat interval)
<!-- AC:END -->
## Notes
### Approach: Centralized backup container
Deploy an Alpine container with cron, database clients (pg_dump, mariadb-dump), and rclone. Runs scheduled dumps for all databases, compresses, and pushes to R2.
### Template
rphotos backup script at `/opt/apps/rphotos-online/backup-database.sh` has a good pattern (pg_dumpall with 30-day retention).
### Database credentials needed
- Gitea-DB: Postgres, accessible via `gitea-db` container
- ERPNext: MariaDB at `/opt/erpnext/` (host-only)
- p2p-db: MariaDB, root password in container env
- Immich: Postgres at `/opt/immich/postgres` (host bind mount)
- Mailcow: MySQL in `mailcowdockerized-mysql-mailcow-1`
### R2 storage
`r2-mount` container already has rclone configured for Cloudflare R2. Can reuse config for backup uploads.

View File

@ -0,0 +1,58 @@
---
id: task-014
title: 'Health monitoring coverage gaps'
status: In Progress
assignee: []
created_date: '2026-03-15 09:00'
labels:
- dev-ops
- enhancement
dependencies: []
priority: high
---
## Description
<!-- SECTION:DESCRIPTION:BEGIN -->
179 Uptime Kuma monitors exist but critical infrastructure (traefik, cloudflared) has zero monitoring. 209 of 345 containers lack Docker healthchecks. 46 containers have no Kuma monitor at all.
<!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria
<!-- AC:BEGIN -->
- [ ] #1 Add Docker healthchecks to traefik and cloudflared (SPOF — requires host access)
- [x] #2 Add Docker healthchecks to databases — done: p2pwiki-db (MariaDB), docmost-db, listmonk-db, mattermost-db (Postgres). Remaining need host access: gitea-db, mailcow-mysql, grid-trading-db, p2pwikifr-db
- [x] #3 Add Kuma monitors for SMTP (existing KT Mail SMTP) and IMAP (port 993) — Mailcow IMAP monitor added (ID 216)
- [x] #4 Add Kuma monitors for payment-* stack — added: Treasury, Curve, Flow/rfunds, API/mycofi (IDs 212-215)
- [ ] #5 Enable Docker socket monitoring in Kuma for container restart/exit detection
- [x] #6 Add Kuma monitors for litellm (ID 208), listmonk (ID 209), seafile (ID 211). infisical, headscale, n8n already monitored.
- [x] #7 Removed 8 inactive monitors (Games Platform, Cart, Conviction Demo, Xhiva Booking, Treasury, FungiFlows, Discourse cadCAD, Tino Ardez)
<!-- AC:END -->
## Notes
### Quick wins (Docker healthchecks in compose files)
```yaml
# Postgres healthcheck
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER"]
interval: 30s
timeout: 10s
retries: 3
# MariaDB healthcheck
healthcheck:
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
interval: 30s
timeout: 10s
retries: 3
# Traefik healthcheck
healthcheck:
test: ["CMD", "traefik", "healthcheck"]
interval: 30s
timeout: 10s
retries: 3
```
### Traefik and cloudflared healthchecks require host access
Compose files at `/root/traefik/` and wherever cloudflared is configured.

View File

@ -0,0 +1,16 @@
---
id: task-high.01
title: Jefflix IPTV Setup - Complete Jellyfin Configuration
status: To Do
assignee: []
created_date: '2026-03-16 02:07'
labels: []
dependencies: []
parent_task_id: task-high
---
## Description
<!-- SECTION:DESCRIPTION:BEGIN -->
Jellyfin is deployed at http://jefflix.lol via Traefik (container: jefflix, /opt/apps/jefflix/). Needs initial web UI setup: 1) Complete setup wizard (create admin account) 2) Add IPTV M3U tuner (iptv-org playlists - e.g. index.m3u, countries/us.m3u, categories/news.m3u) 3) Add XMLTV EPG guide data (iptv-org.github.io/epg/guides/us.xml) 4) Test live TV playback. DNS already configured via jefflix-dns (CoreDNS on Tailscale 100.64.0.2).
<!-- SECTION:DESCRIPTION:END -->

View File

@ -1,5 +1,6 @@
# Backlog Aggregator - Multi-Project Real-time Task View # Backlog Aggregator - Multi-Project Real-time Task View with Gitea Integration
# Deploy at backlog.jeffemmett.com to see all project tasks in real-time # Deploy at backlog.jeffemmett.com to see all project tasks in real-time
# Scans Gitea repos daily for backlog/ directories
services: services:
backlog-aggregator: backlog-aggregator:
@ -10,12 +11,12 @@ services:
restart: unless-stopped restart: unless-stopped
volumes: volumes:
# Mount all project directories that contain backlog folders # Mount all project directories that contain backlog folders
# The aggregator scans these paths for backlog/ subdirectories
# NOTE: Using rw (read-write) to allow task creation/updates from web UI
- /opt/websites:/projects/websites:rw - /opt/websites:/projects/websites:rw
- /opt/apps:/projects/apps:rw - /opt/apps:/projects/apps:rw
# If you have repos in other locations, add them here: # Gitea-synced repos (auto-discovered and cloned)
# - /home/user/projects:/projects/home:rw - /opt/gitea-repos:/projects/gitea:rw
# SSH keys for git clone operations
- /root/.ssh:/root/.ssh:ro
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.backlog.rule=Host(`backlog.jeffemmett.com`)" - "traefik.http.routers.backlog.rule=Host(`backlog.jeffemmett.com`)"
@ -27,7 +28,10 @@ services:
environment: environment:
- PORT=6420 - PORT=6420
- NODE_ENV=production - NODE_ENV=production
command: ["bun", "src/aggregator/index.ts", "--port", "6420", "--paths", "/projects/websites,/projects/apps"] - GITEA_URL=https://gitea.jeffemmett.com
- GITEA_OWNER=jeffemmett
- GITEA_OUTPUT_DIR=/projects/gitea
- GITEA_SSH_KEY=/root/.ssh/gitea_ed25519
networks: networks:
traefik-public: traefik-public:

View File

@ -4,7 +4,6 @@ services:
container_name: backlog-aggregator container_name: backlog-aggregator
restart: unless-stopped restart: unless-stopped
volumes: volumes:
# Mount project directories for multi-project aggregation
- /opt/websites:/projects/websites - /opt/websites:/projects/websites
- /opt/apps:/projects/apps - /opt/apps:/projects/apps
- /opt/gitea-repos:/projects/gitea - /opt/gitea-repos:/projects/gitea

16
entrypoint.sh Executable file
View File

@ -0,0 +1,16 @@
#!/bin/bash
set -e
# Start cron daemon
cron
# Configure SSH for git
export GIT_SSH_COMMAND="ssh -i /root/.ssh/gitea_ed25519 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
# Run initial Gitea scan
echo "Running initial Gitea scan..."
cd /app && bun run src/aggregator/gitea-scanner.ts --verbose 2>&1 || echo "Scan completed with errors"
# Start aggregator
echo "Starting aggregator..."
exec bun run src/aggregator/index.ts --port 6420 --paths "/projects/websites,/projects/apps,/projects/gitea"

View File

@ -0,0 +1,298 @@
/**
* Gitea Repository Scanner for Backlog Aggregator
*
* Scans all repositories in a Gitea instance for backlog/ directories
* and clones/pulls them to a local directory for the aggregator to watch.
*
* Usage:
* bun run gitea-scanner.ts --gitea-url https://gitea.example.com --output /opt/gitea-repos
*
* Environment variables:
* GITEA_URL - Gitea instance URL
* GITEA_TOKEN - API token (optional, for private repos)
* GITEA_OUTPUT_DIR - Directory to clone repos to
*/
import { mkdir, readdir, rm, stat } from "node:fs/promises";
import { join } from "node:path";
import { $ } from "bun";
interface GiteaRepo {
id: number;
name: string;
full_name: string;
clone_url: string;
ssh_url: string;
html_url: string;
private: boolean;
empty: boolean;
archived: boolean;
default_branch: string;
}
interface GiteaContent {
name: string;
path: string;
type: "file" | "dir";
}
interface ScannerConfig {
giteaUrl: string;
giteaToken?: string;
outputDir: string;
sshKeyPath?: string;
owner?: string; // Optional: only scan repos from this owner
concurrency: number;
verbose: boolean;
}
class GiteaScanner {
private config: ScannerConfig;
private headers: Record<string, string>;
constructor(config: ScannerConfig) {
this.config = config;
this.headers = {
Accept: "application/json",
};
if (config.giteaToken) {
this.headers["Authorization"] = `token ${config.giteaToken}`;
}
}
private async fetchJson<T>(endpoint: string): Promise<T | null> {
const url = `${this.config.giteaUrl}/api/v1${endpoint}`;
try {
const response = await fetch(url, { headers: this.headers });
if (!response.ok) {
if (this.config.verbose) {
console.warn(`API request failed: ${url} (${response.status})`);
}
return null;
}
return (await response.json()) as T;
} catch (error) {
if (this.config.verbose) {
console.warn(`API request error: ${url}`, error);
}
return null;
}
}
async getAllRepos(): Promise<GiteaRepo[]> {
const allRepos: GiteaRepo[] = [];
let page = 1;
const limit = 50;
while (true) {
const endpoint = this.config.owner
? `/users/${this.config.owner}/repos?page=${page}&limit=${limit}`
: `/repos/search?page=${page}&limit=${limit}`;
const repos = await this.fetchJson<GiteaRepo[] | { data: GiteaRepo[] }>(endpoint);
if (!repos) break;
// Handle both direct array and {data: []} response formats
const repoList = Array.isArray(repos) ? repos : repos.data || [];
if (repoList.length === 0) break;
// Filter out empty and archived repos
const activeRepos = repoList.filter((r) => !r.empty && !r.archived);
allRepos.push(...activeRepos);
if (repoList.length < limit) break;
page++;
}
return allRepos;
}
async hasBacklogDir(repo: GiteaRepo): Promise<boolean> {
// Check if repo has a backlog/ directory at root
const contents = await this.fetchJson<GiteaContent[]>(`/repos/${repo.full_name}/contents`);
if (!contents || !Array.isArray(contents)) return false;
return contents.some((item) => item.name === "backlog" && item.type === "dir");
}
async cloneOrPullRepo(repo: GiteaRepo): Promise<boolean> {
const repoDir = join(this.config.outputDir, repo.name);
try {
// Check if already cloned
const exists = await stat(repoDir)
.then(() => true)
.catch(() => false);
if (exists) {
// Pull latest changes
if (this.config.verbose) {
console.log(`Pulling ${repo.full_name}...`);
}
const result = await $`cd ${repoDir} && git pull --ff-only 2>&1`.quiet();
if (result.exitCode !== 0) {
console.warn(`Failed to pull ${repo.full_name}: ${result.stderr}`);
// Try to reset and pull
await $`cd ${repoDir} && git fetch origin && git reset --hard origin/${repo.default_branch} 2>&1`.quiet();
}
} else {
// Clone the repo
if (this.config.verbose) {
console.log(`Cloning ${repo.full_name}...`);
}
// Use SSH URL if we have an SSH key configured, otherwise HTTPS
const cloneUrl = this.config.sshKeyPath ? repo.ssh_url : repo.clone_url;
const result = await $`git clone --depth 1 ${cloneUrl} ${repoDir} 2>&1`.quiet();
if (result.exitCode !== 0) {
console.warn(`Failed to clone ${repo.full_name}: ${result.stderr}`);
return false;
}
}
return true;
} catch (error) {
console.error(`Error processing ${repo.full_name}:`, error);
return false;
}
}
async cleanupStaleRepos(validRepoNames: Set<string>): Promise<void> {
try {
const entries = await readdir(this.config.outputDir, { withFileTypes: true });
for (const entry of entries) {
if (!entry.isDirectory()) continue;
if (entry.name.startsWith(".")) continue;
if (!validRepoNames.has(entry.name)) {
const repoDir = join(this.config.outputDir, entry.name);
console.log(`Removing stale repo: ${entry.name}`);
await rm(repoDir, { recursive: true, force: true });
}
}
} catch (error) {
console.warn("Error cleaning up stale repos:", error);
}
}
async scan(): Promise<{ total: number; withBacklog: number; synced: number }> {
console.log(`Scanning Gitea at ${this.config.giteaUrl}...`);
// Ensure output directory exists
await mkdir(this.config.outputDir, { recursive: true });
// Get all repos
const repos = await this.getAllRepos();
console.log(`Found ${repos.length} repositories`);
// Check which repos have backlog directories
const reposWithBacklog: GiteaRepo[] = [];
// Process in batches for concurrency control
const batchSize = this.config.concurrency;
for (let i = 0; i < repos.length; i += batchSize) {
const batch = repos.slice(i, i + batchSize);
const results = await Promise.all(
batch.map(async (repo) => {
const hasBacklog = await this.hasBacklogDir(repo);
return { repo, hasBacklog };
}),
);
for (const { repo, hasBacklog } of results) {
if (hasBacklog) {
reposWithBacklog.push(repo);
if (this.config.verbose) {
console.log(`${repo.full_name} has backlog/`);
}
}
}
}
console.log(`Found ${reposWithBacklog.length} repositories with backlog/`);
// Clone or pull repos with backlog
let synced = 0;
for (const repo of reposWithBacklog) {
const success = await this.cloneOrPullRepo(repo);
if (success) synced++;
}
// Cleanup repos that no longer have backlog or were deleted
const validNames = new Set(reposWithBacklog.map((r) => r.name));
await this.cleanupStaleRepos(validNames);
console.log(`Synced ${synced}/${reposWithBacklog.length} repositories`);
return {
total: repos.length,
withBacklog: reposWithBacklog.length,
synced,
};
}
}
// Parse CLI arguments
function parseArgs(): ScannerConfig {
const args = process.argv.slice(2);
const getArg = (name: string, envVar: string, defaultValue?: string): string | undefined => {
const index = args.indexOf(`--${name}`);
if (index !== -1 && args[index + 1]) {
return args[index + 1];
}
return process.env[envVar] || defaultValue;
};
const giteaUrl = getArg("gitea-url", "GITEA_URL", "https://gitea.jeffemmett.com");
const giteaToken = getArg("gitea-token", "GITEA_TOKEN");
const outputDir = getArg("output", "GITEA_OUTPUT_DIR", "/opt/gitea-repos");
const sshKeyPath = getArg("ssh-key", "GITEA_SSH_KEY", "/root/.ssh/gitea_ed25519");
const owner = getArg("owner", "GITEA_OWNER", "jeffemmett");
const concurrency = Number.parseInt(getArg("concurrency", "GITEA_CONCURRENCY", "5") || "5", 10);
const verbose = args.includes("--verbose") || args.includes("-v");
if (!giteaUrl) {
console.error("Error: --gitea-url or GITEA_URL is required");
process.exit(1);
}
if (!outputDir) {
console.error("Error: --output or GITEA_OUTPUT_DIR is required");
process.exit(1);
}
return {
giteaUrl,
giteaToken,
outputDir,
sshKeyPath,
owner,
concurrency,
verbose,
};
}
// Main entry point
if (import.meta.main) {
const config = parseArgs();
const scanner = new GiteaScanner(config);
try {
const result = await scanner.scan();
console.log("\nScan complete:");
console.log(` Total repos: ${result.total}`);
console.log(` With backlog/: ${result.withBacklog}`);
console.log(` Successfully synced: ${result.synced}`);
} catch (error) {
console.error("Scan failed:", error);
process.exit(1);
}
}
export { GiteaScanner, type ScannerConfig };

View File

@ -511,8 +511,17 @@ export class BacklogAggregator {
tasks = tasks.filter((t) => (t.priority ?? "").toLowerCase() === priority.toLowerCase()); tasks = tasks.filter((t) => (t.priority ?? "").toLowerCase() === priority.toLowerCase());
} }
// Sort by project then by task ID // Sort: due-date tasks first (soonest/overdue at top), then by project, then by task ID
const now = new Date().toISOString().slice(0, 10);
tasks = tasks.sort((a, b) => { tasks = tasks.sort((a, b) => {
const aDue = a.dueDate || "";
const bDue = b.dueDate || "";
// Tasks with due dates come before tasks without
if (aDue && !bDue) return -1;
if (!aDue && bDue) return 1;
// Both have due dates: sort soonest first
if (aDue && bDue) return aDue.localeCompare(bDue);
// Neither has due date: group by project, then by task ID
const projectCompare = a.projectName.localeCompare(b.projectName); const projectCompare = a.projectName.localeCompare(b.projectName);
if (projectCompare !== 0) return projectCompare; if (projectCompare !== 0) return projectCompare;
return sortByTaskId([a, b])[0] === a ? -1 : 1; return sortByTaskId([a, b])[0] === a ? -1 : 1;

View File

@ -176,6 +176,13 @@ export function parseTask(content: string): Task {
: frontmatter.estimatedHours !== undefined : frontmatter.estimatedHours !== undefined
? Number(frontmatter.estimatedHours) ? Number(frontmatter.estimatedHours)
: undefined, : undefined,
dueDate: frontmatter.due_date
? normalizeDate(frontmatter.due_date)
: frontmatter.dueDate
? normalizeDate(frontmatter.dueDate)
: frontmatter.deadline
? normalizeDate(frontmatter.deadline)
: undefined,
}; };
} }

View File

@ -52,6 +52,8 @@ export interface Task {
doToday?: boolean; doToday?: boolean;
/** Estimated hours to complete the task (for time tracking and invoicing) */ /** Estimated hours to complete the task (for time tracking and invoicing) */
estimatedHours?: number; estimatedHours?: number;
/** Due date / deadline for the task (YYYY-MM-DD or YYYY-MM-DD HH:mm) */
dueDate?: string;
} }
/** /**

File diff suppressed because one or more lines are too long