commit 374c55770dd5bbc3b1bb81a0d7b0974aa4ab3ca2 Author: Jeff Emmett Date: Sun Mar 22 18:21:57 2026 -0700 feat: Initial upload-service implementation Go service streaming file uploads to Cloudflare R2 with SQLite metadata. Features: drag-and-drop web UI, presigned URL downloads, password protection, expiry with cleanup, rate limiting, CLI tool, Infisical secret injection. Co-Authored-By: Claude Opus 4.6 diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..c906a31 --- /dev/null +++ b/.env.example @@ -0,0 +1,14 @@ +# Required: R2 credentials (set directly or via Infisical) +R2_ACCOUNT_ID=your_cloudflare_account_id +R2_ACCESS_KEY_ID=your_r2_access_key +R2_SECRET_ACCESS_KEY=your_r2_secret_key +R2_BUCKET_NAME=uploads + +# Infisical (for production — secrets injected at startup) +INFISICAL_CLIENT_ID= +INFISICAL_CLIENT_SECRET= + +# Optional overrides +# MAX_UPLOAD_SIZE=5368709120 # 5GB +# RATE_LIMIT=2 # requests/sec per IP +# RATE_BURST=5 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..819a675 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +upload-service +.env +*.db +/data/ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..0652a03 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,26 @@ +FROM golang:1.24-alpine AS builder + +RUN apk add --no-cache git + +WORKDIR /src +COPY go.mod go.sum ./ +RUN go mod download +COPY . . +RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /upload-service . 

FROM alpine:3.21

# curl/jq are required by entrypoint.sh for Infisical secret injection;
# curl doubles as the healthcheck probe below.
RUN apk add --no-cache ca-certificates curl jq

COPY --from=builder /upload-service /usr/local/bin/upload-service
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# 65534 = nobody; the SQLite database lives in /data (volume-mounted at runtime).
RUN mkdir -p /data && chown 65534:65534 /data

USER 65534:65534

EXPOSE 8080

# Let orchestrators detect a wedged container. The root path serves the web UI.
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
  CMD curl -fsS http://localhost:8080/ || exit 1

ENTRYPOINT ["/entrypoint.sh"]
CMD ["upload-service"]
diff --git a/cli/upload.sh b/cli/upload.sh
new file mode 100644
index 0000000..f0881ae
--- /dev/null
+++ b/cli/upload.sh
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Symlink — the actual script is embedded at web/static/upload.sh
# For local dev, run: web/static/upload.sh
echo "The CLI script is served from the upload service itself."
echo "Install: curl -o ~/.local/bin/upload https://upload.jeffemmett.com/cli && chmod +x ~/.local/bin/upload"
diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml
new file mode 100644
index 0000000..ab342b8
--- /dev/null
+++ b/docker-compose.prod.yml
@@ -0,0 +1,43 @@
services:
  upload:
    build: .
    container_name: upload-service
    restart: unless-stopped
    volumes:
      - upload_data:/data
    env_file:
      - .env
    environment:
      - PORT=8080
      - DB_PATH=/data/upload.db
      - BASE_URL=https://upload.jeffemmett.com
      - INFISICAL_PROJECT_SLUG=upload-service
      - INFISICAL_ENV=prod
      - INFISICAL_URL=http://infisical:8080
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.upload.rule=Host(`upload.jeffemmett.com`)"
      - "traefik.http.routers.upload.entrypoints=web"
      - "traefik.http.middlewares.upload-headers.headers.customrequestheaders.X-Forwarded-Proto=https"
      - "traefik.http.services.upload.loadbalancer.server.port=8080"
      # Disable request buffering for large uploads
      - "traefik.http.middlewares.upload-buffering.buffering.maxRequestBodyBytes=0"
      # Attach both middlewares in ONE label: the original file set
      # traefik.http.routers.upload.middlewares twice; with duplicate label
      # keys only one assignment survives, so whether buffering was disabled
      # depended on which duplicate won.
      - "traefik.http.routers.upload.middlewares=upload-headers,upload-buffering"
      - "traefik.docker.network=traefik-public"
    cap_drop:
      - ALL
    security_opt:
      - no-new-privileges:true
+ read_only: true + tmpfs: + - /tmp + networks: + - traefik-public + +volumes: + upload_data: + +networks: + traefik-public: + external: true diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..5a8b5fc --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,18 @@ +services: + upload: + build: . + container_name: upload-service + ports: + - "8080:8080" + volumes: + - upload_data:/data + env_file: + - .env + environment: + - PORT=8080 + - DB_PATH=/data/upload.db + - BASE_URL=http://localhost:8080 + - INFISICAL_PROJECT_SLUG=upload-service + +volumes: + upload_data: diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100644 index 0000000..9b99e44 --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,56 @@ +#!/bin/sh +# Infisical secret injection entrypoint (curl+jq) +# For images without Python or Node.js (e.g., Rust/Go binaries on minimal base images) +# Required env vars: INFISICAL_CLIENT_ID, INFISICAL_CLIENT_SECRET +# Optional: INFISICAL_PROJECT_SLUG, INFISICAL_ENV (default: prod), +# INFISICAL_URL (default: http://infisical:8080) +# +# Prerequisites: curl and jq must be installed in the image + +set -e + +export INFISICAL_URL="${INFISICAL_URL:-http://infisical:8080}" +export INFISICAL_ENV="${INFISICAL_ENV:-prod}" +# IMPORTANT: Set INFISICAL_PROJECT_SLUG in your docker-compose.yml +export INFISICAL_PROJECT_SLUG="${INFISICAL_PROJECT_SLUG:?INFISICAL_PROJECT_SLUG must be set}" + +if [ -z "$INFISICAL_CLIENT_ID" ] || [ -z "$INFISICAL_CLIENT_SECRET" ]; then + echo "[infisical] No credentials set, starting without secret injection" + exec "$@" +fi + +echo "[infisical] Fetching secrets from ${INFISICAL_PROJECT_SLUG}/${INFISICAL_ENV}..." 
+ +# Authenticate +AUTH_RESPONSE=$(curl -sf -X POST "${INFISICAL_URL}/api/v1/auth/universal-auth/login" \ + -H "Content-Type: application/json" \ + -d "{\"clientId\":\"${INFISICAL_CLIENT_ID}\",\"clientSecret\":\"${INFISICAL_CLIENT_SECRET}\"}") || { + echo "[infisical] WARNING: Auth failed, starting with existing env vars" + exec "$@" +} + +TOKEN=$(echo "$AUTH_RESPONSE" | jq -r '.accessToken') +if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ]; then + echo "[infisical] WARNING: No token received, starting with existing env vars" + exec "$@" +fi + +# Fetch secrets +SECRETS=$(curl -sf "${INFISICAL_URL}/api/v3/secrets/raw?workspaceSlug=${INFISICAL_PROJECT_SLUG}&environment=${INFISICAL_ENV}&secretPath=/&recursive=true" \ + -H "Authorization: Bearer ${TOKEN}") || { + echo "[infisical] WARNING: Failed to fetch secrets, starting with existing env vars" + exec "$@" +} + +# Parse and export using jq's @sh for proper escaping +EXPORTS=$(echo "$SECRETS" | jq -r '.secrets[]? | "export " + .secretKey + "=" + (.secretValue | @sh)') + +if [ -n "$EXPORTS" ]; then + COUNT=$(echo "$EXPORTS" | grep -c "^export " || true) + eval "$EXPORTS" + echo "[infisical] Injected ${COUNT} secrets" +else + echo "[infisical] WARNING: No secrets found" +fi + +exec "$@" diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..8dfdb80 --- /dev/null +++ b/go.mod @@ -0,0 +1,28 @@ +module github.com/jeffemmett/upload-service + +go 1.24 + +require ( + github.com/aws/aws-sdk-go-v2 v1.36.3 + github.com/aws/aws-sdk-go-v2/credentials v1.17.67 + github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3 + github.com/matoous/go-nanoid/v2 v2.1.0 + github.com/ncruces/go-sqlite3 v0.25.0 + golang.org/x/crypto v0.37.0 + golang.org/x/time v0.11.0 +) + +require ( + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a 
v1.3.34 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect + github.com/aws/smithy-go v1.22.2 // indirect + github.com/ncruces/julianday v1.0.0 // indirect + github.com/tetratelabs/wazero v1.9.0 // indirect + golang.org/x/sys v0.32.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..5f5f0a6 --- /dev/null +++ b/go.sum @@ -0,0 +1,48 @@ +github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= +github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 
h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1 h1:4nm2G6A4pV9rdlWzGMPv4BNtQp22v1hg3yrtkYpeLl8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3 h1:BRXS0U76Z8wfF+bnkilA2QwpIch6URlm++yPUt9QPmQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3/go.mod h1:bNXKFFyaiVvWuR6O16h/I1724+aXe/tAkA9/QS01t5k= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/matoous/go-nanoid/v2 v2.1.0 h1:P64+dmq21hhWdtvZfEAofnvJULaRR1Yib0+PnU669bE= +github.com/matoous/go-nanoid/v2 v2.1.0/go.mod h1:KlbGNQ+FhrUNIHUxZdL63t7tl4LaPkZNpUULS8H4uVM= +github.com/ncruces/go-sqlite3 v0.25.0 h1:trugKUs98Zwy9KwRr/EUxZHL92LYt7UqcKqAfpGpK+I= +github.com/ncruces/go-sqlite3 v0.25.0/go.mod h1:n6Z7036yFilJx04yV0mi5JWaF66rUmXn1It9Ux8dx68= +github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M= +github.com/ncruces/julianday v1.0.0/go.mod h1:Dusn2KvZrrovOMJuOt0TNXL6tB7U2E8kvza5fFc9G7g= +github.com/pmezard/go-difflib 
v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I= +github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/cleanup/cleanup.go b/internal/cleanup/cleanup.go new file mode 100644 index 0000000..8802d69 --- /dev/null +++ b/internal/cleanup/cleanup.go @@ -0,0 +1,51 @@ +package cleanup + +import ( + "context" + "log" + "time" + + "github.com/jeffemmett/upload-service/internal/r2" + "github.com/jeffemmett/upload-service/internal/store" +) + +func Start(ctx context.Context, s *store.Store, r *r2.Client) { + ticker := time.NewTicker(1 * time.Hour) + defer ticker.Stop() + + // Run once on startup + run(ctx, s, r) + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + run(ctx, s, r) + } + } +} + +func run(ctx context.Context, s *store.Store, r *r2.Client) { + files, err := 
s.ListExpired() + if err != nil { + log.Printf("cleanup: list expired error: %v", err) + return + } + + if len(files) == 0 { + return + } + + log.Printf("cleanup: removing %d expired files", len(files)) + + for _, f := range files { + if err := r.Delete(ctx, f.R2Key); err != nil { + log.Printf("cleanup: r2 delete %s error: %v", f.ID, err) + continue + } + if err := s.Delete(f.ID); err != nil { + log.Printf("cleanup: db delete %s error: %v", f.ID, err) + } + } +} diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..7546852 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,72 @@ +package config + +import ( + "fmt" + "os" + "strconv" +) + +type Config struct { + // Server + Port string + BaseURL string + + // R2 + R2AccountID string + R2AccessKeyID string + R2SecretAccessKey string + R2BucketName string + R2Endpoint string + + // Limits + MaxUploadSize int64 // bytes + DefaultExpiryDays int // 0 = no default expiry + + // Rate limiting + RateLimit float64 // requests per second per IP + RateBurst int + + // Database + DBPath string +} + +func Load() (*Config, error) { + c := &Config{ + Port: getEnv("PORT", "8080"), + BaseURL: getEnv("BASE_URL", "http://localhost:8080"), + + R2AccountID: os.Getenv("R2_ACCOUNT_ID"), + R2AccessKeyID: os.Getenv("R2_ACCESS_KEY_ID"), + R2SecretAccessKey: os.Getenv("R2_SECRET_ACCESS_KEY"), + R2BucketName: getEnv("R2_BUCKET_NAME", "uploads"), + + DBPath: getEnv("DB_PATH", "/data/upload.db"), + } + + c.R2Endpoint = fmt.Sprintf("https://%s.r2.cloudflarestorage.com", c.R2AccountID) + + maxSize, err := strconv.ParseInt(getEnv("MAX_UPLOAD_SIZE", "5368709120"), 10, 64) // 5GB default + if err != nil { + return nil, fmt.Errorf("invalid MAX_UPLOAD_SIZE: %w", err) + } + c.MaxUploadSize = maxSize + + c.DefaultExpiryDays, _ = strconv.Atoi(getEnv("DEFAULT_EXPIRY_DAYS", "0")) + + rateLimit, _ := strconv.ParseFloat(getEnv("RATE_LIMIT", "2"), 64) + c.RateLimit = rateLimit + c.RateBurst, _ = 
strconv.Atoi(getEnv("RATE_BURST", "5")) + + if c.R2AccountID == "" || c.R2AccessKeyID == "" || c.R2SecretAccessKey == "" { + return nil, fmt.Errorf("R2_ACCOUNT_ID, R2_ACCESS_KEY_ID, and R2_SECRET_ACCESS_KEY are required") + } + + return c, nil +} + +func getEnv(key, fallback string) string { + if v := os.Getenv(key); v != "" { + return v + } + return fallback +} diff --git a/internal/db/db.go b/internal/db/db.go new file mode 100644 index 0000000..b8f2a48 --- /dev/null +++ b/internal/db/db.go @@ -0,0 +1,36 @@ +package db + +import ( + "database/sql" + _ "embed" + "fmt" + "os" + "path/filepath" + + _ "github.com/ncruces/go-sqlite3/driver" + _ "github.com/ncruces/go-sqlite3/embed" +) + +//go:embed schema.sql +var schema string + +func Open(dbPath string) (*sql.DB, error) { + dir := filepath.Dir(dbPath) + if err := os.MkdirAll(dir, 0750); err != nil { + return nil, fmt.Errorf("create db dir: %w", err) + } + + db, err := sql.Open("sqlite3", dbPath+"?_journal_mode=WAL&_busy_timeout=5000") + if err != nil { + return nil, fmt.Errorf("open db: %w", err) + } + + db.SetMaxOpenConns(1) // SQLite single-writer + + if _, err := db.Exec(schema); err != nil { + db.Close() + return nil, fmt.Errorf("init schema: %w", err) + } + + return db, nil +} diff --git a/internal/db/schema.sql b/internal/db/schema.sql new file mode 100644 index 0000000..534cec4 --- /dev/null +++ b/internal/db/schema.sql @@ -0,0 +1,15 @@ +CREATE TABLE IF NOT EXISTS files ( + id TEXT PRIMARY KEY, + filename TEXT NOT NULL, + r2_key TEXT NOT NULL UNIQUE, + size_bytes INTEGER NOT NULL, + content_type TEXT NOT NULL, + uploaded_at DATETIME DEFAULT (datetime('now')), + expires_at DATETIME, + password_hash TEXT, + delete_token TEXT NOT NULL UNIQUE, + download_count INTEGER DEFAULT 0 +); + +CREATE INDEX IF NOT EXISTS idx_files_expires_at ON files(expires_at) WHERE expires_at IS NOT NULL; +CREATE INDEX IF NOT EXISTS idx_files_delete_token ON files(delete_token); diff --git a/internal/handler/auth.go 
b/internal/handler/auth.go new file mode 100644 index 0000000..430fd51 --- /dev/null +++ b/internal/handler/auth.go @@ -0,0 +1,104 @@ +package handler + +import ( + "database/sql" + "embed" + "html/template" + "log" + "net/http" + "time" + + "golang.org/x/crypto/bcrypt" +) + +var passwordTmpl *template.Template + +func InitTemplates(webFS embed.FS) { + passwordTmpl = template.Must(template.ParseFS(webFS, "web/password.html")) +} + +func (h *Handler) AuthPage(w http.ResponseWriter, r *http.Request) { + id := r.PathValue("id") + if id == "" { + http.NotFound(w, r) + return + } + + rec, err := h.store.Get(id) + if err == sql.ErrNoRows { + http.NotFound(w, r) + return + } + if err != nil { + log.Printf("db get error: %v", err) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + + if rec.PasswordHash == nil { + http.Redirect(w, r, "/f/"+id, http.StatusSeeOther) + return + } + + data := map[string]any{ + "ID": id, + "Filename": rec.Filename, + "Error": r.URL.Query().Get("error"), + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + passwordTmpl.Execute(w, data) +} + +func (h *Handler) AuthSubmit(w http.ResponseWriter, r *http.Request) { + id := r.PathValue("id") + if id == "" { + http.NotFound(w, r) + return + } + + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + password := r.FormValue("password") + if password == "" { + http.Redirect(w, r, "/f/"+id+"/auth?error=password+required", http.StatusSeeOther) + return + } + + rec, err := h.store.Get(id) + if err == sql.ErrNoRows { + http.NotFound(w, r) + return + } + if err != nil { + log.Printf("db get error: %v", err) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + + if rec.PasswordHash == nil { + http.Redirect(w, r, "/f/"+id, http.StatusSeeOther) + return + } + + if err := bcrypt.CompareHashAndPassword([]byte(*rec.PasswordHash), []byte(password)); err != nil { + http.Redirect(w, r, 
"/f/"+id+"/auth?error=wrong+password", http.StatusSeeOther) + return + } + + // Set auth cookie (10 min lifetime) + http.SetCookie(w, &http.Cookie{ + Name: "auth_" + id, + Value: "granted", + Path: "/f/" + id, + MaxAge: 600, + HttpOnly: true, + SameSite: http.SameSiteLaxMode, + Secure: true, + Expires: time.Now().Add(10 * time.Minute), + }) + + http.Redirect(w, r, "/f/"+id, http.StatusSeeOther) +} diff --git a/internal/handler/delete.go b/internal/handler/delete.go new file mode 100644 index 0000000..67cfe11 --- /dev/null +++ b/internal/handler/delete.go @@ -0,0 +1,49 @@ +package handler + +import ( + "encoding/json" + "log" + "net/http" + "strings" +) + +func (h *Handler) Delete(w http.ResponseWriter, r *http.Request) { + id := r.PathValue("id") + if id == "" { + http.NotFound(w, r) + return + } + + auth := r.Header.Get("Authorization") + if !strings.HasPrefix(auth, "Bearer ") { + http.Error(w, "Authorization: Bearer required", http.StatusUnauthorized) + return + } + token := strings.TrimPrefix(auth, "Bearer ") + + rec, err := h.store.Get(id) + if err != nil { + http.NotFound(w, r) + return + } + + if rec.DeleteToken != token { + http.Error(w, "invalid delete token", http.StatusForbidden) + return + } + + if err := h.r2.Delete(r.Context(), rec.R2Key); err != nil { + log.Printf("r2 delete error: %v", err) + http.Error(w, "failed to delete from storage", http.StatusInternalServerError) + return + } + + if err := h.store.Delete(id); err != nil { + log.Printf("db delete error: %v", err) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{"status": "deleted", "id": id}) +} diff --git a/internal/handler/download.go b/internal/handler/download.go new file mode 100644 index 0000000..1426730 --- /dev/null +++ b/internal/handler/download.go @@ -0,0 +1,54 @@ +package handler + +import ( + "database/sql" + "log" + "net/http" + "time" +) + +func (h 
*Handler) Download(w http.ResponseWriter, r *http.Request) { + id := r.PathValue("id") + if id == "" { + http.NotFound(w, r) + return + } + + rec, err := h.store.Get(id) + if err == sql.ErrNoRows { + http.NotFound(w, r) + return + } + if err != nil { + log.Printf("db get error: %v", err) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + + // Check expiry + if rec.ExpiresAt != nil && rec.ExpiresAt.Before(time.Now().UTC()) { + http.Error(w, "this file has expired", http.StatusGone) + return + } + + // Check password + if rec.PasswordHash != nil { + cookie, err := r.Cookie("auth_" + id) + if err != nil || cookie.Value != "granted" { + http.Redirect(w, r, "/f/"+id+"/auth", http.StatusSeeOther) + return + } + } + + // Generate presigned URL + url, err := h.r2.PresignGet(r.Context(), rec.R2Key, rec.Filename) + if err != nil { + log.Printf("presign error: %v", err) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + + h.store.IncrementDownloads(id) + + http.Redirect(w, r, url, http.StatusFound) +} diff --git a/internal/handler/info.go b/internal/handler/info.go new file mode 100644 index 0000000..30d59b1 --- /dev/null +++ b/internal/handler/info.go @@ -0,0 +1,45 @@ +package handler + +import ( + "database/sql" + "encoding/json" + "log" + "net/http" + "time" +) + +func (h *Handler) Info(w http.ResponseWriter, r *http.Request) { + id := r.PathValue("id") + if id == "" { + http.NotFound(w, r) + return + } + + rec, err := h.store.Get(id) + if err == sql.ErrNoRows { + http.NotFound(w, r) + return + } + if err != nil { + log.Printf("db get error: %v", err) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + + resp := map[string]any{ + "id": rec.ID, + "filename": rec.Filename, + "size": rec.SizeBytes, + "content_type": rec.ContentType, + "uploaded_at": rec.UploadedAt.Format(time.RFC3339), + "download_count": rec.DownloadCount, + "password": rec.PasswordHash != nil, + } + if rec.ExpiresAt != 
nil { + resp["expires_at"] = rec.ExpiresAt.Format(time.RFC3339) + resp["expired"] = rec.ExpiresAt.Before(time.Now().UTC()) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(resp) +} diff --git a/internal/handler/upload.go b/internal/handler/upload.go new file mode 100644 index 0000000..b134c6e --- /dev/null +++ b/internal/handler/upload.go @@ -0,0 +1,232 @@ +package handler + +import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "log" + "mime" + "net/http" + "strconv" + "strings" + "time" + + gonanoid "github.com/matoous/go-nanoid/v2" + "golang.org/x/crypto/bcrypt" + + "github.com/jeffemmett/upload-service/internal/config" + "github.com/jeffemmett/upload-service/internal/r2" + "github.com/jeffemmett/upload-service/internal/store" +) + +type Handler struct { + store *store.Store + r2 *r2.Client + config *config.Config +} + +func New(s *store.Store, r *r2.Client, c *config.Config) *Handler { + return &Handler{store: s, r2: r, config: c} +} + +func (h *Handler) Upload(w http.ResponseWriter, r *http.Request) { + reader, err := r.MultipartReader() + if err != nil { + http.Error(w, "expected multipart/form-data", http.StatusBadRequest) + return + } + + var ( + filename string + contentType string + expiresIn string + password string + fileSize int64 + fileUploaded bool + fileID string + r2Key string + ) + + fileID, err = gonanoid.New(8) + if err != nil { + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + + deleteToken := make([]byte, 32) + if _, err := rand.Read(deleteToken); err != nil { + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + deleteTokenHex := hex.EncodeToString(deleteToken) + + for { + part, err := reader.NextPart() + if err != nil { + break + } + + switch part.FormName() { + case "file": + if fileUploaded { + part.Close() + continue + } + filename = part.FileName() + if filename == "" { + http.Error(w, "no filename", http.StatusBadRequest) + 
return + } + + // Detect content type from extension, fall back to part header + ct := mime.TypeByExtension("." + fileExtension(filename)) + if ct == "" { + ct = part.Header.Get("Content-Type") + } + if ct == "" { + ct = "application/octet-stream" + } + contentType = ct + + r2Key = fmt.Sprintf("uploads/%s/%s", fileID, filename) + + // Check Content-Length hint if available + if cl := r.Header.Get("Content-Length"); cl != "" { + if size, err := strconv.ParseInt(cl, 10, 64); err == nil && size > h.config.MaxUploadSize { + http.Error(w, fmt.Sprintf("file too large (max %d bytes)", h.config.MaxUploadSize), http.StatusRequestEntityTooLarge) + return + } + } + + // Stream directly to R2 — the part reader is the body pipe + // We use a counting reader to track size + cr := &countingReader{r: part} + if err := h.r2.Upload(r.Context(), r2Key, contentType, -1, cr); err != nil { + log.Printf("r2 upload error: %v", err) + http.Error(w, "upload failed", http.StatusInternalServerError) + return + } + fileSize = cr.n + fileUploaded = true + + if fileSize > h.config.MaxUploadSize { + // File exceeded limit after streaming — clean up + h.r2.Delete(r.Context(), r2Key) + http.Error(w, fmt.Sprintf("file too large (max %d bytes)", h.config.MaxUploadSize), http.StatusRequestEntityTooLarge) + return + } + + case "expires_in": + buf := make([]byte, 64) + n, _ := part.Read(buf) + expiresIn = strings.TrimSpace(string(buf[:n])) + + case "password": + buf := make([]byte, 256) + n, _ := part.Read(buf) + password = strings.TrimSpace(string(buf[:n])) + } + part.Close() + } + + if !fileUploaded { + http.Error(w, "no file provided", http.StatusBadRequest) + return + } + + rec := &store.FileRecord{ + ID: fileID, + Filename: filename, + R2Key: r2Key, + SizeBytes: fileSize, + ContentType: contentType, + DeleteToken: deleteTokenHex, + } + + // Handle expiry + if expiresIn != "" { + dur, err := parseDuration(expiresIn) + if err != nil { + h.r2.Delete(r.Context(), r2Key) + http.Error(w, "invalid 
expires_in value (use: 1h, 1d, 7d, 30d)", http.StatusBadRequest) + return + } + t := time.Now().UTC().Add(dur) + rec.ExpiresAt = &t + } + + // Handle password + if password != "" { + hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) + if err != nil { + h.r2.Delete(r.Context(), r2Key) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + hashStr := string(hash) + rec.PasswordHash = &hashStr + } + + if err := h.store.Create(rec); err != nil { + h.r2.Delete(r.Context(), r2Key) + log.Printf("db create error: %v", err) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + + resp := map[string]any{ + "id": fileID, + "filename": filename, + "size": fileSize, + "url": fmt.Sprintf("%s/f/%s", h.config.BaseURL, fileID), + "delete_url": fmt.Sprintf("%s/f/%s", h.config.BaseURL, fileID), + "delete_token": deleteTokenHex, + } + if rec.ExpiresAt != nil { + resp["expires_at"] = rec.ExpiresAt.Format(time.RFC3339) + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(resp) +} + +func parseDuration(s string) (time.Duration, error) { + s = strings.TrimSpace(strings.ToLower(s)) + if strings.HasSuffix(s, "d") { + days, err := strconv.Atoi(strings.TrimSuffix(s, "d")) + if err != nil || days < 1 || days > 365 { + return 0, fmt.Errorf("invalid days") + } + return time.Duration(days) * 24 * time.Hour, nil + } + if strings.HasSuffix(s, "h") { + hours, err := strconv.Atoi(strings.TrimSuffix(s, "h")) + if err != nil || hours < 1 || hours > 8760 { + return 0, fmt.Errorf("invalid hours") + } + return time.Duration(hours) * time.Hour, nil + } + return 0, fmt.Errorf("unsupported format") +} + +func fileExtension(name string) string { + for i := len(name) - 1; i >= 0; i-- { + if name[i] == '.' 
{ + return name[i+1:] + } + } + return "" +} + +type countingReader struct { + r interface{ Read([]byte) (int, error) } + n int64 +} + +func (cr *countingReader) Read(p []byte) (int, error) { + n, err := cr.r.Read(p) + cr.n += int64(n) + return n, err +} diff --git a/internal/middleware/ratelimit.go b/internal/middleware/ratelimit.go new file mode 100644 index 0000000..ae04115 --- /dev/null +++ b/internal/middleware/ratelimit.go @@ -0,0 +1,66 @@ +package middleware + +import ( + "net/http" + "strings" + "sync" + + "golang.org/x/time/rate" +) + +type RateLimiter struct { + limiters map[string]*rate.Limiter + mu sync.Mutex + rate rate.Limit + burst int +} + +func NewRateLimiter(r float64, burst int) *RateLimiter { + return &RateLimiter{ + limiters: make(map[string]*rate.Limiter), + rate: rate.Limit(r), + burst: burst, + } +} + +func (rl *RateLimiter) getLimiter(ip string) *rate.Limiter { + rl.mu.Lock() + defer rl.mu.Unlock() + + l, exists := rl.limiters[ip] + if !exists { + l = rate.NewLimiter(rl.rate, rl.burst) + rl.limiters[ip] = l + } + return l +} + +func (rl *RateLimiter) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ip := clientIP(r) + if !rl.getLimiter(ip).Allow() { + http.Error(w, "rate limit exceeded", http.StatusTooManyRequests) + return + } + next.ServeHTTP(w, r) + }) +} + +func clientIP(r *http.Request) string { + // Cloudflare / proxy headers + if ip := r.Header.Get("CF-Connecting-IP"); ip != "" { + return ip + } + if ip := r.Header.Get("X-Real-IP"); ip != "" { + return ip + } + if xff := r.Header.Get("X-Forwarded-For"); xff != "" { + return strings.Split(xff, ",")[0] + } + // Strip port + ip := r.RemoteAddr + if idx := strings.LastIndex(ip, ":"); idx != -1 { + ip = ip[:idx] + } + return ip +} diff --git a/internal/middleware/security.go b/internal/middleware/security.go new file mode 100644 index 0000000..a7bf7cf --- /dev/null +++ b/internal/middleware/security.go @@ -0,0 +1,28 @@ 
+package middleware + +import "net/http" + +func Security(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Content-Type-Options", "nosniff") + w.Header().Set("X-Frame-Options", "DENY") + w.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin") + w.Header().Set("Permissions-Policy", "camera=(), microphone=(), geolocation=()") + + // CORS — allow any origin for API usage + origin := r.Header.Get("Origin") + if origin != "" { + w.Header().Set("Access-Control-Allow-Origin", origin) + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, DELETE, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type") + w.Header().Set("Access-Control-Max-Age", "86400") + } + + if r.Method == http.MethodOptions { + w.WriteHeader(http.StatusNoContent) + return + } + + next.ServeHTTP(w, r) + }) +} diff --git a/internal/r2/r2.go b/internal/r2/r2.go new file mode 100644 index 0000000..6a43d48 --- /dev/null +++ b/internal/r2/r2.go @@ -0,0 +1,165 @@ +package r2 + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/jeffemmett/upload-service/internal/config" +) + +const ( + multipartThreshold = 100 * 1024 * 1024 // 100MB + partSize = 64 * 1024 * 1024 // 64MB chunks + presignExpiry = 5 * time.Minute +) + +type Client struct { + s3 *s3.Client + presign *s3.PresignClient + bucket string +} + +func NewClient(cfg *config.Config) *Client { + s3Client := s3.New(s3.Options{ + BaseEndpoint: aws.String(cfg.R2Endpoint), + Region: "auto", + Credentials: credentials.NewStaticCredentialsProvider(cfg.R2AccessKeyID, cfg.R2SecretAccessKey, ""), + }) + + return &Client{ + s3: s3Client, + presign: s3.NewPresignClient(s3Client), + bucket: cfg.R2BucketName, + } +} + +// Upload streams the reader to R2. 
For files > 100MB, uses multipart upload. +func (c *Client) Upload(ctx context.Context, key, contentType string, size int64, body io.Reader) error { + if size > multipartThreshold { + return c.uploadMultipart(ctx, key, contentType, body) + } + return c.uploadSimple(ctx, key, contentType, body) +} + +func (c *Client) uploadSimple(ctx context.Context, key, contentType string, body io.Reader) error { + _, err := c.s3.PutObject(ctx, &s3.PutObjectInput{ + Bucket: &c.bucket, + Key: &key, + Body: body, + ContentType: &contentType, + }) + return err +} + +func (c *Client) uploadMultipart(ctx context.Context, key, contentType string, body io.Reader) error { + create, err := c.s3.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{ + Bucket: &c.bucket, + Key: &key, + ContentType: &contentType, + }) + if err != nil { + return fmt.Errorf("create multipart: %w", err) + } + uploadID := create.UploadId + + var parts []types.CompletedPart + buf := make([]byte, partSize) + partNum := int32(1) + + for { + n, readErr := io.ReadFull(body, buf) + if n == 0 && readErr != nil { + break + } + + upload, err := c.s3.UploadPart(ctx, &s3.UploadPartInput{ + Bucket: &c.bucket, + Key: &key, + UploadId: uploadID, + PartNumber: &partNum, + Body: io.NopCloser(io.LimitReader(bytesReader(buf[:n]), int64(n))), + }) + if err != nil { + c.s3.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{ + Bucket: &c.bucket, + Key: &key, + UploadId: uploadID, + }) + return fmt.Errorf("upload part %d: %w", partNum, err) + } + + parts = append(parts, types.CompletedPart{ + PartNumber: &partNum, + ETag: upload.ETag, + }) + partNum++ + + if readErr != nil { + break + } + } + + _, err = c.s3.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{ + Bucket: &c.bucket, + Key: &key, + UploadId: uploadID, + MultipartUpload: &types.CompletedMultipartUpload{ + Parts: parts, + }, + }) + if err != nil { + return fmt.Errorf("complete multipart: %w", err) + } + + return nil +} + +// PresignGet generates a 
presigned download URL valid for 5 minutes. +func (c *Client) PresignGet(ctx context.Context, key, filename string) (string, error) { + resp, err := c.presign.PresignGetObject(ctx, &s3.GetObjectInput{ + Bucket: &c.bucket, + Key: &key, + ResponseContentDisposition: aws.String(fmt.Sprintf(`attachment; filename="%s"`, filename)), + }, s3.WithPresignExpires(presignExpiry)) + if err != nil { + return "", err + } + return resp.URL, nil +} + +// Delete removes an object from R2. +func (c *Client) Delete(ctx context.Context, key string) error { + _, err := c.s3.DeleteObject(ctx, &s3.DeleteObjectInput{ + Bucket: &c.bucket, + Key: &key, + }) + return err +} + +// bytesReader wraps a byte slice as an io.Reader. +type bytesReaderImpl struct { + data []byte + pos int +} + +func bytesReader(b []byte) io.Reader { + // Make a copy so the buffer can be reused + cp := make([]byte, len(b)) + copy(cp, b) + return &bytesReaderImpl{data: cp} +} + +func (r *bytesReaderImpl) Read(p []byte) (int, error) { + if r.pos >= len(r.data) { + return 0, io.EOF + } + n := copy(p, r.data[r.pos:]) + r.pos += n + return n, nil +} diff --git a/internal/store/store.go b/internal/store/store.go new file mode 100644 index 0000000..aeaf4b0 --- /dev/null +++ b/internal/store/store.go @@ -0,0 +1,95 @@ +package store + +import ( + "database/sql" + "time" +) + +type FileRecord struct { + ID string + Filename string + R2Key string + SizeBytes int64 + ContentType string + UploadedAt time.Time + ExpiresAt *time.Time + PasswordHash *string + DeleteToken string + DownloadCount int64 +} + +type Store struct { + db *sql.DB +} + +func New(db *sql.DB) *Store { + return &Store{db: db} +} + +func (s *Store) Create(f *FileRecord) error { + _, err := s.db.Exec( + `INSERT INTO files (id, filename, r2_key, size_bytes, content_type, expires_at, password_hash, delete_token) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, + f.ID, f.Filename, f.R2Key, f.SizeBytes, f.ContentType, f.ExpiresAt, f.PasswordHash, f.DeleteToken, + ) + return err +} 
+ +func (s *Store) Get(id string) (*FileRecord, error) { + f := &FileRecord{} + err := s.db.QueryRow( + `SELECT id, filename, r2_key, size_bytes, content_type, uploaded_at, expires_at, password_hash, delete_token, download_count + FROM files WHERE id = ?`, id, + ).Scan(&f.ID, &f.Filename, &f.R2Key, &f.SizeBytes, &f.ContentType, &f.UploadedAt, &f.ExpiresAt, &f.PasswordHash, &f.DeleteToken, &f.DownloadCount) + if err != nil { + return nil, err + } + return f, nil +} + +func (s *Store) IncrementDownloads(id string) error { + _, err := s.db.Exec(`UPDATE files SET download_count = download_count + 1 WHERE id = ?`, id) + return err +} + +func (s *Store) Delete(id string) error { + _, err := s.db.Exec(`DELETE FROM files WHERE id = ?`, id) + return err +} + +func (s *Store) DeleteByToken(token string) (*FileRecord, error) { + f := &FileRecord{} + err := s.db.QueryRow( + `SELECT id, filename, r2_key, size_bytes, content_type, uploaded_at, expires_at, password_hash, delete_token, download_count + FROM files WHERE delete_token = ?`, token, + ).Scan(&f.ID, &f.Filename, &f.R2Key, &f.SizeBytes, &f.ContentType, &f.UploadedAt, &f.ExpiresAt, &f.PasswordHash, &f.DeleteToken, &f.DownloadCount) + if err != nil { + return nil, err + } + _, err = s.db.Exec(`DELETE FROM files WHERE delete_token = ?`, token) + if err != nil { + return nil, err + } + return f, nil +} + +func (s *Store) ListExpired() ([]*FileRecord, error) { + rows, err := s.db.Query( + `SELECT id, filename, r2_key, size_bytes, content_type, uploaded_at, expires_at, password_hash, delete_token, download_count + FROM files WHERE expires_at IS NOT NULL AND expires_at <= datetime('now')`, + ) + if err != nil { + return nil, err + } + defer rows.Close() + + var files []*FileRecord + for rows.Next() { + f := &FileRecord{} + if err := rows.Scan(&f.ID, &f.Filename, &f.R2Key, &f.SizeBytes, &f.ContentType, &f.UploadedAt, &f.ExpiresAt, &f.PasswordHash, &f.DeleteToken, &f.DownloadCount); err != nil { + return nil, err + } + files = 
append(files, f) + } + return files, rows.Err() +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..d8e4122 --- /dev/null +++ b/main.go @@ -0,0 +1,120 @@ +package main + +import ( + "context" + "embed" + "io/fs" + "log" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + "github.com/jeffemmett/upload-service/internal/cleanup" + "github.com/jeffemmett/upload-service/internal/config" + "github.com/jeffemmett/upload-service/internal/db" + "github.com/jeffemmett/upload-service/internal/handler" + "github.com/jeffemmett/upload-service/internal/middleware" + "github.com/jeffemmett/upload-service/internal/r2" + "github.com/jeffemmett/upload-service/internal/store" +) + +//go:embed web +var webFS embed.FS + +func main() { + cfg, err := config.Load() + if err != nil { + log.Fatalf("config: %v", err) + } + + database, err := db.Open(cfg.DBPath) + if err != nil { + log.Fatalf("db: %v", err) + } + defer database.Close() + + s := store.New(database) + r2Client := r2.NewClient(cfg) + h := handler.New(s, r2Client, cfg) + + handler.InitTemplates(webFS) + + // CLI script + cliScript, _ := fs.ReadFile(webFS, "web/static/upload.sh") + + mux := http.NewServeMux() + + // Web UI + mux.HandleFunc("GET /", func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + data, _ := fs.ReadFile(webFS, "web/index.html") + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.Write(data) + }) + + // Static assets + staticFS, _ := fs.Sub(webFS, "web/static") + mux.Handle("GET /static/", http.StripPrefix("/static/", http.FileServer(http.FS(staticFS)))) + + // API + mux.HandleFunc("POST /upload", h.Upload) + mux.HandleFunc("GET /f/{id}", h.Download) + mux.HandleFunc("GET /f/{id}/info", h.Info) + mux.HandleFunc("GET /f/{id}/auth", h.AuthPage) + mux.HandleFunc("POST /f/{id}/auth", h.AuthSubmit) + mux.HandleFunc("DELETE /f/{id}", h.Delete) + + // Health + mux.HandleFunc("GET /health", func(w http.ResponseWriter, _ 
*http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"status":"ok"}`)) + }) + + // CLI download + mux.HandleFunc("GET /cli", func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.Header().Set("Content-Disposition", `attachment; filename="upload.sh"`) + w.Write(cliScript) + }) + + // Middleware chain + rl := middleware.NewRateLimiter(cfg.RateLimit, cfg.RateBurst) + var chain http.Handler = mux + chain = rl.Middleware(chain) + chain = middleware.Security(chain) + + srv := &http.Server{ + Addr: ":" + cfg.Port, + Handler: chain, + ReadHeaderTimeout: 10 * time.Second, + IdleTimeout: 120 * time.Second, + // No read/write timeout — large uploads can take a long time + } + + // Cleanup goroutine + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go cleanup.Start(ctx, s, r2Client) + + // Graceful shutdown + go func() { + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + <-sigCh + log.Println("shutting down...") + cancel() + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer shutdownCancel() + srv.Shutdown(shutdownCtx) + }() + + log.Printf("listening on :%s", cfg.Port) + if err := srv.ListenAndServe(); err != http.ErrServerClosed { + log.Fatalf("server: %v", err) + } +} diff --git a/web/index.html b/web/index.html new file mode 100644 index 0000000..fcfd650 --- /dev/null +++ b/web/index.html @@ -0,0 +1,72 @@ + + + + + + upload.jeffemmett.com + + + +
+

upload

+

Simple file sharing. Up to 5 GB.

+ +
+
+ + + + + +

Drop a file here or

+ +
+
+ + + + + + + + + + +
+ + + diff --git a/web/password.html b/web/password.html new file mode 100644 index 0000000..7a1c49e --- /dev/null +++ b/web/password.html @@ -0,0 +1,28 @@ + + + + + + Password Required + + + +
+

Password Required

+

Enter the password to download {{.Filename}}

+ + {{if .Error}} +
{{.Error}}
+ {{end}} + +
+ + +
+ + +
+ + diff --git a/web/static/style.css b/web/static/style.css new file mode 100644 index 0000000..7ed3f1f --- /dev/null +++ b/web/static/style.css @@ -0,0 +1,223 @@ +*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; } + +:root { + --bg: #0a0a0a; + --surface: #141414; + --border: #2a2a2a; + --text: #e0e0e0; + --text-dim: #888; + --accent: #3b82f6; + --accent-hover: #2563eb; + --success: #22c55e; + --error: #ef4444; +} + +body { + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", system-ui, sans-serif; + background: var(--bg); + color: var(--text); + min-height: 100vh; + display: flex; + align-items: center; + justify-content: center; +} + +.container { + width: 100%; + max-width: 480px; + padding: 2rem; +} + +h1 { + font-size: 1.5rem; + font-weight: 600; + margin-bottom: 0.25rem; +} + +.subtitle { + color: var(--text-dim); + font-size: 0.875rem; + margin-bottom: 1.5rem; +} + +.dropzone { + border: 2px dashed var(--border); + border-radius: 12px; + padding: 3rem 1.5rem; + text-align: center; + cursor: pointer; + transition: border-color 0.2s, background 0.2s; +} + +.dropzone:hover, .dropzone.drag-over { + border-color: var(--accent); + background: rgba(59, 130, 246, 0.05); +} + +.dropzone-content svg { + color: var(--text-dim); + margin-bottom: 1rem; +} + +.dropzone-content p { + color: var(--text-dim); + font-size: 0.875rem; +} + +.link { + color: var(--accent); + cursor: pointer; + text-decoration: underline; +} + +.options { + margin-top: 1rem; + display: flex; + flex-direction: column; + gap: 0.75rem; +} + +.option-group { + display: flex; + align-items: center; + gap: 0.75rem; +} + +.option-group label { + font-size: 0.8125rem; + color: var(--text-dim); + width: 80px; + flex-shrink: 0; +} + +.option-group select, .option-group input { + flex: 1; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 6px; + padding: 0.5rem 0.75rem; + color: var(--text); + font-size: 0.875rem; +} + +.btn { + background: 
var(--accent); + color: #fff; + border: none; + border-radius: 6px; + padding: 0.625rem 1.25rem; + font-size: 0.875rem; + font-weight: 500; + cursor: pointer; + transition: background 0.2s; + margin-top: 0.25rem; +} + +.btn:hover { background: var(--accent-hover); } +.btn:disabled { opacity: 0.5; cursor: not-allowed; } +.btn-small { padding: 0.375rem 0.75rem; font-size: 0.8125rem; margin-top: 0; } + +.progress-bar { + background: var(--surface); + border-radius: 4px; + height: 6px; + overflow: hidden; + margin: 0.5rem 0; +} + +.progress-fill { + background: var(--accent); + height: 100%; + width: 0%; + border-radius: 4px; + transition: width 0.15s; +} + +.progress-text { + font-size: 0.8125rem; + color: var(--text-dim); + text-align: right; +} + +.file-name { + font-size: 0.875rem; + color: var(--text); + word-break: break-all; +} + +.result { + margin-top: 1.5rem; + padding: 1rem; + background: var(--surface); + border-radius: 8px; + border: 1px solid var(--border); +} + +.result-success p { + color: var(--success); + font-weight: 500; + margin-bottom: 0.75rem; +} + +.result-url { + display: flex; + gap: 0.5rem; +} + +.result-url input { + flex: 1; + background: var(--bg); + border: 1px solid var(--border); + border-radius: 6px; + padding: 0.5rem 0.75rem; + color: var(--text); + font-size: 0.8125rem; + font-family: monospace; +} + +.result-meta { + margin-top: 0.5rem; + font-size: 0.75rem; + color: var(--text-dim); + word-break: break-all; +} + +.error { + margin-top: 1rem; + padding: 0.75rem 1rem; + background: rgba(239, 68, 68, 0.1); + border: 1px solid rgba(239, 68, 68, 0.3); + border-radius: 8px; + color: var(--error); + font-size: 0.875rem; +} + +.auth-form { + display: flex; + flex-direction: column; + gap: 0.75rem; +} + +.auth-form input { + background: var(--surface); + border: 1px solid var(--border); + border-radius: 6px; + padding: 0.625rem 0.75rem; + color: var(--text); + font-size: 0.875rem; +} + +footer { + margin-top: 2rem; + text-align: center; +} 
+ +footer a { + color: var(--text-dim); + font-size: 0.75rem; + text-decoration: none; +} + +footer a:hover { color: var(--accent); } + +.hidden { display: none !important; } diff --git a/web/static/upload.js b/web/static/upload.js new file mode 100644 index 0000000..51c8436 --- /dev/null +++ b/web/static/upload.js @@ -0,0 +1,145 @@ +(() => { + const dropzone = document.getElementById('dropzone'); + const fileInput = document.getElementById('file-input'); + const options = document.getElementById('options'); + const uploadBtn = document.getElementById('upload-btn'); + const progressSection = document.getElementById('progress-section'); + const progressFilename = document.getElementById('progress-filename'); + const progressFill = document.getElementById('progress-fill'); + const progressText = document.getElementById('progress-text'); + const result = document.getElementById('result'); + const resultUrl = document.getElementById('result-url'); + const copyBtn = document.getElementById('copy-btn'); + const resultDelete = document.getElementById('result-delete'); + const resultExpiry = document.getElementById('result-expiry'); + const errorDiv = document.getElementById('error'); + + let selectedFile = null; + + // Drag and drop + dropzone.addEventListener('dragover', (e) => { + e.preventDefault(); + dropzone.classList.add('drag-over'); + }); + + dropzone.addEventListener('dragleave', () => { + dropzone.classList.remove('drag-over'); + }); + + dropzone.addEventListener('drop', (e) => { + e.preventDefault(); + dropzone.classList.remove('drag-over'); + if (e.dataTransfer.files.length > 0) { + selectFile(e.dataTransfer.files[0]); + } + }); + + dropzone.addEventListener('click', () => fileInput.click()); + + fileInput.addEventListener('change', () => { + if (fileInput.files.length > 0) { + selectFile(fileInput.files[0]); + } + }); + + function selectFile(file) { + selectedFile = file; + dropzone.querySelector('p').textContent = file.name + ' (' + formatSize(file.size) + 
')'; + options.classList.remove('hidden'); + result.classList.add('hidden'); + errorDiv.classList.add('hidden'); + } + + uploadBtn.addEventListener('click', () => { + if (!selectedFile) return; + upload(selectedFile); + }); + + function upload(file) { + const formData = new FormData(); + formData.append('file', file); + + const expires = document.getElementById('expires').value; + if (expires) formData.append('expires_in', expires); + + const password = document.getElementById('password').value; + if (password) formData.append('password', password); + + const xhr = new XMLHttpRequest(); + + // Show progress + options.classList.add('hidden'); + dropzone.classList.add('hidden'); + progressSection.classList.remove('hidden'); + progressFilename.textContent = file.name; + errorDiv.classList.add('hidden'); + + xhr.upload.addEventListener('progress', (e) => { + if (e.lengthComputable) { + const pct = Math.round((e.loaded / e.total) * 100); + progressFill.style.width = pct + '%'; + progressText.textContent = pct + '% — ' + formatSize(e.loaded) + ' / ' + formatSize(e.total); + } + }); + + xhr.addEventListener('load', () => { + progressSection.classList.add('hidden'); + + if (xhr.status === 201) { + const data = JSON.parse(xhr.responseText); + showResult(data); + } else { + showError(xhr.responseText || 'Upload failed'); + } + }); + + xhr.addEventListener('error', () => { + progressSection.classList.add('hidden'); + showError('Network error — upload failed'); + }); + + xhr.open('POST', '/upload'); + xhr.send(formData); + } + + function showResult(data) { + result.classList.remove('hidden'); + resultUrl.value = data.url; + + resultDelete.textContent = 'Delete: curl -X DELETE -H "Authorization: Bearer ' + data.delete_token + '" ' + data.delete_url; + + if (data.expires_at) { + resultExpiry.textContent = 'Expires: ' + new Date(data.expires_at).toLocaleString(); + } else { + resultExpiry.textContent = ''; + } + + // Reset for another upload + dropzone.classList.remove('hidden'); 
+ dropzone.querySelector('p').innerHTML = 'Drop a file here or '; + selectedFile = null; + } + + function showError(msg) { + errorDiv.textContent = msg; + errorDiv.classList.remove('hidden'); + dropzone.classList.remove('hidden'); + dropzone.querySelector('p').innerHTML = 'Drop a file here or '; + selectedFile = null; + } + + copyBtn.addEventListener('click', () => { + resultUrl.select(); + navigator.clipboard.writeText(resultUrl.value).then(() => { + copyBtn.textContent = 'Copied!'; + setTimeout(() => { copyBtn.textContent = 'Copy'; }, 2000); + }); + }); + + function formatSize(bytes) { + if (bytes < 1024) return bytes + ' B'; + if (bytes < 1024 * 1024) return (bytes / 1024).toFixed(1) + ' KB'; + if (bytes < 1024 * 1024 * 1024) return (bytes / (1024 * 1024)).toFixed(1) + ' MB'; + return (bytes / (1024 * 1024 * 1024)).toFixed(2) + ' GB'; + } +})(); diff --git a/web/static/upload.sh b/web/static/upload.sh new file mode 100644 index 0000000..446d030 --- /dev/null +++ b/web/static/upload.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# upload.sh — CLI uploader for upload.jeffemmett.com +# Usage: ./upload.sh [options] +# -e, --expires Expiry (1h, 1d, 7d, 30d) +# -p, --password Password-protect the file +# -s, --server Server URL (default: https://upload.jeffemmett.com) +# +# Install: curl -o ~/.local/bin/upload https://upload.jeffemmett.com/cli && chmod +x ~/.local/bin/upload + +set -euo pipefail + +SERVER="${UPLOAD_SERVER:-https://upload.jeffemmett.com}" +EXPIRES="" +PASSWORD="" +FILE="" + +while [[ $# -gt 0 ]]; do + case "$1" in + -e|--expires) EXPIRES="$2"; shift 2 ;; + -p|--password) PASSWORD="$2"; shift 2 ;; + -s|--server) SERVER="$2"; shift 2 ;; + -h|--help) + echo "Usage: upload [-e 7d] [-p secret]" + echo "" + echo "Options:" + echo " -e, --expires Expiry duration (1h, 1d, 7d, 30d)" + echo " -p, --password Password-protect the file" + echo " -s, --server Server URL (default: https://upload.jeffemmett.com)" + exit 0 + ;; + -*) echo "Unknown option: $1" >&2; exit 1 ;; 
+ *) FILE="$1"; shift ;; + esac +done + +if [[ -z "$FILE" ]]; then + echo "Usage: upload [-e 7d] [-p secret]" >&2 + exit 1 +fi + +if [[ ! -f "$FILE" ]]; then + echo "Error: file not found: $FILE" >&2 + exit 1 +fi + +CURL_ARGS=(-s -S --progress-bar -F "file=@${FILE}") +[[ -n "$EXPIRES" ]] && CURL_ARGS+=(-F "expires_in=${EXPIRES}") +[[ -n "$PASSWORD" ]] && CURL_ARGS+=(-F "password=${PASSWORD}") + +RESPONSE=$(curl "${CURL_ARGS[@]}" "${SERVER}/upload") + +if command -v jq &>/dev/null; then + URL=$(echo "$RESPONSE" | jq -r '.url') + DELETE_TOKEN=$(echo "$RESPONSE" | jq -r '.delete_token') + EXPIRES_AT=$(echo "$RESPONSE" | jq -r '.expires_at // empty') + + echo "" + echo "URL: $URL" + [[ -n "$EXPIRES_AT" ]] && echo "Expires: $EXPIRES_AT" + echo "Delete: curl -X DELETE -H 'Authorization: Bearer ${DELETE_TOKEN}' ${SERVER}/f/$(echo "$RESPONSE" | jq -r '.id')" + + # Copy to clipboard if available + if command -v xclip &>/dev/null; then + echo -n "$URL" | xclip -selection clipboard + echo "(copied to clipboard)" + elif command -v pbcopy &>/dev/null; then + echo -n "$URL" | pbcopy + echo "(copied to clipboard)" + elif command -v wl-copy &>/dev/null; then + echo -n "$URL" | wl-copy + echo "(copied to clipboard)" + fi +else + echo "$RESPONSE" +fi