Vow, an uncensorable PDS written in Go

refactor: pure Go; remove Postgres and S3, add IPFS blob storage

+317 -525
-10
Caddyfile.postgres
··· 1 - {$COCOON_HOSTNAME} { 2 - reverse_proxy cocoon:8080 3 - 4 - encode gzip 5 - 6 - log { 7 - output file /data/access.log 8 - format json 9 - } 10 - }
+28 -66
cmd/cocoon/main.go
··· 14 14 "github.com/bluesky-social/go-util/pkg/telemetry" 15 15 "github.com/bluesky-social/indigo/atproto/atcrypto" 16 16 "github.com/bluesky-social/indigo/atproto/syntax" 17 + "github.com/glebarez/sqlite" 17 18 "github.com/haileyok/cocoon/internal/helpers" 18 19 "github.com/haileyok/cocoon/server" 19 20 _ "github.com/joho/godotenv/autoload" 20 21 "github.com/lestrrat-go/jwx/v2/jwk" 21 22 "github.com/urfave/cli/v2" 22 23 "golang.org/x/crypto/bcrypt" 23 - "gorm.io/driver/postgres" 24 - "gorm.io/driver/sqlite" 25 24 "gorm.io/gorm" 26 25 ) 27 26 ··· 42 41 Value: "cocoon.db", 43 42 EnvVars: []string{"COCOON_DB_NAME"}, 44 43 }, 45 - &cli.StringFlag{ 46 - Name: "db-type", 47 - Value: "sqlite", 48 - Usage: "Database type: sqlite or postgres", 49 - EnvVars: []string{"COCOON_DB_TYPE"}, 50 - }, 51 - &cli.StringFlag{ 52 - Name: "database-url", 53 - Aliases: []string{"db-url"}, 54 - Usage: "PostgreSQL connection string (required if db-type is postgres)", 55 - EnvVars: []string{"COCOON_DATABASE_URL", "DATABASE_URL"}, 56 - }, 44 + 57 45 &cli.StringFlag{ 58 46 Name: "did", 59 47 EnvVars: []string{"COCOON_DID"}, ··· 112 100 EnvVars: []string{"COCOON_SMTP_NAME"}, 113 101 }, 114 102 &cli.BoolFlag{ 115 - Name: "s3-backups-enabled", 116 - EnvVars: []string{"COCOON_S3_BACKUPS_ENABLED"}, 117 - }, 118 - &cli.BoolFlag{ 119 - Name: "s3-blobstore-enabled", 120 - EnvVars: []string{"COCOON_S3_BLOBSTORE_ENABLED"}, 103 + Name: "ipfs-blobstore-enabled", 104 + EnvVars: []string{"COCOON_IPFS_BLOBSTORE_ENABLED"}, 105 + Usage: "Store blobs on IPFS via the Kubo HTTP RPC API instead of SQLite.", 121 106 }, 122 107 &cli.StringFlag{ 123 - Name: "s3-region", 124 - EnvVars: []string{"COCOON_S3_REGION"}, 108 + Name: "ipfs-node-url", 109 + EnvVars: []string{"COCOON_IPFS_NODE_URL"}, 110 + Value: "http://127.0.0.1:5001", 111 + Usage: "Base URL of the Kubo (go-ipfs) RPC API used for adding and fetching blobs.", 125 112 }, 126 113 &cli.StringFlag{ 127 - Name: "s3-bucket", 128 - EnvVars: 
[]string{"COCOON_S3_BUCKET"}, 114 + Name: "ipfs-gateway-url", 115 + EnvVars: []string{"COCOON_IPFS_GATEWAY_URL"}, 116 + Usage: "Public IPFS gateway URL for blob redirects (e.g., https://ipfs.io). When set, getBlob redirects to this URL instead of proxying through the node.", 129 117 }, 130 118 &cli.StringFlag{ 131 - Name: "s3-endpoint", 132 - EnvVars: []string{"COCOON_S3_ENDPOINT"}, 133 - }, 134 - &cli.StringFlag{ 135 - Name: "s3-access-key", 136 - EnvVars: []string{"COCOON_S3_ACCESS_KEY"}, 137 - }, 138 - &cli.StringFlag{ 139 - Name: "s3-secret-key", 140 - EnvVars: []string{"COCOON_S3_SECRET_KEY"}, 119 + Name: "ipfs-pinning-service-url", 120 + EnvVars: []string{"COCOON_IPFS_PINNING_SERVICE_URL"}, 121 + Usage: "Remote IPFS Pinning Service API endpoint (e.g., https://api.pinata.cloud/psa). Leave empty to skip remote pinning.", 141 122 }, 142 123 &cli.StringFlag{ 143 - Name: "s3-cdn-url", 144 - EnvVars: []string{"COCOON_S3_CDN_URL"}, 145 - Usage: "Public URL for S3 blob redirects (e.g., https://cdn.example.com). 
When set, getBlob redirects to this URL instead of proxying.", 124 + Name: "ipfs-pinning-service-token", 125 + EnvVars: []string{"COCOON_IPFS_PINNING_SERVICE_TOKEN"}, 126 + Usage: "Bearer token for authenticating with the remote IPFS pinning service.", 146 127 }, 147 128 &cli.StringFlag{ 148 129 Name: "session-secret", ··· 216 197 LogLevel: level, 217 198 Addr: cmd.String("addr"), 218 199 DbName: cmd.String("db-name"), 219 - DbType: cmd.String("db-type"), 220 - DatabaseURL: cmd.String("database-url"), 221 200 Did: cmd.String("did"), 222 201 Hostname: cmd.String("hostname"), 223 202 RotationKeyPath: cmd.String("rotation-key-path"), ··· 233 212 SmtpPort: cmd.String("smtp-port"), 234 213 SmtpEmail: cmd.String("smtp-email"), 235 214 SmtpName: cmd.String("smtp-name"), 236 - S3Config: &server.S3Config{ 237 - BackupsEnabled: cmd.Bool("s3-backups-enabled"), 238 - BlobstoreEnabled: cmd.Bool("s3-blobstore-enabled"), 239 - Region: cmd.String("s3-region"), 240 - Bucket: cmd.String("s3-bucket"), 241 - Endpoint: cmd.String("s3-endpoint"), 242 - AccessKey: cmd.String("s3-access-key"), 243 - SecretKey: cmd.String("s3-secret-key"), 244 - CDNUrl: cmd.String("s3-cdn-url"), 215 + IPFSConfig: &server.IPFSConfig{ 216 + BlobstoreEnabled: cmd.Bool("ipfs-blobstore-enabled"), 217 + NodeURL: cmd.String("ipfs-node-url"), 218 + GatewayURL: cmd.String("ipfs-gateway-url"), 219 + PinningServiceURL: cmd.String("ipfs-pinning-service-url"), 220 + PinningServiceToken: cmd.String("ipfs-pinning-service-token"), 245 221 }, 246 222 SessionSecret: cmd.String("session-secret"), 247 223 SessionCookieKey: cmd.String("session-cookie-key"), ··· 410 386 } 411 387 412 388 func newDb(cmd *cli.Context) (*gorm.DB, error) { 413 - dbType := cmd.String("db-type") 414 - if dbType == "" { 415 - dbType = "sqlite" 416 - } 417 - 418 - switch dbType { 419 - case "postgres": 420 - databaseURL := cmd.String("database-url") 421 - if databaseURL == "" { 422 - return nil, fmt.Errorf("COCOON_DATABASE_URL or DATABASE_URL must be 
set when using postgres") 423 - } 424 - return gorm.Open(postgres.Open(databaseURL), &gorm.Config{}) 425 - default: 426 - dbName := cmd.String("db-name") 427 - if dbName == "" { 428 - dbName = "cocoon.db" 429 - } 430 - return gorm.Open(sqlite.Open(dbName), &gorm.Config{}) 389 + dbName := cmd.String("db-name") 390 + if dbName == "" { 391 + dbName = "cocoon.db" 431 392 } 393 + return gorm.Open(sqlite.Open(dbName), &gorm.Config{}) 432 394 }
+7 -14
docker-compose.noproxy.yaml
··· 5 5 # docker-compose -f docker-compose.noproxy.yaml up -d 6 6 # 7 7 8 - version: '3.8' 8 + version: "3.8" 9 9 10 10 services: 11 11 init-keys: ··· 57 57 58 58 # Server configuration 59 59 COCOON_ADDR: ":8080" 60 - COCOON_DB_TYPE: ${COCOON_DB_TYPE:-sqlite} 61 60 COCOON_DB_NAME: ${COCOON_DB_NAME:-/data/cocoon/cocoon.db} 62 - COCOON_DATABASE_URL: ${COCOON_DATABASE_URL:-} 63 61 COCOON_BLOCKSTORE_VARIANT: ${COCOON_BLOCKSTORE_VARIANT:-sqlite} 64 62 65 63 # Optional: SMTP settings for email ··· 70 68 COCOON_SMTP_EMAIL: ${COCOON_SMTP_EMAIL:-} 71 69 COCOON_SMTP_NAME: ${COCOON_SMTP_NAME:-} 72 70 73 - # Optional: S3 configuration 74 - COCOON_S3_BACKUPS_ENABLED: ${COCOON_S3_BACKUPS_ENABLED:-false} 75 - COCOON_S3_BLOBSTORE_ENABLED: ${COCOON_S3_BLOBSTORE_ENABLED:-false} 76 - COCOON_S3_REGION: ${COCOON_S3_REGION:-} 77 - COCOON_S3_BUCKET: ${COCOON_S3_BUCKET:-} 78 - COCOON_S3_ENDPOINT: ${COCOON_S3_ENDPOINT:-} 79 - COCOON_S3_ACCESS_KEY: ${COCOON_S3_ACCESS_KEY:-} 80 - COCOON_S3_SECRET_KEY: ${COCOON_S3_SECRET_KEY:-} 81 - COCOON_S3_CDN_URL: ${COCOON_S3_CDN_URL:-} 71 + # Optional: IPFS pinning configuration 72 + COCOON_IPFS_BLOBSTORE_ENABLED: ${COCOON_IPFS_BLOBSTORE_ENABLED:-false} 73 + COCOON_IPFS_NODE_URL: ${COCOON_IPFS_NODE_URL:-http://127.0.0.1:5001} 74 + COCOON_IPFS_GATEWAY_URL: ${COCOON_IPFS_GATEWAY_URL:-} 75 + COCOON_IPFS_PINNING_SERVICE_URL: ${COCOON_IPFS_PINNING_SERVICE_URL:-} 76 + COCOON_IPFS_PINNING_SERVICE_TOKEN: ${COCOON_IPFS_PINNING_SERVICE_TOKEN:-} 82 77 83 78 # Optional: Fallback proxy 84 79 COCOON_FALLBACK_PROXY: ${COCOON_FALLBACK_PROXY:-} ··· 109 104 COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL} 110 105 COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network} 111 106 COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD} 112 - COCOON_DB_TYPE: ${COCOON_DB_TYPE:-sqlite} 113 107 COCOON_DB_NAME: ${COCOON_DB_NAME:-/data/cocoon/cocoon.db} 114 - COCOON_DATABASE_URL: ${COCOON_DATABASE_URL:-} 115 108 depends_on: 116 109 cocoon: 117 110 condition: service_healthy
-159
docker-compose.postgres.yaml
··· 1 - # Docker Compose with PostgreSQL 2 - # 3 - # Usage: 4 - # docker-compose -f docker-compose.postgres.yaml up -d 5 - # 6 - # This file extends the base docker-compose.yaml with a PostgreSQL database. 7 - # Set the following in your .env file: 8 - # COCOON_DB_TYPE=postgres 9 - # POSTGRES_PASSWORD=your-secure-password 10 - 11 - version: '3.8' 12 - 13 - services: 14 - postgres: 15 - image: postgres:16-alpine 16 - container_name: cocoon-postgres 17 - environment: 18 - POSTGRES_USER: cocoon 19 - POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} 20 - POSTGRES_DB: cocoon 21 - PGDATA: /var/lib/postgresql/data 22 - volumes: 23 - - postgres_data:/var/lib/postgresql/data 24 - healthcheck: 25 - test: ["CMD-SHELL", "pg_isready -U cocoon -d cocoon"] 26 - interval: 10s 27 - timeout: 5s 28 - retries: 5 29 - restart: unless-stopped 30 - 31 - init-keys: 32 - build: 33 - context: . 34 - dockerfile: Dockerfile 35 - image: ghcr.io/haileyok/cocoon:latest 36 - container_name: cocoon-init-keys 37 - volumes: 38 - - ./keys:/keys 39 - - ./data:/data/cocoon 40 - - ./init-keys.sh:/init-keys.sh:ro 41 - environment: 42 - COCOON_DID: ${COCOON_DID} 43 - COCOON_HOSTNAME: ${COCOON_HOSTNAME} 44 - COCOON_ROTATION_KEY_PATH: /keys/rotation.key 45 - COCOON_JWK_PATH: /keys/jwk.key 46 - COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL} 47 - COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network} 48 - COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD} 49 - entrypoint: ["/bin/sh", "/init-keys.sh"] 50 - restart: "no" 51 - 52 - cocoon: 53 - build: 54 - context: . 
55 - dockerfile: Dockerfile 56 - image: ghcr.io/haileyok/cocoon:latest 57 - container_name: cocoon-pds 58 - depends_on: 59 - init-keys: 60 - condition: service_completed_successfully 61 - postgres: 62 - condition: service_healthy 63 - ports: 64 - - "8080:8080" 65 - volumes: 66 - - ./data:/data/cocoon 67 - - ./keys/rotation.key:/keys/rotation.key:ro 68 - - ./keys/jwk.key:/keys/jwk.key:ro 69 - environment: 70 - # Required settings 71 - COCOON_DID: ${COCOON_DID} 72 - COCOON_HOSTNAME: ${COCOON_HOSTNAME} 73 - COCOON_ROTATION_KEY_PATH: /keys/rotation.key 74 - COCOON_JWK_PATH: /keys/jwk.key 75 - COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL} 76 - COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network} 77 - COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD} 78 - COCOON_SESSION_SECRET: ${COCOON_SESSION_SECRET} 79 - 80 - # Database configuration - PostgreSQL 81 - COCOON_ADDR: ":8080" 82 - COCOON_DB_TYPE: postgres 83 - COCOON_DATABASE_URL: postgres://cocoon:${POSTGRES_PASSWORD}@postgres:5432/cocoon?sslmode=disable 84 - COCOON_BLOCKSTORE_VARIANT: ${COCOON_BLOCKSTORE_VARIANT:-sqlite} 85 - 86 - # Optional: SMTP settings for email 87 - COCOON_SMTP_USER: ${COCOON_SMTP_USER:-} 88 - COCOON_SMTP_PASS: ${COCOON_SMTP_PASS:-} 89 - COCOON_SMTP_HOST: ${COCOON_SMTP_HOST:-} 90 - COCOON_SMTP_PORT: ${COCOON_SMTP_PORT:-} 91 - COCOON_SMTP_EMAIL: ${COCOON_SMTP_EMAIL:-} 92 - COCOON_SMTP_NAME: ${COCOON_SMTP_NAME:-} 93 - 94 - # Optional: S3 configuration 95 - COCOON_S3_BACKUPS_ENABLED: ${COCOON_S3_BACKUPS_ENABLED:-false} 96 - COCOON_S3_BLOBSTORE_ENABLED: ${COCOON_S3_BLOBSTORE_ENABLED:-false} 97 - COCOON_S3_REGION: ${COCOON_S3_REGION:-} 98 - COCOON_S3_BUCKET: ${COCOON_S3_BUCKET:-} 99 - COCOON_S3_ENDPOINT: ${COCOON_S3_ENDPOINT:-} 100 - COCOON_S3_ACCESS_KEY: ${COCOON_S3_ACCESS_KEY:-} 101 - COCOON_S3_SECRET_KEY: ${COCOON_S3_SECRET_KEY:-} 102 - 103 - # Optional: Fallback proxy 104 - COCOON_FALLBACK_PROXY: ${COCOON_FALLBACK_PROXY:-} 105 - restart: unless-stopped 106 - healthcheck: 107 - test: ["CMD", "curl", 
"-f", "http://localhost:8080/xrpc/_health"] 108 - interval: 30s 109 - timeout: 10s 110 - retries: 3 111 - start_period: 40s 112 - 113 - create-invite: 114 - build: 115 - context: . 116 - dockerfile: Dockerfile 117 - image: ghcr.io/haileyok/cocoon:latest 118 - container_name: cocoon-create-invite 119 - volumes: 120 - - ./keys:/keys 121 - - ./create-initial-invite.sh:/create-initial-invite.sh:ro 122 - environment: 123 - COCOON_DID: ${COCOON_DID} 124 - COCOON_HOSTNAME: ${COCOON_HOSTNAME} 125 - COCOON_ROTATION_KEY_PATH: /keys/rotation.key 126 - COCOON_JWK_PATH: /keys/jwk.key 127 - COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL} 128 - COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network} 129 - COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD} 130 - COCOON_DB_TYPE: postgres 131 - COCOON_DATABASE_URL: postgres://cocoon:${POSTGRES_PASSWORD}@postgres:5432/cocoon?sslmode=disable 132 - depends_on: 133 - cocoon: 134 - condition: service_healthy 135 - entrypoint: ["/bin/sh", "/create-initial-invite.sh"] 136 - restart: "no" 137 - 138 - caddy: 139 - image: caddy:2-alpine 140 - container_name: cocoon-caddy 141 - ports: 142 - - "80:80" 143 - - "443:443" 144 - volumes: 145 - - ./Caddyfile.postgres:/etc/caddy/Caddyfile:ro 146 - - caddy_data:/data 147 - - caddy_config:/config 148 - restart: unless-stopped 149 - environment: 150 - COCOON_HOSTNAME: ${COCOON_HOSTNAME} 151 - CADDY_ACME_EMAIL: ${COCOON_CONTACT_EMAIL:-} 152 - 153 - volumes: 154 - postgres_data: 155 - driver: local 156 - caddy_data: 157 - driver: local 158 - caddy_config: 159 - driver: local
+7 -14
docker-compose.yaml
··· 1 - version: '3.8' 1 + version: "3.8" 2 2 3 3 services: 4 4 init-keys: ··· 49 49 50 50 # Server configuration 51 51 COCOON_ADDR: ":8080" 52 - COCOON_DB_TYPE: ${COCOON_DB_TYPE:-sqlite} 53 52 COCOON_DB_NAME: ${COCOON_DB_NAME:-/data/cocoon/cocoon.db} 54 - COCOON_DATABASE_URL: ${COCOON_DATABASE_URL:-} 55 53 COCOON_BLOCKSTORE_VARIANT: ${COCOON_BLOCKSTORE_VARIANT:-sqlite} 56 54 57 55 # Optional: SMTP settings for email ··· 62 60 COCOON_SMTP_EMAIL: ${COCOON_SMTP_EMAIL:-} 63 61 COCOON_SMTP_NAME: ${COCOON_SMTP_NAME:-} 64 62 65 - # Optional: S3 configuration 66 - COCOON_S3_BACKUPS_ENABLED: ${COCOON_S3_BACKUPS_ENABLED:-false} 67 - COCOON_S3_BLOBSTORE_ENABLED: ${COCOON_S3_BLOBSTORE_ENABLED:-false} 68 - COCOON_S3_REGION: ${COCOON_S3_REGION:-} 69 - COCOON_S3_BUCKET: ${COCOON_S3_BUCKET:-} 70 - COCOON_S3_ENDPOINT: ${COCOON_S3_ENDPOINT:-} 71 - COCOON_S3_ACCESS_KEY: ${COCOON_S3_ACCESS_KEY:-} 72 - COCOON_S3_SECRET_KEY: ${COCOON_S3_SECRET_KEY:-} 73 - COCOON_S3_CDN_URL: ${COCOON_S3_CDN_URL:-} 63 + # Optional: IPFS pinning configuration 64 + COCOON_IPFS_BLOBSTORE_ENABLED: ${COCOON_IPFS_BLOBSTORE_ENABLED:-false} 65 + COCOON_IPFS_NODE_URL: ${COCOON_IPFS_NODE_URL:-http://127.0.0.1:5001} 66 + COCOON_IPFS_GATEWAY_URL: ${COCOON_IPFS_GATEWAY_URL:-} 67 + COCOON_IPFS_PINNING_SERVICE_URL: ${COCOON_IPFS_PINNING_SERVICE_URL:-} 68 + COCOON_IPFS_PINNING_SERVICE_TOKEN: ${COCOON_IPFS_PINNING_SERVICE_TOKEN:-} 74 69 75 70 # Optional: Fallback proxy 76 71 COCOON_FALLBACK_PROXY: ${COCOON_FALLBACK_PROXY:-} ··· 101 96 COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL} 102 97 COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network} 103 98 COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD} 104 - COCOON_DB_TYPE: ${COCOON_DB_TYPE:-sqlite} 105 99 COCOON_DB_NAME: ${COCOON_DB_NAME:-/data/cocoon/cocoon.db} 106 - COCOON_DATABASE_URL: ${COCOON_DATABASE_URL:-} 107 100 depends_on: 108 101 - init-keys 109 102 entrypoint: ["/bin/sh", "/create-initial-invite.sh"]
+10 -4
go.mod
··· 4 4 5 5 require ( 6 6 github.com/Azure/go-autorest/autorest/to v0.4.1 7 - github.com/aws/aws-sdk-go v1.55.7 8 7 github.com/bluesky-social/go-util v0.0.0-20251012040650-2ebbf57f5934 9 8 github.com/bluesky-social/indigo v0.0.0-20260203235305-a86f3ae1f8ec 10 9 github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 11 10 github.com/domodwyer/mailyak/v3 v3.6.2 11 + github.com/glebarez/sqlite v1.11.0 12 12 github.com/go-pkgz/expirable-cache/v3 v3.0.0 13 13 github.com/go-playground/validator v9.31.0+incompatible 14 14 github.com/golang-jwt/jwt/v4 v4.5.2 ··· 33 33 github.com/whyrusleeping/cbor-gen v0.2.1-0.20241030202151-b7a6831be65e 34 34 gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b 35 35 golang.org/x/crypto v0.41.0 36 - gorm.io/driver/postgres v1.5.7 37 - gorm.io/driver/sqlite v1.5.7 38 36 gorm.io/gorm v1.25.12 39 37 ) 40 38 ··· 45 43 github.com/cespare/xxhash/v2 v2.3.0 // indirect 46 44 github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect 47 45 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect 46 + github.com/dustin/go-humanize v1.0.1 // indirect 48 47 github.com/earthboundkid/versioninfo/v2 v2.24.1 // indirect 49 48 github.com/felixge/httpsnoop v1.0.4 // indirect 49 + github.com/glebarez/go-sqlite v1.21.2 // indirect 50 50 github.com/go-logr/logr v1.4.2 // indirect 51 51 github.com/go-logr/stdr v1.2.2 // indirect 52 52 github.com/go-playground/locales v0.14.1 // indirect ··· 84 84 github.com/jbenet/goprocess v0.1.4 // indirect 85 85 github.com/jinzhu/inflection v1.0.0 // indirect 86 86 github.com/jinzhu/now v1.1.5 // indirect 87 - github.com/jmespath/go-jmespath v0.4.0 // indirect 88 87 github.com/klauspost/cpuid/v2 v2.2.7 // indirect 89 88 github.com/labstack/gommon v0.4.2 // indirect 90 89 github.com/leodido/go-urn v1.4.0 // indirect ··· 108 107 github.com/prometheus/client_model v0.6.2 // indirect 109 108 github.com/prometheus/common v0.66.1 // indirect 110 109 github.com/prometheus/procfs v0.16.1 // indirect 110 + 
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect 111 111 github.com/russross/blackfriday/v2 v2.1.0 // indirect 112 112 github.com/samber/lo v1.49.1 // indirect 113 113 github.com/segmentio/asm v1.2.0 // indirect ··· 133 133 google.golang.org/protobuf v1.36.9 // indirect 134 134 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect 135 135 gopkg.in/inf.v0 v0.9.1 // indirect 136 + gorm.io/driver/postgres v1.5.7 // indirect 137 + gorm.io/driver/sqlite v1.5.7 // indirect 136 138 lukechampine.com/blake3 v1.2.1 // indirect 139 + modernc.org/libc v1.22.5 // indirect 140 + modernc.org/mathutil v1.5.0 // indirect 141 + modernc.org/memory v1.5.0 // indirect 142 + modernc.org/sqlite v1.23.1 // indirect 137 143 )
+19 -6
go.sum
··· 7 7 github.com/RussellLuo/slidingwindow v0.0.0-20200528002341-535bb99d338b/go.mod h1:4+EPqMRApwwE/6yo6CxiHoSnBzjRr3jsqer7frxP8y4= 8 8 github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM= 9 9 github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA= 10 - github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= 11 - github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= 12 10 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 13 11 github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= 14 12 github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= ··· 38 36 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= 39 37 github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8= 40 38 github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c= 39 + github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 40 + github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 41 41 github.com/earthboundkid/versioninfo/v2 v2.24.1 h1:SJTMHaoUx3GzjjnUO1QzP3ZXK6Ee/nbWyCm58eY3oUg= 42 42 github.com/earthboundkid/versioninfo/v2 v2.24.1/go.mod h1:VcWEooDEuyUJnMfbdTh0uFN4cfEIg+kHMuWB2CDCLjw= 43 43 github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= ··· 46 46 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= 47 47 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 48 48 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 49 + github.com/glebarez/go-sqlite v1.21.2 
h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo= 50 + github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k= 51 + github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw= 52 + github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= 49 53 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 50 54 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 51 55 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= ··· 79 83 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 80 84 github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= 81 85 github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= 86 + github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= 87 + github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= 82 88 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 83 89 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 84 90 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ··· 188 194 github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= 189 195 github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= 190 196 github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= 191 - github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 192 - github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 193 - github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 194 - 
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 195 197 github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= 196 198 github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= 197 199 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= ··· 303 305 github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= 304 306 github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= 305 307 github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= 308 + github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= 309 + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= 310 + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= 306 311 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 307 312 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 308 313 github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= ··· 473 478 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= 474 479 lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= 475 480 lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= 481 + modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= 482 + modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= 483 + modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= 484 + modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= 485 + 
modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= 486 + modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= 487 + modernc.org/sqlite v1.23.1 h1:nrSBg4aRQQwq59JpvGEQ15tNxoO5pX/kUjcRNwSAGQM= 488 + modernc.org/sqlite v1.23.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk=
+81 -31
server/handle_repo_upload_blob.go
··· 4 4 "bytes" 5 5 "fmt" 6 6 "io" 7 + "mime/multipart" 8 + "net/http" 7 9 8 - "github.com/aws/aws-sdk-go/aws" 9 - "github.com/aws/aws-sdk-go/aws/credentials" 10 - "github.com/aws/aws-sdk-go/aws/session" 11 - "github.com/aws/aws-sdk-go/service/s3" 12 10 "github.com/haileyok/cocoon/internal/helpers" 13 11 "github.com/haileyok/cocoon/models" 14 12 "github.com/ipfs/go-cid" ··· 42 40 mime = "application/octet-stream" 43 41 } 44 42 43 + ipfsUpload := s.ipfsConfig != nil && s.ipfsConfig.BlobstoreEnabled 45 44 storage := "sqlite" 46 - s3Upload := s.s3Config != nil && s.s3Config.BlobstoreEnabled 47 - if s3Upload { 48 - storage = "s3" 45 + if ipfsUpload { 46 + storage = "ipfs" 49 47 } 48 + 50 49 blob := models.Blob{ 51 50 Did: urepo.Repo.Did, 52 51 RefCount: 0, ··· 62 61 read := 0 63 62 part := 0 64 63 65 - buf := make([]byte, 0x10000) 64 + buf := make([]byte, blockSize) 66 65 fulldata := new(bytes.Buffer) 67 66 68 67 for { ··· 80 79 read += n 81 80 fulldata.Write(data) 82 81 83 - if !s3Upload { 82 + if !ipfsUpload { 84 83 blobPart := models.BlobPart{ 85 84 BlobID: blob.ID, 86 85 Idx: part, ··· 105 104 return helpers.ServerError(e, nil) 106 105 } 107 106 108 - if s3Upload { 109 - config := &aws.Config{ 110 - Region: aws.String(s.s3Config.Region), 111 - Credentials: credentials.NewStaticCredentials(s.s3Config.AccessKey, s.s3Config.SecretKey, ""), 112 - } 113 - 114 - if s.s3Config.Endpoint != "" { 115 - config.Endpoint = aws.String(s.s3Config.Endpoint) 116 - config.S3ForcePathStyle = aws.Bool(true) 117 - } 118 - 119 - sess, err := session.NewSession(config) 107 + if ipfsUpload { 108 + ipfsCid, err := s.addBlobToIPFS(fulldata.Bytes(), mime) 120 109 if err != nil { 121 - logger.Error("error creating aws session", "error", err) 110 + logger.Error("error adding blob to ipfs", "error", err) 122 111 return helpers.ServerError(e, nil) 123 112 } 124 113 125 - svc := s3.New(sess) 114 + // Overwrite the locally computed CID with the one returned by the IPFS 115 + // node so that 
retrieval via the gateway uses the correct address. 116 + c = ipfsCid 126 117 127 - if _, err := svc.PutObject(&s3.PutObjectInput{ 128 - Bucket: aws.String(s.s3Config.Bucket), 129 - Key: aws.String(fmt.Sprintf("blobs/%s/%s", urepo.Repo.Did, c.String())), 130 - Body: bytes.NewReader(fulldata.Bytes()), 131 - }); err != nil { 132 - logger.Error("error uploading blob to s3", "error", err) 133 - return helpers.ServerError(e, nil) 118 + if s.ipfsConfig.PinningServiceURL != "" { 119 + if err := s.pinBlobToRemote(ctx, ipfsCid.String(), fmt.Sprintf("blob/%s/%s", urepo.Repo.Did, ipfsCid.String())); err != nil { 120 + // Non-fatal: the blob is already on the local node; log and 121 + // continue so the upload does not fail. 122 + logger.Warn("error pinning blob to remote pinning service", "cid", ipfsCid.String(), "error", err) 123 + } 134 124 } 135 125 } 136 126 137 127 if err := s.db.Exec(ctx, "UPDATE blobs SET cid = ? WHERE id = ?", nil, c.Bytes(), blob.ID).Error; err != nil { 138 - // there should probably be somme handling here if this fails... 139 128 logger.Error("error updating blob", "error", err) 140 129 return helpers.ServerError(e, nil) 141 130 } ··· 148 137 149 138 return e.JSON(200, resp) 150 139 } 140 + 141 + // addBlobToIPFS adds raw blob data to the configured IPFS node via the Kubo 142 + // HTTP RPC API (/api/v0/add) and returns the resulting CID. 
143 + func (s *Server) addBlobToIPFS(data []byte, mimeType string) (cid.Cid, error) { 144 + nodeURL := s.ipfsConfig.NodeURL 145 + if nodeURL == "" { 146 + nodeURL = "http://127.0.0.1:5001" 147 + } 148 + 149 + endpoint := nodeURL + "/api/v0/add?cid-version=1&hash=sha2-256&pin=true&quieter=true" 150 + 151 + body := new(bytes.Buffer) 152 + writer := multipart.NewWriter(body) 153 + 154 + part, err := writer.CreateFormFile("file", "blob") 155 + if err != nil { 156 + return cid.Undef, fmt.Errorf("error creating multipart field: %w", err) 157 + } 158 + 159 + if _, err := part.Write(data); err != nil { 160 + return cid.Undef, fmt.Errorf("error writing blob data to multipart: %w", err) 161 + } 162 + 163 + if err := writer.Close(); err != nil { 164 + return cid.Undef, fmt.Errorf("error closing multipart writer: %w", err) 165 + } 166 + 167 + req, err := http.NewRequest(http.MethodPost, endpoint, body) 168 + if err != nil { 169 + return cid.Undef, fmt.Errorf("error building ipfs add request: %w", err) 170 + } 171 + req.Header.Set("Content-Type", writer.FormDataContentType()) 172 + 173 + resp, err := s.http.Do(req) 174 + if err != nil { 175 + return cid.Undef, fmt.Errorf("error calling ipfs add: %w", err) 176 + } 177 + defer resp.Body.Close() 178 + 179 + if resp.StatusCode != http.StatusOK { 180 + msg, _ := io.ReadAll(resp.Body) 181 + return cid.Undef, fmt.Errorf("ipfs add returned status %d: %s", resp.StatusCode, string(msg)) 182 + } 183 + 184 + // The Kubo API with ?quieter=true returns a single JSON line: 185 + // {"Hash":"<cid>","Size":"<n>"} 186 + var result struct { 187 + Hash string `json:"Hash"` 188 + } 189 + 190 + if err := readJSON(resp.Body, &result); err != nil { 191 + return cid.Undef, fmt.Errorf("error decoding ipfs add response: %w", err) 192 + } 193 + 194 + c, err := cid.Parse(result.Hash) 195 + if err != nil { 196 + return cid.Undef, fmt.Errorf("error parsing cid from ipfs add response: %w", err) 197 + } 198 + 199 + return c, nil 200 + }
+52 -56
server/handle_sync_get_blob.go
··· 4 4 "bytes" 5 5 "fmt" 6 6 "io" 7 + "net/http" 7 8 8 9 "github.com/Azure/go-autorest/autorest/to" 9 - "github.com/aws/aws-sdk-go/aws" 10 - "github.com/aws/aws-sdk-go/aws/credentials" 11 - "github.com/aws/aws-sdk-go/aws/session" 12 - "github.com/aws/aws-sdk-go/service/s3" 13 10 "github.com/haileyok/cocoon/internal/helpers" 14 11 "github.com/haileyok/cocoon/models" 15 12 "github.com/ipfs/go-cid" ··· 56 53 57 54 buf := new(bytes.Buffer) 58 55 59 - if blob.Storage == "sqlite" { 56 + switch blob.Storage { 57 + case "sqlite": 60 58 var parts []models.BlobPart 61 59 if err := s.db.Raw(ctx, "SELECT * FROM blob_parts WHERE blob_id = ? ORDER BY idx", nil, blob.ID).Scan(&parts).Error; err != nil { 62 60 logger.Error("error getting blob parts", "error", err) 63 61 return helpers.ServerError(e, nil) 64 62 } 65 63 66 - // TODO: we can just stream this, don't need to make a buffer 67 64 for _, p := range parts { 68 65 buf.Write(p.Data) 69 66 } 70 - } else if blob.Storage == "s3" { 71 - if !(s.s3Config != nil && s.s3Config.BlobstoreEnabled) { 72 - logger.Error("s3 storage disabled") 73 - return helpers.ServerError(e, nil) 74 - } 75 - 76 - blobKey := fmt.Sprintf("blobs/%s/%s", urepo.Repo.Did, c.String()) 77 67 78 - if s.s3Config.CDNUrl != "" { 79 - redirectUrl := fmt.Sprintf("%s/%s", s.s3Config.CDNUrl, blobKey) 80 - return e.Redirect(302, redirectUrl) 81 - } 82 - 83 - config := &aws.Config{ 84 - Region: aws.String(s.s3Config.Region), 85 - Credentials: credentials.NewStaticCredentials(s.s3Config.AccessKey, s.s3Config.SecretKey, ""), 68 + case "ipfs": 69 + if s.ipfsConfig == nil || !s.ipfsConfig.BlobstoreEnabled { 70 + logger.Error("ipfs storage disabled") 71 + return helpers.ServerError(e, nil) 86 72 } 87 73 88 - if s.s3Config.Endpoint != "" { 89 - config.Endpoint = aws.String(s.s3Config.Endpoint) 90 - config.S3ForcePathStyle = aws.Bool(true) 74 + // If a public gateway is configured, redirect the client directly to it 75 + // instead of proxying the content through this server. 
76 + if s.ipfsConfig.GatewayURL != "" { 77 + redirectURL := fmt.Sprintf("%s/ipfs/%s", s.ipfsConfig.GatewayURL, c.String()) 78 + return e.Redirect(302, redirectURL) 91 79 } 92 80 93 - sess, err := session.NewSession(config) 81 + // Otherwise fetch from the local Kubo node via /api/v0/cat and stream 82 + // the content back to the client. 83 + data, err := s.fetchBlobFromIPFS(c.String()) 94 84 if err != nil { 95 - logger.Error("error creating aws session", "error", err) 85 + logger.Error("error fetching blob from ipfs node", "cid", c.String(), "error", err) 96 86 return helpers.ServerError(e, nil) 97 87 } 98 - 99 - svc := s3.New(sess) 100 - if result, err := svc.GetObject(&s3.GetObjectInput{ 101 - Bucket: aws.String(s.s3Config.Bucket), 102 - Key: aws.String(blobKey), 103 - }); err != nil { 104 - logger.Error("error getting blob from s3", "error", err) 105 - return helpers.ServerError(e, nil) 106 - } else { 107 - read := 0 108 - part := 0 109 - partBuf := make([]byte, 0x10000) 110 - 111 - for { 112 - n, err := io.ReadFull(result.Body, partBuf) 113 - if err == io.ErrUnexpectedEOF || err == io.EOF { 114 - if n == 0 { 115 - break 116 - } 117 - } else if err != nil && err != io.ErrUnexpectedEOF { 118 - logger.Error("error reading blob", "error", err) 119 - return helpers.ServerError(e, nil) 120 - } 88 + buf.Write(data) 121 89 122 - data := partBuf[:n] 123 - read += n 124 - buf.Write(data) 125 - part++ 126 - } 127 - } 128 - } else { 90 + default: 129 91 logger.Error("unknown storage", "storage", blob.Storage) 130 92 return helpers.ServerError(e, nil) 131 93 } ··· 134 96 135 97 return e.Stream(200, "application/octet-stream", buf) 136 98 } 99 + 100 + // fetchBlobFromIPFS retrieves blob data for the given CID from the local Kubo 101 + // node using the HTTP RPC API (/api/v0/cat). 
102 + func (s *Server) fetchBlobFromIPFS(cidStr string) ([]byte, error) { 103 + nodeURL := s.ipfsConfig.NodeURL 104 + if nodeURL == "" { 105 + nodeURL = "http://127.0.0.1:5001" 106 + } 107 + 108 + endpoint := fmt.Sprintf("%s/api/v0/cat?arg=%s", nodeURL, cidStr) 109 + 110 + req, err := http.NewRequest(http.MethodPost, endpoint, nil) 111 + if err != nil { 112 + return nil, fmt.Errorf("error building ipfs cat request: %w", err) 113 + } 114 + 115 + resp, err := s.http.Do(req) 116 + if err != nil { 117 + return nil, fmt.Errorf("error calling ipfs cat: %w", err) 118 + } 119 + defer resp.Body.Close() 120 + 121 + if resp.StatusCode != http.StatusOK { 122 + msg, _ := io.ReadAll(resp.Body) 123 + return nil, fmt.Errorf("ipfs cat returned status %d: %s", resp.StatusCode, string(msg)) 124 + } 125 + 126 + data, err := io.ReadAll(resp.Body) 127 + if err != nil { 128 + return nil, fmt.Errorf("error reading ipfs cat response: %w", err) 129 + } 130 + 131 + return data, nil 132 + }
+72
server/ipfs.go
··· 1 + package server 2 + 3 + import ( 4 + "bytes" 5 + "context" 6 + "encoding/json" 7 + "fmt" 8 + "io" 9 + "net/http" 10 + "time" 11 + ) 12 + 13 + // readJSON decodes a single JSON value from r into dst. 14 + func readJSON(r io.Reader, dst any) error { 15 + return json.NewDecoder(r).Decode(dst) 16 + } 17 + 18 + // pinBlobToRemote pins a CID to the configured remote pinning service using 19 + // the IPFS Pinning Service API spec 20 + // (https://ipfs.github.io/pinning-services-api-spec/). 21 + // 22 + // The call is best-effort: callers should log the error but not treat it as 23 + // fatal so that a transient pinning failure does not prevent a blob upload 24 + // from succeeding. 25 + func (s *Server) pinBlobToRemote(ctx context.Context, cidStr string, name string) error { 26 + serviceURL := s.ipfsConfig.PinningServiceURL 27 + token := s.ipfsConfig.PinningServiceToken 28 + 29 + if serviceURL == "" { 30 + return fmt.Errorf("no pinning service URL configured") 31 + } 32 + 33 + endpoint := serviceURL + "/pins" 34 + 35 + payload := map[string]any{ 36 + "cid": cidStr, 37 + "name": name, 38 + "meta": map[string]string{ 39 + "pinned_by": "cocoon", 40 + "pinned_at": time.Now().UTC().Format(time.RFC3339), 41 + }, 42 + } 43 + 44 + body, err := json.Marshal(payload) 45 + if err != nil { 46 + return fmt.Errorf("error marshalling pin request: %w", err) 47 + } 48 + 49 + req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body)) 50 + if err != nil { 51 + return fmt.Errorf("error building pin request: %w", err) 52 + } 53 + 54 + req.Header.Set("Content-Type", "application/json") 55 + if token != "" { 56 + req.Header.Set("Authorization", "Bearer "+token) 57 + } 58 + 59 + resp, err := s.http.Do(req) 60 + if err != nil { 61 + return fmt.Errorf("error calling pinning service: %w", err) 62 + } 63 + defer resp.Body.Close() 64 + 65 + // The Pinning Service API returns 202 Accepted on success. 
66 + if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK { 67 + msg, _ := io.ReadAll(resp.Body) 68 + return fmt.Errorf("pinning service returned status %d: %s", resp.StatusCode, string(msg)) 69 + } 70 + 71 + return nil 72 + }
+4 -2
server/repo.go
··· 672 672 return nil, err 673 673 } 674 674 675 - // TODO: this does _not_ handle deletions of blobs that are on s3 storage!!!! we need to get the blob, see what 676 - // storage it is in, and clean up s3!!!! 675 + // TODO: blobs with storage == "ipfs" are not unpinned from the local 676 + // IPFS node or the remote pinning service when their ref_count reaches 677 + // zero. A future cleanup pass should call /api/v0/pin/rm on the local 678 + // node and DELETE /pins/<requestid> on the remote pinning service. 677 679 if res.Count == 0 { 678 680 if err := rm.db.Exec(ctx, "DELETE FROM blobs WHERE id = ?", nil, res.ID).Error; err != nil { 679 681 return nil, err
+37 -163
server/server.go
··· 1 1 package server 2 2 3 3 import ( 4 - "bytes" 5 4 "context" 6 5 "crypto/ecdsa" 7 6 "embed" ··· 17 16 "text/template" 18 17 "time" 19 18 20 - "github.com/aws/aws-sdk-go/aws" 21 - "github.com/aws/aws-sdk-go/aws/credentials" 22 - "github.com/aws/aws-sdk-go/aws/session" 23 - "github.com/aws/aws-sdk-go/service/s3" 24 19 "github.com/bluesky-social/indigo/api/atproto" 25 20 "github.com/bluesky-social/indigo/atproto/syntax" 26 21 "github.com/bluesky-social/indigo/events" 27 22 "github.com/bluesky-social/indigo/util" 28 23 "github.com/bluesky-social/indigo/xrpc" 29 24 "github.com/domodwyer/mailyak/v3" 25 + "github.com/glebarez/sqlite" 30 26 "github.com/go-playground/validator" 31 27 "github.com/gorilla/sessions" 32 28 "github.com/haileyok/cocoon/identity" ··· 44 40 "github.com/labstack/echo/v4" 45 41 "github.com/labstack/echo/v4/middleware" 46 42 slogecho "github.com/samber/slog-echo" 47 - "gorm.io/driver/postgres" 48 - "gorm.io/driver/sqlite" 49 43 "gorm.io/gorm" 50 44 ) 51 45 ··· 53 47 AccountSessionMaxAge = 30 * 24 * time.Hour // one week 54 48 ) 55 49 56 - type S3Config struct { 57 - BackupsEnabled bool 50 + // IPFSConfig holds configuration for IPFS pinning-based blob storage. 51 + // Blobs are added to an IPFS node via the Kubo HTTP RPC API and optionally 52 + // pinned to a remote pinning service that implements the IPFS Pinning Service 53 + // API spec (e.g. Pinata, web3.storage, Infura). 54 + type IPFSConfig struct { 55 + // BlobstoreEnabled controls whether blobs are stored on IPFS instead of 56 + // SQLite. 58 57 BlobstoreEnabled bool 59 - Endpoint string 60 - Region string 61 - Bucket string 62 - AccessKey string 63 - SecretKey string 64 - CDNUrl string 58 + 59 + // NodeURL is the base URL of the Kubo (go-ipfs) RPC API used for adding 60 + // blobs, e.g. "http://127.0.0.1:5001". 61 + NodeURL string 62 + 63 + // GatewayURL is the base URL of the IPFS gateway used to serve blobs, e.g. 64 + // "https://ipfs.io" or your own gateway. 
When set, getBlob redirects to 65 + // this URL instead of fetching the content through the node. 66 + GatewayURL string 67 + 68 + // PinningServiceURL is the URL of a remote IPFS Pinning Service API 69 + // endpoint, e.g. "https://api.pinata.cloud/psa". Leave empty to skip 70 + // remote pinning. 71 + PinningServiceURL string 72 + 73 + // PinningServiceToken is the Bearer token used to authenticate with the 74 + // remote pinning service. 75 + PinningServiceToken string 65 76 } 66 77 67 78 type Server struct { ··· 84 95 lastRequestCrawl time.Time 85 96 requestCrawlMu sync.Mutex 86 97 87 - dbName string 88 - dbType string 89 - s3Config *S3Config 98 + dbName string 99 + ipfsConfig *IPFSConfig 90 100 } 91 101 92 102 type Args struct { ··· 95 105 LogLevel slog.Level 96 106 Addr string 97 107 DbName string 98 - DbType string 99 - DatabaseURL string 100 108 Version string 101 109 Did string 102 110 Hostname string ··· 114 122 SmtpEmail string 115 123 SmtpName string 116 124 117 - S3Config *S3Config 125 + IPFSConfig *IPFSConfig 118 126 119 127 SessionSecret string 120 128 SessionCookieKey string ··· 332 340 IdleTimeout: 5 * time.Minute, 333 341 } 334 342 335 - dbType := args.DbType 336 - if dbType == "" { 337 - dbType = "sqlite" 338 - } 339 - 340 343 var gdb *gorm.DB 341 344 var err error 342 - switch dbType { 343 - case "postgres": 344 - if args.DatabaseURL == "" { 345 - return nil, fmt.Errorf("database-url must be set when using postgres") 346 - } 347 - gdb, err = gorm.Open(postgres.Open(args.DatabaseURL), &gorm.Config{}) 348 - if err != nil { 349 - return nil, fmt.Errorf("failed to connect to postgres: %w", err) 350 - } 351 - logger.Info("connected to PostgreSQL database") 352 - default: 353 - gdb, err = gorm.Open(sqlite.Open(args.DbName), &gorm.Config{}) 354 - if err != nil { 355 - return nil, fmt.Errorf("failed to open sqlite database: %w", err) 356 - } 357 - gdb.Exec("PRAGMA journal_mode=WAL") 358 - gdb.Exec("PRAGMA synchronous=NORMAL") 359 - 360 - 
logger.Info("connected to SQLite database", "path", args.DbName) 345 + gdb, err = gorm.Open(sqlite.Open(args.DbName), &gorm.Config{}) 346 + if err != nil { 347 + return nil, fmt.Errorf("failed to open sqlite database: %w", err) 361 348 } 349 + gdb.Exec("PRAGMA journal_mode=WAL") 350 + gdb.Exec("PRAGMA synchronous=NORMAL") 351 + logger.Info("connected to SQLite database", "path", args.DbName) 362 352 dbw := db.NewDB(gdb) 363 353 364 354 rkbytes, err := os.ReadFile(args.RotationKeyPath) ··· 437 427 evtman: events.NewEventManager(evtPersister), 438 428 passport: identity.NewPassport(h, identity.NewMemCache(10_000)), 439 429 440 - dbName: args.DbName, 441 - dbType: dbType, 442 - s3Config: args.S3Config, 430 + dbName: args.DbName, 431 + ipfsConfig: args.IPFSConfig, 443 432 444 433 oauthProvider: provider.NewProvider(provider.Args{ 445 434 Hostname: args.Hostname, ··· 611 600 } 612 601 }() 613 602 614 - go s.backupRoutine() 615 - 616 603 go func() { 617 604 if err := s.requestCrawl(ctx); err != nil { 618 605 logger.Error("error requesting crawls", "err", err) ··· 653 640 s.lastRequestCrawl = time.Now() 654 641 655 642 return nil 656 - } 657 - 658 - func (s *Server) doBackup() { 659 - logger := s.logger.With("name", "doBackup") 660 - 661 - if s.dbType == "postgres" { 662 - logger.Info("skipping S3 backup - PostgreSQL backups should be handled externally (pg_dump, managed database backups, etc.)") 663 - return 664 - } 665 - 666 - start := time.Now() 667 - 668 - logger.Info("beginning backup to s3...") 669 - 670 - tmpFile := fmt.Sprintf("/tmp/cocoon-backup-%s.db", time.Now().Format(time.RFC3339Nano)) 671 - defer os.Remove(tmpFile) 672 - 673 - if err := s.db.Client().Exec(fmt.Sprintf("VACUUM INTO '%s'", tmpFile)).Error; err != nil { 674 - logger.Error("error creating tmp backup file", "err", err) 675 - return 676 - } 677 - 678 - backupData, err := os.ReadFile(tmpFile) 679 - if err != nil { 680 - logger.Error("error reading tmp backup file", "err", err) 681 - return 682 - } 
683 - 684 - logger.Info("sending to s3...") 685 - 686 - currTime := time.Now().Format("2006-01-02_15-04-05") 687 - key := "cocoon-backup-" + currTime + ".db" 688 - 689 - config := &aws.Config{ 690 - Region: aws.String(s.s3Config.Region), 691 - Credentials: credentials.NewStaticCredentials(s.s3Config.AccessKey, s.s3Config.SecretKey, ""), 692 - } 693 - 694 - if s.s3Config.Endpoint != "" { 695 - config.Endpoint = aws.String(s.s3Config.Endpoint) 696 - config.S3ForcePathStyle = aws.Bool(true) 697 - } 698 - 699 - sess, err := session.NewSession(config) 700 - if err != nil { 701 - logger.Error("error creating s3 session", "err", err) 702 - return 703 - } 704 - 705 - svc := s3.New(sess) 706 - 707 - if _, err := svc.PutObject(&s3.PutObjectInput{ 708 - Bucket: aws.String(s.s3Config.Bucket), 709 - Key: aws.String(key), 710 - Body: bytes.NewReader(backupData), 711 - }); err != nil { 712 - logger.Error("error uploading file to s3", "err", err) 713 - return 714 - } 715 - 716 - logger.Info("finished uploading backup to s3", "key", key, "duration", time.Since(start).Seconds()) 717 - 718 - os.WriteFile("last-backup.txt", []byte(time.Now().Format(time.RFC3339Nano)), 0644) 719 - } 720 - 721 - func (s *Server) backupRoutine() { 722 - logger := s.logger.With("name", "backupRoutine") 723 - 724 - if s.s3Config == nil || !s.s3Config.BackupsEnabled { 725 - return 726 - } 727 - 728 - if s.s3Config.Region == "" { 729 - logger.Warn("no s3 region configured but backups are enabled. backups will not run.") 730 - return 731 - } 732 - 733 - if s.s3Config.Bucket == "" { 734 - logger.Warn("no s3 bucket configured but backups are enabled. backups will not run.") 735 - return 736 - } 737 - 738 - if s.s3Config.AccessKey == "" { 739 - logger.Warn("no s3 access key configured but backups are enabled. backups will not run.") 740 - return 741 - } 742 - 743 - if s.s3Config.SecretKey == "" { 744 - logger.Warn("no s3 secret key configured but backups are enabled. 
backups will not run.") 745 - return 746 - } 747 - 748 - shouldBackupNow := false 749 - lastBackupStr, err := os.ReadFile("last-backup.txt") 750 - if err != nil { 751 - shouldBackupNow = true 752 - } else { 753 - lastBackup, err := time.Parse(time.RFC3339Nano, string(lastBackupStr)) 754 - if err != nil { 755 - shouldBackupNow = true 756 - } else if time.Since(lastBackup).Seconds() > 3600 { 757 - shouldBackupNow = true 758 - } 759 - } 760 - 761 - if shouldBackupNow { 762 - go s.doBackup() 763 - } 764 - 765 - ticker := time.NewTicker(time.Hour) 766 - for range ticker.C { 767 - go s.doBackup() 768 - } 769 643 } 770 644 771 645 func (s *Server) UpdateRepo(ctx context.Context, did string, root cid.Cid, rev string) error {