A container registry that uses the AT Protocol for manifest storage and S3 for blob storage. atcr.io
docker container atproto go

migrate envs to use yaml configs

evan.jarrett.net 9723de0b 914328db

verified
+20 -705
+1 -1
.air.hold.toml
··· 4 4 [build] 5 5 pre_cmd = ["go generate ./pkg/hold/..."] 6 6 cmd = "go build -buildvcs=false -o ./tmp/atcr-hold ./cmd/hold" 7 - entrypoint = ["./tmp/atcr-hold" , "serve"] 7 + entrypoint = ["./tmp/atcr-hold", "serve", "--config", "config-hold.example.yaml"] 8 8 include_ext = ["go", "html", "css", "js"] 9 9 exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "pkg/appview", "node_modules"] 10 10 exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]
+1 -1
.air.toml
··· 8 8 # Pre-build: generate assets if missing (each string is a shell command) 9 9 pre_cmd = ["go generate ./pkg/appview/..."] 10 10 cmd = "go build -buildvcs=false -o ./tmp/atcr-appview ./cmd/appview" 11 - entrypoint = ["./tmp/atcr-appview", "serve"] 11 + entrypoint = ["./tmp/atcr-appview", "serve", "--config", "config-appview.example.yaml"] 12 12 include_ext = ["go", "html", "css", "js"] 13 13 exclude_dir = ["bin", "tmp", "vendor", "deploy", "docs", ".git", "dist", "node_modules", "pkg/hold"] 14 14 exclude_regex = ["_test\\.go$", "cbor_gen\\.go$", "\\.min\\.js$", "public/css/style\\.css$", "public/icons\\.svg$"]
+1 -1
config-appview.example.yaml
··· 84 84 # Organization name for Terms of Service and Privacy Policy. Defaults to server.client_name. 85 85 company_name: "" 86 86 # Governing law jurisdiction for legal terms. 87 - jurisdiction: State of Texas, United States 87 + jurisdiction: ""
-216
deploy/.env.prod.template
··· 1 - # ATCR Production Environment Configuration 2 - # Copy this file to .env and fill in your values 3 - # 4 - # Usage: 5 - # 1. cp deploy/.env.prod.template .env 6 - # 2. Edit .env with your configuration 7 - # 3. systemctl restart atcr 8 - # 9 - # NOTE: This file is loaded by docker-compose.prod.yml 10 - 11 - # ============================================================================== 12 - # Domain Configuration 13 - # ============================================================================== 14 - 15 - # Main AppView domain (registry API + web UI) 16 - # REQUIRED: Update with your domain 17 - APPVIEW_DOMAIN=atcr.io 18 - 19 - # ============================================================================== 20 - # Hold Service Configuration 21 - # ============================================================================== 22 - 23 - # Hold service domain (REQUIRED) 24 - # The hostname where the hold service will be accessible 25 - # Used by docker-compose.prod.yml to derive: 26 - # - HOLD_PUBLIC_URL: https://${HOLD_DOMAIN} 27 - # - ATCR_DEFAULT_HOLD_DID: did:web:${HOLD_DOMAIN} 28 - # Example: hold01.atcr.io 29 - HOLD_DOMAIN=hold01.atcr.io 30 - 31 - # Your ATProto DID (REQUIRED for hold registration) 32 - # Get your DID from: https://bsky.social/xrpc/com.atproto.identity.resolveHandle?handle=yourhandle.bsky.social 33 - # Example: did:plc:abc123xyz789 34 - HOLD_OWNER=did:plc:pddp4xt5lgnv2qsegbzzs4xg 35 - 36 - # Directory path for embedded PDS carstore (SQLite database) 37 - # Default: /var/lib/atcr-hold 38 - # If empty, embedded PDS is disabled 39 - # 40 - # Note: This should be a directory path, NOT a file path 41 - # Carstore creates db.sqlite3 inside this directory 42 - # 43 - # The embedded PDS makes the hold a proper ATProto user with: 44 - # - did:web identity (derived from HOLD_DOMAIN) 45 - # - DID document at /.well-known/did.json 46 - # - XRPC endpoints for crew management 47 - # - ATProto blob endpoints (wraps existing presigned URL logic) 48 - 
# 49 - # Example: For HOLD_DOMAIN=hold01.atcr.io, the hold becomes did:web:hold01.atcr.io 50 - HOLD_DATABASE_DIR=/var/lib/atcr-hold 51 - 52 - # Path to signing key (auto-generated on first run if missing) 53 - # Default: {HOLD_DATABASE_DIR}/signing.key 54 - # HOLD_KEY_PATH=/var/lib/atcr-hold/signing.key 55 - 56 - # Allow public blob reads (pulls) without authentication 57 - # - true: Anyone can pull images (read-only) 58 - # - false: Only authenticated users can pull 59 - # Default: false (private) 60 - HOLD_PUBLIC=false 61 - 62 - # Allow all authenticated users to write to this hold 63 - # This setting controls write permissions for authenticated ATCR users 64 - # 65 - # - true: Any authenticated ATCR user can push images (treat all as crew) 66 - # Useful for shared/community holds where you want to allow 67 - # multiple users to push without explicit crew membership. 68 - # Users must still authenticate via ATProto OAuth. 69 - # 70 - # - false: Only hold owner and explicit crew members can push (default) 71 - # Write access requires io.atcr.hold.crew record in owner's PDS. 72 - # Most secure option for production holds. 73 - # 74 - # Read permissions are controlled by HOLD_PUBLIC (above). 75 - # 76 - # Security model: 77 - # Read: HOLD_PUBLIC=true → anonymous + authenticated users 78 - # HOLD_PUBLIC=false → authenticated users only 79 - # Write: HOLD_ALLOW_ALL_CREW=true → all authenticated users 80 - # HOLD_ALLOW_ALL_CREW=false → owner + crew only (verified via PDS) 81 - # 82 - # Use cases: 83 - # - Public registry: HOLD_PUBLIC=true, HOLD_ALLOW_ALL_CREW=true 84 - # - ATProto users only: HOLD_PUBLIC=false, HOLD_ALLOW_ALL_CREW=true 85 - # - Private hold (default): HOLD_PUBLIC=false, HOLD_ALLOW_ALL_CREW=false 86 - # 87 - # Default: false 88 - HOLD_ALLOW_ALL_CREW=false 89 - 90 - # Enable Bluesky posts when manifests are pushed 91 - # When enabled, the hold service creates Bluesky posts announcing new container 92 - # image pushes. 
Posts include image name, tag, size, and layer count. 93 - # 94 - # - true: Create Bluesky posts for manifest uploads 95 - # - false: Silent operation (no Bluesky posts) 96 - # 97 - # Note: This requires the hold owner to have OAuth credentials for posting. 98 - # See docs/BLUESKY_MANIFEST_POSTS.md for setup instructions. 99 - # 100 - # Default: false 101 - HOLD_BLUESKY_POSTS_ENABLED=true 102 - 103 - # ============================================================================== 104 - # S3/UpCloud Object Storage Configuration (REQUIRED) 105 - # ============================================================================== 106 - 107 - # S3 is the only supported storage backend. Presigned URLs are used for direct 108 - # client ↔ S3 transfers, eliminating the hold service as a bandwidth bottleneck. 109 - 110 - # S3 Access Credentials 111 - # Get these from UpCloud Object Storage console 112 - AWS_ACCESS_KEY_ID= 113 - AWS_SECRET_ACCESS_KEY= 114 - 115 - # S3 Region (for distribution S3 driver) 116 - # For third-party S3 providers (UpCloud, Storj, Minio), this value is ignored 117 - # when S3_ENDPOINT is set, but must be a valid AWS region to pass validation. 
118 - # Default: us-east-1 119 - AWS_REGION=us-east-1 120 - 121 - # S3 Bucket Name 122 - # Create this bucket in UpCloud Object Storage 123 - # Example: atcr-blobs 124 - S3_BUCKET=atcr 125 - 126 - # S3 Endpoint 127 - # Get this from UpCloud Console → Storage → Object Storage → Your bucket → "S3 endpoint" 128 - # Format: https://[bucket-id].upcloudobjects.com 129 - # Example: https://6vmss.upcloudobjects.com 130 - # 131 - # NOTE: Use the bucket-specific endpoint, NOT a custom domain 132 - # Custom domains break presigned URL generation 133 - S3_ENDPOINT=https://6vmss.upcloudobjects.com 134 - 135 - # ============================================================================== 136 - # AppView Configuration 137 - # ============================================================================== 138 - 139 - # Default hold service DID (derived from HOLD_DOMAIN in docker-compose.prod.yml) 140 - # Uncomment to override if you want to use a different hold service as the default 141 - # ATCR_DEFAULT_HOLD_DID=did:web:some-other-hold.example.com 142 - 143 - # OAuth client display name (shown in authorization screens) 144 - # Default: AT Container Registry 145 - # ATCR_CLIENT_NAME=AT Container Registry 146 - 147 - # Short brand name for page titles and metadata 148 - # Used in meta tags, page titles, and UI text 149 - # Default: ATCR 150 - # ATCR_CLIENT_SHORT_NAME=ATCR 151 - 152 - # ============================================================================== 153 - # Legal Page Customization 154 - # ============================================================================== 155 - 156 - # Company/organization name displayed in legal pages (Terms, Privacy) 157 - # Default: AT Container Registry 158 - ATCR_LEGAL_COMPANY_NAME=AT Container Registry 159 - 160 - # Governing law jurisdiction for legal terms 161 - # Default: State of Texas, United States 162 - ATCR_LEGAL_JURISDICTION=State of Texas, United States 163 - 164 - # 
============================================================================== 165 - # Logging Configuration 166 - # ============================================================================== 167 - 168 - # Log level: debug, info, warn, error 169 - # Default: info 170 - ATCR_LOG_LEVEL=debug 171 - 172 - # Log formatter: text, json 173 - # Default: text 174 - ATCR_LOG_FORMATTER=text 175 - 176 - # ============================================================================== 177 - # Jetstream Configuration (ATProto event streaming) 178 - # ============================================================================== 179 - 180 - # Jetstream WebSocket URL for real-time ATProto events 181 - # Default: wss://jetstream2.us-west.bsky.network/subscribe 182 - JETSTREAM_URL=wss://jetstream2.us-west.bsky.network/subscribe 183 - 184 - # Enable backfill worker to sync historical records 185 - # Default: true (recommended for production) 186 - ATCR_BACKFILL_ENABLED=true 187 - 188 - # ATProto relay endpoint for backfill sync API 189 - # Default: https://relay1.us-east.bsky.network 190 - ATCR_RELAY_ENDPOINT=https://relay1.us-east.bsky.network 191 - 192 - # ============================================================================== 193 - # CHECKLIST 194 - # ============================================================================== 195 - # 196 - # Before starting ATCR, ensure you have: 197 - # 198 - # ☐ Set APPVIEW_DOMAIN (e.g., atcr.io) 199 - # ☐ Set HOLD_DOMAIN (e.g., hold01.atcr.io) 200 - # ☐ Set HOLD_OWNER (your ATProto DID) 201 - # ☐ Set HOLD_DATABASE_DIR (default: /var/lib/atcr-hold) - enables embedded PDS 202 - # ☐ Set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY 203 - # ☐ Set S3_BUCKET (created in UpCloud Object Storage) 204 - # ☐ Set S3_ENDPOINT (UpCloud bucket endpoint, e.g., https://6vmss.upcloudobjects.com) 205 - # ☐ Configured DNS records: 206 - # - A record: atcr.io → server IP 207 - # - A record: hold01.atcr.io → server IP 208 - # - CNAME: blobs.atcr.io → 
[bucket].upcloudobjects.com 209 - # ☐ Disabled Cloudflare proxy (gray cloud, not orange) 210 - # ☐ Waited for DNS propagation (check with: dig atcr.io) 211 - # 212 - # After starting: 213 - # ☐ Complete hold OAuth registration (run: /opt/atcr/get-hold-oauth.sh) 214 - # ☐ Verify hold PDS: curl https://hold01.atcr.io/.well-known/did.json 215 - # ☐ Test registry: docker pull atcr.io/test/image 216 - # ☐ Monitor logs: /opt/atcr/logs.sh
+10 -33
deploy/docker-compose.prod.yml
··· 44 44 image: atcr-appview:latest 45 45 container_name: atcr-appview 46 46 restart: unless-stopped 47 + command: ["serve", "--config", "/config.yaml"] 48 + # Base config: config-appview.example.yaml 49 + # Env vars below override config file values for this deployment 47 50 environment: 48 - # Server configuration 49 - ATCR_HTTP_ADDR: :5000 50 51 ATCR_BASE_URL: https://${APPVIEW_DOMAIN:-atcr.io} 51 - 52 - # Storage configuration (derived from HOLD_DOMAIN) 53 52 ATCR_DEFAULT_HOLD_DID: ${ATCR_DEFAULT_HOLD_DID:-did:web:${HOLD_DOMAIN:-hold01.atcr.io}} 54 - 55 - # Authentication 56 - ATCR_AUTH_KEY_PATH: /var/lib/atcr/auth/private-key.pem 57 - ATCR_AUTH_CERT_PATH: /var/lib/atcr/auth/private-key.crt 58 - 59 - # UI configuration 60 - ATCR_UI_DATABASE_PATH: /var/lib/atcr/ui.db 61 - 62 - # Logging 63 53 ATCR_LOG_LEVEL: ${ATCR_LOG_LEVEL:-info} 64 54 ATCR_LOG_FORMATTER: ${ATCR_LOG_FORMATTER:-text} 65 - 66 - # Jetstream configuration 67 - JETSTREAM_URL: ${JETSTREAM_URL:-wss://jetstream2.us-west.bsky.network/subscribe} 68 - ATCR_BACKFILL_ENABLED: ${ATCR_BACKFILL_ENABLED:-true} 69 - ATCR_RELAY_ENDPOINT: ${ATCR_RELAY_ENDPOINT:-https://relay1.us-east.bsky.network} 70 55 volumes: 56 + - ./config-appview.yaml:/config.yaml:ro 71 57 # Persistent data: auth keys, UI database, OAuth tokens, Jetstream cache 72 58 - atcr-appview-data:/var/lib/atcr 73 59 networks: ··· 86 72 image: atcr-hold:latest 87 73 container_name: atcr-hold 88 74 restart: unless-stopped 75 + command: ["serve", "--config", "/config.yaml"] 76 + # Base config: config-hold.example.yaml 77 + # Env vars below override config file values for this deployment 89 78 environment: 90 - HOLD_ADMIN_ENABLED: true 91 - # Hold service configuration (derived from HOLD_DOMAIN) 92 79 HOLD_PUBLIC_URL: ${HOLD_PUBLIC_URL:-https://${HOLD_DOMAIN:-hold01.atcr.io}} 93 - HOLD_SERVER_ADDR: :8080 94 - HOLD_ALLOW_ALL_CREW: ${HOLD_ALLOW_ALL_CREW:-false} 95 - HOLD_PUBLIC: ${HOLD_PUBLIC:-false} 96 80 HOLD_OWNER: ${HOLD_OWNER:-} 97 81 
HOLD_BLUESKY_POSTS_ENABLED: ${HOLD_BLUESKY_POSTS_ENABLED:-true} 98 - 99 - # Embedded PDS configuration 100 - HOLD_DATABASE_DIR: ${HOLD_DATABASE_DIR:-/var/lib/atcr-hold} 101 - # HOLD_KEY_PATH: ${HOLD_KEY_PATH} # Optional, defaults to {HOLD_DATABASE_DIR}/signing.key 102 - 103 - # S3/UpCloud Object Storage configuration (REQUIRED) 82 + # S3/UpCloud Object Storage (REQUIRED) 104 83 AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-} 105 84 AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-} 106 85 AWS_REGION: ${AWS_REGION:-us-east-1} 107 86 S3_BUCKET: ${S3_BUCKET:-atcr-blobs} 108 87 S3_ENDPOINT: ${S3_ENDPOINT:-} 109 - 110 - # Logging 111 - ATCR_LOG_LEVEL: ${ATCR_LOG_LEVEL:-debug} 112 - ATCR_LOG_FORMATTER: ${ATCR_LOG_FORMATTER:-text} 88 + HOLD_LOG_LEVEL: ${ATCR_LOG_LEVEL:-info} 113 89 volumes: 90 + - ./config-hold.yaml:/config.yaml:ro 114 91 # PDS data (carstore SQLite + signing keys) 115 92 - atcr-hold-data:/var/lib/atcr-hold 116 93 - ./quotas.yaml:/quotas.yaml:ro
-344
deploy/init-upcloud.sh
··· 1 - #!/bin/bash 2 - # 3 - # ATCR UpCloud Initialization Script for Rocky Linux 4 - # 5 - # This script sets up ATCR on a fresh Rocky Linux instance. 6 - # Paste this into UpCloud's "User data" field when creating a server. 7 - # 8 - # What it does: 9 - # - Updates system packages 10 - # - Creates 2GB swap file (for 1GB RAM instances) 11 - # - Installs Docker and Docker Compose 12 - # - Creates directory structure 13 - # - Clones ATCR repository 14 - # - Creates systemd service for auto-start 15 - # - Builds and starts containers 16 - # 17 - # Post-deployment: 18 - # 1. Edit /opt/atcr/.env with your configuration 19 - # 2. Run: systemctl restart atcr 20 - # 3. Check logs: docker logs atcr-hold (for OAuth URL) 21 - # 4. Complete hold registration via OAuth 22 - 23 - set -euo pipefail 24 - 25 - # Configuration 26 - ATCR_DIR="/opt/atcr" 27 - ATCR_REPO="https://tangled.org/evan.jarrett.net/at-container-registry" # UPDATE THIS 28 - ATCR_BRANCH="main" 29 - 30 - # Simple logging without colors (for cloud-init log compatibility) 31 - log_info() { 32 - echo "[INFO] $1" 33 - } 34 - 35 - log_warn() { 36 - echo "[WARN] $1" 37 - } 38 - 39 - log_error() { 40 - echo "[ERROR] $1" 41 - } 42 - 43 - # Function to check if command exists 44 - command_exists() { 45 - command -v "$1" >/dev/null 2>&1 46 - } 47 - 48 - log_info "Starting ATCR deployment on Rocky Linux..." 49 - 50 - # Update system packages 51 - log_info "Updating system packages..." 52 - dnf update -y 53 - 54 - # Install required packages 55 - log_info "Installing prerequisites..." 56 - dnf install -y \ 57 - git \ 58 - wget \ 59 - curl \ 60 - nano \ 61 - vim 62 - 63 - log_info "Required ports: HTTP (80), HTTPS (443), SSH (22)" 64 - 65 - # Create swap file for instances with limited RAM 66 - if [ ! -f /swapfile ]; then 67 - log_info "Creating 2GB swap file (allows builds on 1GB RAM instances)..." 
68 - dd if=/dev/zero of=/swapfile bs=1M count=2048 status=progress 69 - chmod 600 /swapfile 70 - mkswap /swapfile 71 - swapon /swapfile 72 - 73 - # Make swap permanent 74 - echo '/swapfile none swap sw 0 0' >> /etc/fstab 75 - 76 - log_info "Swap file created and enabled" 77 - free -h 78 - else 79 - log_info "Swap file already exists" 80 - fi 81 - 82 - # Install Docker 83 - if ! command_exists docker; then 84 - log_info "Installing Docker..." 85 - 86 - # Add Docker repository 87 - dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo 88 - 89 - # Install Docker 90 - dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 91 - 92 - # Start and enable Docker 93 - systemctl enable --now docker 94 - 95 - log_info "Docker installed successfully" 96 - else 97 - log_info "Docker already installed" 98 - fi 99 - 100 - # Verify Docker Compose 101 - if ! docker compose version >/dev/null 2>&1; then 102 - log_error "Docker Compose plugin not found. Please install manually." 103 - exit 1 104 - fi 105 - 106 - log_info "Docker Compose version: $(docker compose version)" 107 - 108 - # Create ATCR directory 109 - log_info "Creating ATCR directory: $ATCR_DIR" 110 - mkdir -p "$ATCR_DIR" 111 - cd "$ATCR_DIR" 112 - 113 - # Clone repository or create minimal structure 114 - if [ -n "$ATCR_REPO" ] && [ "$ATCR_REPO" != "https://tangled.org/evan.jarrett.net/at-container-registry" ]; then 115 - log_info "Cloning ATCR repository..." 116 - git clone -b "$ATCR_BRANCH" "$ATCR_REPO" . 117 - else 118 - log_warn "ATCR_REPO not configured. You'll need to manually copy files to $ATCR_DIR" 119 - log_warn "Required files:" 120 - log_warn " - deploy/docker-compose.prod.yml" 121 - log_warn " - deploy/.env.prod.template" 122 - log_warn " - Dockerfile.appview" 123 - log_warn " - Dockerfile.hold" 124 - fi 125 - 126 - # Create .env file from template if it doesn't exist 127 - if [ -f "deploy/.env.prod.template" ] && [ ! 
-f "$ATCR_DIR/.env" ]; then 128 - log_info "Creating .env file from template..." 129 - cp deploy/.env.prod.template "$ATCR_DIR/.env" 130 - log_warn "IMPORTANT: Edit $ATCR_DIR/.env with your configuration!" 131 - fi 132 - 133 - # Create systemd services (caddy, appview, hold) 134 - log_info "Creating systemd services..." 135 - 136 - # Caddy service (reverse proxy for both appview and hold) 137 - cat > /etc/systemd/system/atcr-caddy.service <<'EOF' 138 - [Unit] 139 - Description=ATCR Caddy Reverse Proxy 140 - Requires=docker.service 141 - After=docker.service network-online.target 142 - Wants=network-online.target 143 - 144 - [Service] 145 - Type=oneshot 146 - RemainAfterExit=yes 147 - WorkingDirectory=/opt/atcr 148 - EnvironmentFile=/opt/atcr/.env 149 - 150 - # Start caddy container 151 - ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d caddy 152 - 153 - # Stop caddy container 154 - ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop caddy 155 - 156 - # Restart caddy container 157 - ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart caddy 158 - 159 - # Always restart on failure 160 - Restart=on-failure 161 - RestartSec=10 162 - 163 - [Install] 164 - WantedBy=multi-user.target 165 - EOF 166 - 167 - # AppView service (registry + web UI) 168 - cat > /etc/systemd/system/atcr-appview.service <<'EOF' 169 - [Unit] 170 - Description=ATCR AppView (Registry + Web UI) 171 - Requires=docker.service atcr-caddy.service 172 - After=docker.service network-online.target atcr-caddy.service 173 - Wants=network-online.target 174 - 175 - [Service] 176 - Type=oneshot 177 - RemainAfterExit=yes 178 - WorkingDirectory=/opt/atcr 179 - EnvironmentFile=/opt/atcr/.env 180 - 181 - # Start appview container 182 - ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d atcr-appview 183 - 184 - # Stop appview container 185 - ExecStop=/usr/bin/docker compose -f 
/opt/atcr/deploy/docker-compose.prod.yml stop atcr-appview 186 - 187 - # Restart appview container 188 - ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart atcr-appview 189 - 190 - # Always restart on failure 191 - Restart=on-failure 192 - RestartSec=10 193 - 194 - [Install] 195 - WantedBy=multi-user.target 196 - EOF 197 - 198 - # Hold service (storage backend) 199 - cat > /etc/systemd/system/atcr-hold.service <<'EOF' 200 - [Unit] 201 - Description=ATCR Hold (Storage Service) 202 - Requires=docker.service atcr-caddy.service 203 - After=docker.service network-online.target atcr-caddy.service 204 - Wants=network-online.target 205 - 206 - [Service] 207 - Type=oneshot 208 - RemainAfterExit=yes 209 - WorkingDirectory=/opt/atcr 210 - EnvironmentFile=/opt/atcr/.env 211 - 212 - # Start hold container 213 - ExecStart=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml up -d atcr-hold 214 - 215 - # Stop hold container 216 - ExecStop=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml stop atcr-hold 217 - 218 - # Restart hold container 219 - ExecReload=/usr/bin/docker compose -f /opt/atcr/deploy/docker-compose.prod.yml restart atcr-hold 220 - 221 - # Always restart on failure 222 - Restart=on-failure 223 - RestartSec=10 224 - 225 - [Install] 226 - WantedBy=multi-user.target 227 - EOF 228 - 229 - # Reload systemd 230 - log_info "Reloading systemd daemon..." 231 - systemctl daemon-reload 232 - 233 - # Enable all services (but don't start yet - user needs to configure .env) 234 - systemctl enable atcr-caddy.service 235 - systemctl enable atcr-appview.service 236 - systemctl enable atcr-hold.service 237 - 238 - log_info "Systemd services created and enabled" 239 - 240 - # Create helper scripts 241 - log_info "Creating helper scripts..." 
242 - 243 - # Script to rebuild and restart 244 - cat > "$ATCR_DIR/rebuild.sh" <<'EOF' 245 - #!/bin/bash 246 - set -e 247 - cd /opt/atcr 248 - docker compose -f deploy/docker-compose.prod.yml build 249 - docker compose -f deploy/docker-compose.prod.yml up -d 250 - docker compose -f deploy/docker-compose.prod.yml logs -f 251 - EOF 252 - chmod +x "$ATCR_DIR/rebuild.sh" 253 - 254 - # Script to view logs 255 - cat > "$ATCR_DIR/logs.sh" <<'EOF' 256 - #!/bin/bash 257 - cd /opt/atcr 258 - docker compose -f deploy/docker-compose.prod.yml logs -f "$@" 259 - EOF 260 - chmod +x "$ATCR_DIR/logs.sh" 261 - 262 - log_info "Helper scripts created in $ATCR_DIR" 263 - 264 - # Print completion message 265 - cat <<'EOF' 266 - 267 - ================================================================================ 268 - ATCR Installation Complete! 269 - ================================================================================ 270 - 271 - NEXT STEPS: 272 - 273 - 1. Configure environment variables: 274 - nano /opt/atcr/.env 275 - 276 - Required settings: 277 - - AWS_ACCESS_KEY_ID (UpCloud S3 credentials) 278 - - AWS_SECRET_ACCESS_KEY 279 - 280 - Pre-configured (verify these are correct): 281 - - APPVIEW_DOMAIN=atcr.io 282 - - HOLD_DOMAIN=hold01.atcr.io 283 - - HOLD_OWNER=did:plc:pddp4xt5lgnv2qsegbzzs4xg 284 - - S3_BUCKET=atcr 285 - - S3_ENDPOINT=https://blobs.atcr.io 286 - 287 - 2. Configure UpCloud Cloud Firewall (in control panel): 288 - Allow: TCP 22 (SSH) 289 - Allow: TCP 80 (HTTP) 290 - Allow: TCP 443 (HTTPS) 291 - Drop: Everything else 292 - 293 - 3. Configure DNS (Cloudflare - DNS-only mode): 294 - EOF 295 - 296 - echo " A atcr.io → $(curl -s ifconfig.me || echo '[server-ip]') (gray cloud)" 297 - echo " A hold01.atcr.io → $(curl -s ifconfig.me || echo '[server-ip]') (gray cloud)" 298 - echo " CNAME blobs.atcr.io → atcr.us-chi1.upcloudobjects.com (gray cloud)" 299 - 300 - cat <<'EOF' 301 - 302 - 4. 
Start ATCR services: 303 - systemctl start atcr-caddy atcr-appview atcr-hold 304 - 305 - 5. Check status: 306 - systemctl status atcr-caddy 307 - systemctl status atcr-appview 308 - systemctl status atcr-hold 309 - docker ps 310 - /opt/atcr/logs.sh 311 - 312 - Helper Scripts: 313 - /opt/atcr/rebuild.sh - Rebuild and restart containers 314 - /opt/atcr/logs.sh [service] - View logs (e.g., logs.sh atcr-hold) 315 - 316 - Service Management: 317 - systemctl start atcr-caddy - Start Caddy reverse proxy 318 - systemctl start atcr-appview - Start AppView (registry + UI) 319 - systemctl start atcr-hold - Start Hold (storage service) 320 - 321 - systemctl stop atcr-appview - Stop AppView only 322 - systemctl stop atcr-hold - Stop Hold only 323 - systemctl stop atcr-caddy - Stop all (stops reverse proxy) 324 - 325 - systemctl restart atcr-appview - Restart AppView 326 - systemctl restart atcr-hold - Restart Hold 327 - 328 - systemctl status atcr-caddy - Check Caddy status 329 - systemctl status atcr-appview - Check AppView status 330 - systemctl status atcr-hold - Check Hold status 331 - 332 - Documentation: 333 - https://tangled.org/evan.jarrett.net/at-container-registry 334 - 335 - IMPORTANT: 336 - - Edit /opt/atcr/.env with S3 credentials before starting! 337 - - Configure UpCloud cloud firewall (see step 2) 338 - - DNS must be configured and propagated 339 - - Cloudflare proxy must be DISABLED (gray cloud) 340 - - Complete hold OAuth registration before first push 341 - 342 - EOF 343 - 344 - log_info "Installation complete. Follow the next steps above."
-41
deploy/quotas.yaml
··· 1 - # ATCR Hold Service Quota Configuration 2 - # Copy this file to quotas.yaml to enable quota enforcement. 3 - # If quotas.yaml doesn't exist, quotas are disabled (unlimited for all users). 4 - 5 - # Tiers define quota levels using nautical crew ranks. 6 - # Each tier has a quota limit specified in human-readable format. 7 - # Supported units: B, KB, MB, GB, TB, PB (case-insensitive) 8 - tiers: 9 - # Entry-level crew - starter tier for new users (free) 10 - swabbie: 11 - quota: 2GB 12 - 13 - # Standard crew - for regular users 14 - deckhand: 15 - quota: 5GB 16 - 17 - # Mid-level crew - for regular contributors 18 - bosun: 19 - quota: 10GB 20 - 21 - # Senior crew - for power users or trusted contributors 22 - #quartermaster: 23 - # quota: 50GB 24 - 25 - # You can add custom tiers with any name: 26 - # admiral: 27 - # quota: 1TB 28 - 29 - defaults: 30 - # Default tier assigned to new crew members who don't have an explicit tier. 31 - # This tier must exist in the tiers section above. 32 - new_crew_tier: swabbie 33 - 34 - # Notes: 35 - # - The hold captain (owner) always has unlimited quota regardless of tiers. 36 - # - Crew members can be assigned a specific tier in their crew record. 37 - # - If a crew member's tier doesn't exist in config, they fall back to the default. 38 - # - Quota is calculated per-user by summing unique blob sizes (deduplicated). 39 - # - Quota is checked when pushing manifests (after blobs are already uploaded). 40 - # - Billing configuration (Stripe prices, descriptions) goes in a separate 41 - # top-level "billing:" section. See billing documentation for details.
-55
deploy/request-crawl.sh
··· 1 - #!/bin/bash 2 - # 3 - # Request crawl for a PDS from the Bluesky relay 4 - # 5 - # Usage: ./request-crawl.sh <hostname> [relay-url] 6 - # Example: ./request-crawl.sh hold01.atcr.io 7 - # 8 - 9 - set -e 10 - 11 - DEFAULT_RELAY="https://bsky.network/xrpc/com.atproto.sync.requestCrawl" 12 - 13 - # Parse arguments 14 - HOSTNAME="${1:-}" 15 - RELAY_URL="${2:-$DEFAULT_RELAY}" 16 - 17 - # Validate hostname 18 - if [ -z "$HOSTNAME" ]; then 19 - echo "Error: hostname is required" >&2 20 - echo "" >&2 21 - echo "Usage: $0 <hostname> [relay-url]" >&2 22 - echo "Example: $0 hold01.atcr.io" >&2 23 - echo "" >&2 24 - echo "Options:" >&2 25 - echo " hostname Hostname of the PDS to request crawl for (required)" >&2 26 - echo " relay-url Relay URL to send crawl request to (default: $DEFAULT_RELAY)" >&2 27 - exit 1 28 - fi 29 - 30 - # Log what we're doing 31 - echo "Requesting crawl for hostname: $HOSTNAME" 32 - echo "Sending to relay: $RELAY_URL" 33 - 34 - # Make the request 35 - RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$RELAY_URL" \ 36 - -H "Content-Type: application/json" \ 37 - -d "{\"hostname\":\"$HOSTNAME\"}") 38 - 39 - # Split response and status code 40 - HTTP_BODY=$(echo "$RESPONSE" | head -n -1) 41 - HTTP_CODE=$(echo "$RESPONSE" | tail -n 1) 42 - 43 - # Check response 44 - if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then 45 - echo "✅ Success! Crawl requested for $HOSTNAME" 46 - if [ -n "$HTTP_BODY" ]; then 47 - echo "Response: $HTTP_BODY" 48 - fi 49 - else 50 - echo "❌ Failed with status $HTTP_CODE" >&2 51 - if [ -n "$HTTP_BODY" ]; then 52 - echo "Response: $HTTP_BODY" >&2 53 - fi 54 - exit 1 55 - fi
+4 -10
docker-compose.yml
··· 10 10 # Optional: Load from .env.appview file (create from .env.appview.example) 11 11 # env_file: 12 12 # - .env.appview 13 + # Base config: config-appview.example.yaml (passed via Air entrypoint) 14 + # Env vars below override config file values for local dev 13 15 environment: 14 16 # ATCR_SERVER_CLIENT_NAME: "Seamark" 15 17 # ATCR_SERVER_CLIENT_SHORT_NAME: "Seamark" 16 - # Server configuration 17 - ATCR_SERVER_ADDR: :5000 18 18 ATCR_SERVER_DEFAULT_HOLD_DID: did:web:172.28.0.3:8080 19 - ATCR_JETSTREAM_BACKFILL_ENABLED: "true" 20 - # Test mode - fallback to default hold when user's hold is unreachable 21 19 ATCR_SERVER_TEST_MODE: "true" 22 - # Logging 23 20 ATCR_LOG_LEVEL: debug 24 - # Log shipping (uncomment to enable) 25 21 LOG_SHIPPER_BACKEND: victoria 26 22 LOG_SHIPPER_URL: http://172.28.0.10:9428 27 23 # Limit local Docker logs - real logs go to Victoria Logs ··· 53 49 atcr-hold: 54 50 env_file: 55 51 - ../atcr-secrets.env # Load S3/Storj credentials from external file 52 + # Base config: config-hold.example.yaml (passed via Air entrypoint) 53 + # Env vars below override config file values for local dev 56 54 environment: 57 - HOLD_ADMIN_ENABLED: true 58 55 HOLD_SERVER_PUBLIC_URL: http://172.28.0.3:8080 59 56 HOLD_REGISTRATION_OWNER_DID: did:plc:pddp4xt5lgnv2qsegbzzs4xg 60 - HOLD_SERVER_PUBLIC: false 61 57 HOLD_REGISTRATION_ALLOW_ALL_CREW: true 62 58 HOLD_SERVER_TEST_MODE: true 63 59 # Stripe billing (only used with -tags billing) 64 60 STRIPE_SECRET_KEY: sk_test_ 65 61 STRIPE_PUBLISHABLE_KEY: pk_test_ 66 62 STRIPE_WEBHOOK_SECRET: whsec_ 67 - # Logging 68 63 HOLD_LOG_LEVEL: debug 69 - # Log shipping (uncomment to enable) 70 64 LOG_SHIPPER_BACKEND: victoria 71 65 LOG_SHIPPER_URL: http://172.28.0.10:9428 72 66 # S3 storage config comes from env_file (AWS_*, S3_*)
+1 -1
pkg/appview/config.go
··· 181 181 182 182 // Legal defaults 183 183 v.SetDefault("legal.company_name", "") 184 - v.SetDefault("legal.jurisdiction", "State of Texas, United States") 184 + v.SetDefault("legal.jurisdiction", "") 185 185 186 186 // Log formatter (used by distribution config, not in Config struct) 187 187 v.SetDefault("log_formatter", "text")
+2 -2
pkg/hold/config.go
··· 163 163 // Registration defaults 164 164 v.SetDefault("registration.owner_did", "") 165 165 v.SetDefault("registration.allow_all_crew", false) 166 - v.SetDefault("registration.profile_avatar_url", "https://imgs.blue/evan.jarrett.net/1TpTOdtS60GdJWBYEqtK22y688jajbQ9a5kbYRFtwuqrkBAE") 166 + v.SetDefault("registration.profile_avatar_url", "https://atcr.io/web-app-manifest-192x192.png") 167 167 v.SetDefault("registration.enable_bluesky_posts", false) 168 168 169 169 // Database defaults ··· 174 174 v.SetDefault("database.libsql_sync_interval", "60s") 175 175 176 176 // Admin defaults 177 - v.SetDefault("admin.enabled", false) 177 + v.SetDefault("admin.enabled", true) 178 178 179 179 // Storage defaults 180 180 v.SetDefault("storage.access_key", "")