A container registry that uses the AT Protocol for manifest storage and S3 for blob storage. atcr.io
docker container atproto go

Fix the star icon not being filled in; add the ability to deploy the scanner on the same server as the hold.

evan.jarrett.net ec2063ef 8048921f

verified
+840 -151
+110 -2
deploy/upcloud/cloudinit.go
··· 16 //go:embed systemd/hold.service.tmpl 17 var holdServiceTmpl string 18 19 //go:embed configs/appview.yaml.tmpl 20 var appviewConfigTmpl string 21 22 //go:embed configs/hold.yaml.tmpl 23 var holdConfigTmpl string 24 25 //go:embed configs/cloudinit.sh.tmpl 26 var cloudInitTmpl string ··· 41 HoldDomain string // e.g. "us-chi1.cove.seamark.dev" 42 HoldDid string // e.g. "did:web:us-chi1.cove.seamark.dev" 43 BasePath string // e.g. "/var/lib/seamark" 44 } 45 46 // renderConfig executes a Go template with the given values. ··· 78 return buf.String(), nil 79 } 80 81 // generateAppviewCloudInit generates the cloud-init user-data script for the appview server. 82 func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues, goVersion string) (string, error) { 83 naming := cfg.Naming() ··· 119 } 120 121 // generateHoldCloudInit generates the cloud-init user-data script for the hold server. 122 - func generateHoldCloudInit(cfg *InfraConfig, vals *ConfigValues, goVersion string) (string, error) { 123 naming := cfg.Naming() 124 125 configYAML, err := renderConfig(holdConfigTmpl, vals) ··· 139 return "", fmt.Errorf("hold service unit: %w", err) 140 } 141 142 - return generateCloudInit(cloudInitParams{ 143 GoVersion: goVersion, 144 BinaryName: naming.Hold(), 145 BuildCmd: "hold", ··· 156 LogFile: naming.LogFile(), 157 DisplayName: naming.DisplayName(), 158 }) 159 } 160 161 type cloudInitParams struct {
··· 16 //go:embed systemd/hold.service.tmpl 17 var holdServiceTmpl string 18 19 + //go:embed systemd/scanner.service.tmpl 20 + var scannerServiceTmpl string 21 + 22 //go:embed configs/appview.yaml.tmpl 23 var appviewConfigTmpl string 24 25 //go:embed configs/hold.yaml.tmpl 26 var holdConfigTmpl string 27 + 28 + //go:embed configs/scanner.yaml.tmpl 29 + var scannerConfigTmpl string 30 31 //go:embed configs/cloudinit.sh.tmpl 32 var cloudInitTmpl string ··· 47 HoldDomain string // e.g. "us-chi1.cove.seamark.dev" 48 HoldDid string // e.g. "did:web:us-chi1.cove.seamark.dev" 49 BasePath string // e.g. "/var/lib/seamark" 50 + 51 + // Scanner (auto-generated shared secret) 52 + ScannerSecret string // hex-encoded 32-byte secret; empty disables scanning 53 } 54 55 // renderConfig executes a Go template with the given values. ··· 87 return buf.String(), nil 88 } 89 90 + // scannerServiceUnitParams holds values for rendering the scanner systemd unit. 91 + // Extends the standard fields with HoldServiceName for the After= dependency. 92 + type scannerServiceUnitParams struct { 93 + DisplayName string // e.g. "Seamark" 94 + User string // e.g. "seamark" 95 + BinaryPath string // e.g. "/opt/seamark/bin/seamark-scanner" 96 + ConfigPath string // e.g. "/etc/seamark/scanner.yaml" 97 + DataDir string // e.g. "/var/lib/seamark" 98 + ServiceName string // e.g. "seamark-scanner" 99 + HoldServiceName string // e.g. 
"seamark-hold" (After= dependency) 100 + } 101 + 102 + func renderScannerServiceUnit(p scannerServiceUnitParams) (string, error) { 103 + t, err := template.New("scanner-service").Parse(scannerServiceTmpl) 104 + if err != nil { 105 + return "", fmt.Errorf("parse scanner service template: %w", err) 106 + } 107 + var buf bytes.Buffer 108 + if err := t.Execute(&buf, p); err != nil { 109 + return "", fmt.Errorf("render scanner service template: %w", err) 110 + } 111 + return buf.String(), nil 112 + } 113 + 114 // generateAppviewCloudInit generates the cloud-init user-data script for the appview server. 115 func generateAppviewCloudInit(cfg *InfraConfig, vals *ConfigValues, goVersion string) (string, error) { 116 naming := cfg.Naming() ··· 152 } 153 154 // generateHoldCloudInit generates the cloud-init user-data script for the hold server. 155 + // When withScanner is true, a second phase is appended that builds the scanner binary, 156 + // creates scanner data directories, and installs a scanner systemd service. 
157 + func generateHoldCloudInit(cfg *InfraConfig, vals *ConfigValues, goVersion string, withScanner bool) (string, error) { 158 naming := cfg.Naming() 159 160 configYAML, err := renderConfig(holdConfigTmpl, vals) ··· 174 return "", fmt.Errorf("hold service unit: %w", err) 175 } 176 177 + script, err := generateCloudInit(cloudInitParams{ 178 GoVersion: goVersion, 179 BinaryName: naming.Hold(), 180 BuildCmd: "hold", ··· 191 LogFile: naming.LogFile(), 192 DisplayName: naming.DisplayName(), 193 }) 194 + if err != nil { 195 + return "", err 196 + } 197 + 198 + if !withScanner { 199 + return script, nil 200 + } 201 + 202 + // Render scanner config YAML 203 + scannerConfigYAML, err := renderConfig(scannerConfigTmpl, vals) 204 + if err != nil { 205 + return "", fmt.Errorf("scanner config: %w", err) 206 + } 207 + 208 + // Append scanner build and setup phase 209 + scannerUnit, err := renderScannerServiceUnit(scannerServiceUnitParams{ 210 + DisplayName: naming.DisplayName(), 211 + User: naming.SystemUser(), 212 + BinaryPath: naming.InstallDir() + "/bin/" + naming.Scanner(), 213 + ConfigPath: naming.ScannerConfigPath(), 214 + DataDir: naming.BasePath(), 215 + ServiceName: naming.Scanner(), 216 + HoldServiceName: naming.Hold(), 217 + }) 218 + if err != nil { 219 + return "", fmt.Errorf("scanner service unit: %w", err) 220 + } 221 + 222 + // Escape single quotes for heredoc embedding 223 + scannerUnit = strings.ReplaceAll(scannerUnit, "'", "'\\''") 224 + scannerConfigYAML = strings.ReplaceAll(scannerConfigYAML, "'", "'\\''") 225 + 226 + scannerPhase := fmt.Sprintf(` 227 + # === Scanner Setup === 228 + echo "Building scanner..." 
229 + cd %s/scanner 230 + CGO_ENABLED=1 go build \ 231 + -ldflags="-s -w" \ 232 + -trimpath \ 233 + -o ../bin/%s ./cmd/scanner 234 + cd %s 235 + 236 + # Scanner data dirs 237 + mkdir -p %s/vulndb %s/tmp 238 + chown -R %s:%s %s 239 + 240 + # Scanner config 241 + cat > %s << 'CFGEOF' 242 + %s 243 + CFGEOF 244 + 245 + # Scanner systemd service 246 + cat > /etc/systemd/system/%s.service << 'SVCEOF' 247 + %s 248 + SVCEOF 249 + systemctl daemon-reload 250 + systemctl enable %s 251 + 252 + echo "=== Scanner setup complete ===" 253 + `, 254 + naming.InstallDir(), 255 + naming.Scanner(), 256 + naming.InstallDir(), 257 + naming.ScannerDataDir(), naming.ScannerDataDir(), 258 + naming.SystemUser(), naming.SystemUser(), naming.ScannerDataDir(), 259 + naming.ScannerConfigPath(), 260 + scannerConfigYAML, 261 + naming.Scanner(), 262 + scannerUnit, 263 + naming.Scanner(), 264 + ) 265 + 266 + return script + scannerPhase, nil 267 } 268 269 type cloudInitParams struct {
+1 -1
deploy/upcloud/configs/hold.yaml.tmpl
··· 50 defaults: 51 new_crew_tier: deckhand 52 scanner: 53 - secret: "" 54
··· 50 defaults: 51 new_crew_tier: deckhand 52 scanner: 53 + secret: "{{.ScannerSecret}}" 54
+21
deploy/upcloud/configs/scanner.yaml.tmpl
···
··· 1 + version: "0.1" 2 + log_level: info 3 + log_shipper: 4 + backend: "" 5 + url: "" 6 + batch_size: 100 7 + flush_interval: 5s 8 + username: "" 9 + password: "" 10 + server: 11 + addr: :9090 12 + hold: 13 + url: "ws://localhost:8080" 14 + secret: "{{.ScannerSecret}}" 15 + scanner: 16 + workers: 2 17 + queue_size: 100 18 + vuln: 19 + enabled: true 20 + db_path: "{{.BasePath}}/scanner/vulndb" 21 + tmp_dir: "{{.BasePath}}/scanner/tmp"
deploy/upcloud/deploy

This is a binary file and will not be displayed.

+9
deploy/upcloud/naming.go
··· 48 // LBName returns the load balancer name (e.g. "seamark-lb"). 49 func (n Naming) LBName() string { return n.ClientName + "-lb" } 50 51 // S3Name returns the name used for S3 storage, user, and bucket. 52 func (n Naming) S3Name() string { return n.ClientName }
··· 48 // LBName returns the load balancer name (e.g. "seamark-lb"). 49 func (n Naming) LBName() string { return n.ClientName + "-lb" } 50 51 + // Scanner returns the scanner binary/service name (e.g. "seamark-scanner"). 52 + func (n Naming) Scanner() string { return n.ClientName + "-scanner" } 53 + 54 + // ScannerConfigPath returns the scanner config file path. 55 + func (n Naming) ScannerConfigPath() string { return n.ConfigDir() + "/scanner.yaml" } 56 + 57 + // ScannerDataDir returns the scanner data directory (e.g. "/var/lib/seamark/scanner"). 58 + func (n Naming) ScannerDataDir() string { return n.BasePath() + "/scanner" } 59 + 60 // S3Name returns the name used for S3 storage, user, and bucket. 61 func (n Naming) S3Name() string { return n.ClientName }
+56 -14
deploy/upcloud/provision.go
··· 3 import ( 4 "bufio" 5 "context" 6 "crypto/sha256" 7 "encoding/base64" 8 "fmt" 9 "os" 10 "strings" ··· 25 plan, _ := cmd.Flags().GetString("plan") 26 sshKey, _ := cmd.Flags().GetString("ssh-key") 27 s3Secret, _ := cmd.Flags().GetString("s3-secret") 28 - return cmdProvision(token, zone, plan, sshKey, s3Secret) 29 }, 30 } 31 ··· 34 provisionCmd.Flags().String("plan", "", "Server plan (interactive picker if omitted)") 35 provisionCmd.Flags().String("ssh-key", "", "Path to SSH public key file (required)") 36 provisionCmd.Flags().String("s3-secret", "", "S3 secret access key (for existing object storage)") 37 provisionCmd.MarkFlagRequired("ssh-key") 38 rootCmd.AddCommand(provisionCmd) 39 } 40 41 - func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string) error { 42 cfg, err := loadConfig(zone, plan, sshKeyPath, s3Secret) 43 if err != nil { 44 return err ··· 79 state.ClientName = cfg.ClientName 80 state.RepoBranch = cfg.RepoBranch 81 82 goVersion, err := requiredGoVersion() 83 if err != nil { 84 return err ··· 136 137 // Build config template values 138 vals := &ConfigValues{ 139 - S3Endpoint: state.ObjectStorage.Endpoint, 140 - S3Region: state.ObjectStorage.Region, 141 - S3Bucket: state.ObjectStorage.Bucket, 142 - S3AccessKey: state.ObjectStorage.AccessKeyID, 143 - S3SecretKey: s3SecretKey, 144 - Zone: cfg.Zone, 145 - HoldDomain: holdDomain, 146 - HoldDid: "did:web:" + holdDomain, 147 - BasePath: naming.BasePath(), 148 } 149 150 // 2. Private network ··· 215 // 4. 
Hold server 216 if state.Hold.UUID != "" { 217 fmt.Printf("Hold: %s (exists)\n", state.Hold.UUID) 218 - holdScript, err := generateHoldCloudInit(cfg, vals, goVersion) 219 if err != nil { 220 return err 221 } ··· 229 if err := syncConfigKeys("hold", state.Hold.PublicIP, naming.HoldConfigPath(), holdConfigYAML); err != nil { 230 return fmt.Errorf("hold config sync: %w", err) 231 } 232 } else { 233 fmt.Println("Creating hold server...") 234 - holdUserData, err := generateHoldCloudInit(cfg, vals, goVersion) 235 if err != nil { 236 return err 237 } ··· 316 fmt.Println() 317 fmt.Println("Next steps:") 318 fmt.Println(" 1. Wait ~5 min for cloud-init to complete") 319 - fmt.Printf(" 2. systemctl start %s / %s\n", naming.Appview(), naming.Hold()) 320 fmt.Println(" 3. Configure DNS records above") 321 322 return nil ··· 930 931 fmt.Printf(" %s: cloud-init complete\n", name) 932 return nil 933 } 934 935 // writeRemoteCloudInit writes the local cloud-init script to the remote server
··· 3 import ( 4 "bufio" 5 "context" 6 + crypto_rand "crypto/rand" 7 "crypto/sha256" 8 "encoding/base64" 9 + "encoding/hex" 10 "fmt" 11 "os" 12 "strings" ··· 27 plan, _ := cmd.Flags().GetString("plan") 28 sshKey, _ := cmd.Flags().GetString("ssh-key") 29 s3Secret, _ := cmd.Flags().GetString("s3-secret") 30 + withScanner, _ := cmd.Flags().GetBool("with-scanner") 31 + return cmdProvision(token, zone, plan, sshKey, s3Secret, withScanner) 32 }, 33 } 34 ··· 37 provisionCmd.Flags().String("plan", "", "Server plan (interactive picker if omitted)") 38 provisionCmd.Flags().String("ssh-key", "", "Path to SSH public key file (required)") 39 provisionCmd.Flags().String("s3-secret", "", "S3 secret access key (for existing object storage)") 40 + provisionCmd.Flags().Bool("with-scanner", false, "Deploy vulnerability scanner alongside hold") 41 provisionCmd.MarkFlagRequired("ssh-key") 42 rootCmd.AddCommand(provisionCmd) 43 } 44 45 + func cmdProvision(token, zone, plan, sshKeyPath, s3Secret string, withScanner bool) error { 46 cfg, err := loadConfig(zone, plan, sshKeyPath, s3Secret) 47 if err != nil { 48 return err ··· 83 state.ClientName = cfg.ClientName 84 state.RepoBranch = cfg.RepoBranch 85 86 + // Scanner setup 87 + if withScanner { 88 + state.ScannerEnabled = true 89 + if state.ScannerSecret == "" { 90 + secret, err := generateScannerSecret() 91 + if err != nil { 92 + return fmt.Errorf("generate scanner secret: %w", err) 93 + } 94 + state.ScannerSecret = secret 95 + fmt.Printf("Generated scanner shared secret\n") 96 + } 97 + saveState(state) 98 + } 99 + 100 goVersion, err := requiredGoVersion() 101 if err != nil { 102 return err ··· 154 155 // Build config template values 156 vals := &ConfigValues{ 157 + S3Endpoint: state.ObjectStorage.Endpoint, 158 + S3Region: state.ObjectStorage.Region, 159 + S3Bucket: state.ObjectStorage.Bucket, 160 + S3AccessKey: state.ObjectStorage.AccessKeyID, 161 + S3SecretKey: s3SecretKey, 162 + Zone: cfg.Zone, 163 + HoldDomain: holdDomain, 164 + 
HoldDid: "did:web:" + holdDomain, 165 + BasePath: naming.BasePath(), 166 + ScannerSecret: state.ScannerSecret, 167 } 168 169 // 2. Private network ··· 234 // 4. Hold server 235 if state.Hold.UUID != "" { 236 fmt.Printf("Hold: %s (exists)\n", state.Hold.UUID) 237 + holdScript, err := generateHoldCloudInit(cfg, vals, goVersion, state.ScannerEnabled) 238 if err != nil { 239 return err 240 } ··· 248 if err := syncConfigKeys("hold", state.Hold.PublicIP, naming.HoldConfigPath(), holdConfigYAML); err != nil { 249 return fmt.Errorf("hold config sync: %w", err) 250 } 251 + if state.ScannerEnabled { 252 + scannerConfigYAML, err := renderConfig(scannerConfigTmpl, vals) 253 + if err != nil { 254 + return fmt.Errorf("render scanner config: %w", err) 255 + } 256 + if err := syncConfigKeys("scanner", state.Hold.PublicIP, naming.ScannerConfigPath(), scannerConfigYAML); err != nil { 257 + return fmt.Errorf("scanner config sync: %w", err) 258 + } 259 + } 260 } else { 261 fmt.Println("Creating hold server...") 262 + holdUserData, err := generateHoldCloudInit(cfg, vals, goVersion, state.ScannerEnabled) 263 if err != nil { 264 return err 265 } ··· 344 fmt.Println() 345 fmt.Println("Next steps:") 346 fmt.Println(" 1. Wait ~5 min for cloud-init to complete") 347 + if state.ScannerEnabled { 348 + fmt.Printf(" 2. systemctl start %s / %s / %s\n", naming.Appview(), naming.Hold(), naming.Scanner()) 349 + } else { 350 + fmt.Printf(" 2. systemctl start %s / %s\n", naming.Appview(), naming.Hold()) 351 + } 352 fmt.Println(" 3. Configure DNS records above") 353 354 return nil ··· 962 963 fmt.Printf(" %s: cloud-init complete\n", name) 964 return nil 965 + } 966 + 967 + // generateScannerSecret generates a random 32-byte hex-encoded shared secret 968 + // for authenticating scanner-to-hold WebSocket connections. 
969 + func generateScannerSecret() (string, error) { 970 + b := make([]byte, 32) 971 + if _, err := crypto_rand.Read(b); err != nil { 972 + return "", err 973 + } 974 + return hex.EncodeToString(b), nil 975 } 976 977 // writeRemoteCloudInit writes the local cloud-init script to the remote server
+4 -2
deploy/upcloud/state.go
··· 16 Network StateRef `json:"network"` 17 Appview ServerState `json:"appview"` 18 Hold ServerState `json:"hold"` 19 - LB StateRef `json:"loadbalancer"` 20 - ObjectStorage ObjectStorageState `json:"object_storage"` 21 } 22 23 // Naming returns a Naming helper, defaulting to "seamark" if ClientName is empty.
··· 16 Network StateRef `json:"network"` 17 Appview ServerState `json:"appview"` 18 Hold ServerState `json:"hold"` 19 + LB StateRef `json:"loadbalancer"` 20 + ObjectStorage ObjectStorageState `json:"object_storage"` 21 + ScannerEnabled bool `json:"scanner_enabled,omitempty"` 22 + ScannerSecret string `json:"scanner_secret,omitempty"` 23 } 24 25 // Naming returns a Naming helper, defaulting to "seamark" if ClientName is empty.
+25
deploy/upcloud/status.go
··· 90 fmt.Println() 91 } 92 93 // LB status 94 if state.LB.UUID != "" { 95 fmt.Printf("Load Balancer: %s\n", state.LB.UUID)
··· 90 fmt.Println() 91 } 92 93 + // Scanner status (runs on hold server) 94 + if state.ScannerEnabled { 95 + fmt.Printf("Scanner (on hold server)\n") 96 + if state.Hold.PublicIP != "" { 97 + output, err := runSSH(state.Hold.PublicIP, fmt.Sprintf( 98 + "systemctl is-active %s 2>/dev/null || echo 'inactive'; curl -sf http://localhost:9090/healthz > /dev/null 2>&1 && echo 'health:ok' || echo 'health:fail'", 99 + naming.Scanner(), 100 + ), false) 101 + if err != nil { 102 + fmt.Printf(" Service: unreachable\n") 103 + } else { 104 + lines := strings.Split(strings.TrimSpace(output), "\n") 105 + for _, line := range lines { 106 + line = strings.TrimSpace(line) 107 + if line == "active" || line == "inactive" { 108 + fmt.Printf(" Service: %s\n", line) 109 + } else if strings.HasPrefix(line, "health:") { 110 + fmt.Printf(" Health: %s\n", strings.TrimPrefix(line, "health:")) 111 + } 112 + } 113 + } 114 + } 115 + fmt.Println() 116 + } 117 + 118 // LB status 119 if state.LB.UUID != "" { 120 fmt.Printf("Load Balancer: %s\n", state.LB.UUID)
+25
deploy/upcloud/systemd/scanner.service.tmpl
···
··· 1 + [Unit] 2 + Description={{.DisplayName}} Scanner (Vulnerability Scanning) 3 + After=network-online.target {{.HoldServiceName}}.service 4 + Wants=network-online.target 5 + 6 + [Service] 7 + Type=simple 8 + User={{.User}} 9 + Group={{.User}} 10 + ExecStart={{.BinaryPath}} serve --config {{.ConfigPath}} 11 + Restart=on-failure 12 + RestartSec=10 13 + 14 + ReadWritePaths={{.DataDir}} 15 + ProtectSystem=strict 16 + ProtectHome=yes 17 + NoNewPrivileges=yes 18 + PrivateTmp=yes 19 + 20 + StandardOutput=journal 21 + StandardError=journal 22 + SyslogIdentifier={{.ServiceName}} 23 + 24 + [Install] 25 + WantedBy=multi-user.target
+102 -13
deploy/upcloud/update.go
··· 22 if len(args) > 0 { 23 target = args[0] 24 } 25 - return cmdUpdate(target) 26 }, 27 } 28 ··· 37 } 38 39 func init() { 40 rootCmd.AddCommand(updateCmd) 41 rootCmd.AddCommand(sshCmd) 42 } 43 44 - func cmdUpdate(target string) error { 45 state, err := loadState() 46 if err != nil { 47 return err ··· 53 goVersion, err := requiredGoVersion() 54 if err != nil { 55 return err 56 } 57 58 vals := configValsFromState(state) ··· 134 daemonReload = "systemctl daemon-reload" 135 } 136 137 updateScript := fmt.Sprintf(`set -euo pipefail 138 export PATH=$PATH:/usr/local/go/bin 139 export GOTMPDIR=/var/tmp ··· 156 -tags sqlite_omit_load_extension -trimpath \ 157 -o bin/%s ./cmd/%s 158 %s 159 systemctl restart %s 160 - 161 sleep 2 162 curl -sf %s > /dev/null && echo "HEALTH_OK" || echo "HEALTH_FAIL" 163 - `, goVersion, naming.InstallDir(), branch, t.binaryName, t.buildCmd, daemonReload, t.serviceName, t.healthURL) 164 165 output, err := runSSH(t.ip, updateScript, true) 166 if err != nil { ··· 177 } else { 178 fmt.Printf(" %s: updated (health check inconclusive)\n", name) 179 } 180 } 181 182 return nil ··· 191 holdDomain := state.Zone + ".cove." + baseDomain 192 193 return &ConfigValues{ 194 - S3Endpoint: state.ObjectStorage.Endpoint, 195 - S3Region: state.ObjectStorage.Region, 196 - S3Bucket: state.ObjectStorage.Bucket, 197 - S3AccessKey: state.ObjectStorage.AccessKeyID, 198 - S3SecretKey: "", // not persisted in state; existing value on server is preserved 199 - Zone: state.Zone, 200 - HoldDomain: holdDomain, 201 - HoldDid: "did:web:" + holdDomain, 202 - BasePath: naming.BasePath(), 203 } 204 } 205
··· 22 if len(args) > 0 { 23 target = args[0] 24 } 25 + withScanner, _ := cmd.Flags().GetBool("with-scanner") 26 + return cmdUpdate(target, withScanner) 27 }, 28 } 29 ··· 38 } 39 40 func init() { 41 + updateCmd.Flags().Bool("with-scanner", false, "Enable and deploy vulnerability scanner alongside hold") 42 rootCmd.AddCommand(updateCmd) 43 rootCmd.AddCommand(sshCmd) 44 } 45 46 + func cmdUpdate(target string, withScanner bool) error { 47 state, err := loadState() 48 if err != nil { 49 return err ··· 55 goVersion, err := requiredGoVersion() 56 if err != nil { 57 return err 58 + } 59 + 60 + // Enable scanner retroactively via --with-scanner on update 61 + if withScanner && !state.ScannerEnabled { 62 + state.ScannerEnabled = true 63 + if state.ScannerSecret == "" { 64 + secret, err := generateScannerSecret() 65 + if err != nil { 66 + return fmt.Errorf("generate scanner secret: %w", err) 67 + } 68 + state.ScannerSecret = secret 69 + fmt.Printf("Generated scanner shared secret\n") 70 + } 71 + saveState(state) 72 } 73 74 vals := configValsFromState(state) ··· 150 daemonReload = "systemctl daemon-reload" 151 } 152 153 + // Scanner additions for hold server 154 + scannerBuild := "" 155 + scannerRestart := "" 156 + scannerHealthCheck := "" 157 + if name == "hold" && state.ScannerEnabled { 158 + // Sync scanner config keys 159 + scannerConfigYAML, err := renderConfig(scannerConfigTmpl, vals) 160 + if err != nil { 161 + return fmt.Errorf("render scanner config: %w", err) 162 + } 163 + if err := syncConfigKeys("scanner", t.ip, naming.ScannerConfigPath(), scannerConfigYAML); err != nil { 164 + return fmt.Errorf("scanner config sync: %w", err) 165 + } 166 + 167 + // Sync scanner service unit 168 + scannerUnit, err := renderScannerServiceUnit(scannerServiceUnitParams{ 169 + DisplayName: naming.DisplayName(), 170 + User: naming.SystemUser(), 171 + BinaryPath: naming.InstallDir() + "/bin/" + naming.Scanner(), 172 + ConfigPath: naming.ScannerConfigPath(), 173 + DataDir: 
naming.BasePath(), 174 + ServiceName: naming.Scanner(), 175 + HoldServiceName: naming.Hold(), 176 + }) 177 + if err != nil { 178 + return fmt.Errorf("render scanner service unit: %w", err) 179 + } 180 + scannerUnitChanged, err := syncServiceUnit("scanner", t.ip, naming.Scanner(), scannerUnit) 181 + if err != nil { 182 + return fmt.Errorf("scanner service unit sync: %w", err) 183 + } 184 + if scannerUnitChanged { 185 + daemonReload = "systemctl daemon-reload" 186 + } 187 + 188 + scannerBuild = fmt.Sprintf(` 189 + # Build scanner 190 + cd %s/scanner 191 + CGO_ENABLED=1 go build \ 192 + -ldflags="-s -w" \ 193 + -trimpath \ 194 + -o ../bin/%s ./cmd/scanner 195 + cd %s 196 + 197 + # Ensure scanner data dirs exist 198 + mkdir -p %s/vulndb %s/tmp 199 + chown -R %s:%s %s 200 + `, naming.InstallDir(), naming.Scanner(), naming.InstallDir(), 201 + naming.ScannerDataDir(), naming.ScannerDataDir(), 202 + naming.SystemUser(), naming.SystemUser(), naming.ScannerDataDir()) 203 + 204 + scannerRestart = fmt.Sprintf("\nsystemctl restart %s", naming.Scanner()) 205 + scannerHealthCheck = fmt.Sprintf(` 206 + sleep 2 207 + curl -sf http://localhost:9090/healthz > /dev/null && echo "SCANNER_HEALTH_OK" || echo "SCANNER_HEALTH_FAIL" 208 + `) 209 + } 210 + 211 updateScript := fmt.Sprintf(`set -euo pipefail 212 export PATH=$PATH:/usr/local/go/bin 213 export GOTMPDIR=/var/tmp ··· 230 -tags sqlite_omit_load_extension -trimpath \ 231 -o bin/%s ./cmd/%s 232 %s 233 + %s 234 systemctl restart %s 235 + %s 236 sleep 2 237 curl -sf %s > /dev/null && echo "HEALTH_OK" || echo "HEALTH_FAIL" 238 + %s 239 + `, goVersion, naming.InstallDir(), branch, t.binaryName, t.buildCmd, 240 + scannerBuild, daemonReload, t.serviceName, scannerRestart, 241 + t.healthURL, scannerHealthCheck) 242 243 output, err := runSSH(t.ip, updateScript, true) 244 if err != nil { ··· 255 } else { 256 fmt.Printf(" %s: updated (health check inconclusive)\n", name) 257 } 258 + 259 + // Scanner health reporting 260 + if name == "hold" && 
state.ScannerEnabled { 261 + if strings.Contains(output, "SCANNER_HEALTH_OK") { 262 + fmt.Printf(" scanner: updated and healthy\n") 263 + } else if strings.Contains(output, "SCANNER_HEALTH_FAIL") { 264 + fmt.Printf(" scanner: updated but health check failed!\n") 265 + fmt.Printf(" Check: ssh root@%s journalctl -u %s -n 50\n", t.ip, naming.Scanner()) 266 + } 267 + } 268 } 269 270 return nil ··· 279 holdDomain := state.Zone + ".cove." + baseDomain 280 281 return &ConfigValues{ 282 + S3Endpoint: state.ObjectStorage.Endpoint, 283 + S3Region: state.ObjectStorage.Region, 284 + S3Bucket: state.ObjectStorage.Bucket, 285 + S3AccessKey: state.ObjectStorage.AccessKeyID, 286 + S3SecretKey: "", // not persisted in state; existing value on server is preserved 287 + Zone: state.Zone, 288 + HoldDomain: holdDomain, 289 + HoldDid: "did:web:" + holdDomain, 290 + BasePath: naming.BasePath(), 291 + ScannerSecret: state.ScannerSecret, 292 } 293 } 294
+1
go.work.sum
··· 246 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= 247 github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= 248 github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= 249 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= 250 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= 251 github.com/charmbracelet/colorprofile v0.4.1/go.mod h1:U1d9Dljmdf9DLegaJ0nGZNJvoXAhayhmidOdcBwAvKk=
··· 246 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= 247 github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= 248 github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= 249 + github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= 250 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= 251 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= 252 github.com/charmbracelet/colorprofile v0.4.1/go.mod h1:U1d9Dljmdf9DLegaJ0nGZNJvoXAhayhmidOdcBwAvKk=
+35 -14
pkg/appview/handlers/repository.go
··· 5 "html/template" 6 "log/slog" 7 "net/http" 8 "strings" 9 "sync" 10 "time" ··· 229 artifactType = manifests[0].ArtifactType 230 } 231 232 // Build page meta 233 title := owner.Handle + "/" + repository + " - " + h.ClientShortName 234 if repo.Title != "" { ··· 264 PullCount int 265 IsStarred bool 266 IsOwner bool // Whether current user owns this repository 267 - ReadmeHTML template.HTML 268 - ArtifactType string // Dominant artifact type: container-image, helm-chart, unknown 269 }{ 270 - PageData: NewPageData(r, &h.BaseUIHandler), 271 - Meta: meta, 272 - Owner: owner, 273 - Repository: repo, 274 - Tags: tagsWithPlatforms, 275 - Manifests: manifests, 276 - StarCount: stats.StarCount, 277 - PullCount: stats.PullCount, 278 - IsStarred: isStarred, 279 - IsOwner: isOwner, 280 - ReadmeHTML: readmeHTML, 281 - ArtifactType: artifactType, 282 } 283 284 if err := h.Templates.ExecuteTemplate(w, "repository", data); err != nil {
··· 5 "html/template" 6 "log/slog" 7 "net/http" 8 + "net/url" 9 "strings" 10 "sync" 11 "time" ··· 230 artifactType = manifests[0].ArtifactType 231 } 232 233 + // Collect digests for batch scan-result request 234 + var scanDigests []string 235 + var scanHoldEndpoint string 236 + for _, m := range manifests { 237 + if !m.IsManifestList && m.Manifest.HoldEndpoint != "" { 238 + if scanHoldEndpoint == "" { 239 + scanHoldEndpoint = m.Manifest.HoldEndpoint 240 + } 241 + if m.Manifest.HoldEndpoint == scanHoldEndpoint { 242 + scanDigests = append(scanDigests, strings.TrimPrefix(m.Manifest.Digest, "sha256:")) 243 + } 244 + } 245 + } 246 + var scanBatchParams string 247 + if len(scanDigests) > 0 { 248 + scanBatchParams = "holdEndpoint=" + url.QueryEscape(scanHoldEndpoint) + "&digests=" + strings.Join(scanDigests, ",") 249 + } 250 + 251 // Build page meta 252 title := owner.Handle + "/" + repository + " - " + h.ClientShortName 253 if repo.Title != "" { ··· 283 PullCount int 284 IsStarred bool 285 IsOwner bool // Whether current user owns this repository 286 + ReadmeHTML template.HTML 287 + ArtifactType string // Dominant artifact type: container-image, helm-chart, unknown 288 + ScanBatchParams template.HTML // Pre-encoded query string for batch scan-result endpoint 289 }{ 290 + PageData: NewPageData(r, &h.BaseUIHandler), 291 + Meta: meta, 292 + Owner: owner, 293 + Repository: repo, 294 + Tags: tagsWithPlatforms, 295 + Manifests: manifests, 296 + StarCount: stats.StarCount, 297 + PullCount: stats.PullCount, 298 + IsStarred: isStarred, 299 + IsOwner: isOwner, 300 + ReadmeHTML: readmeHTML, 301 + ArtifactType: artifactType, 302 + ScanBatchParams: template.HTML(scanBatchParams), 303 } 304 305 if err := h.Templates.ExecuteTemplate(w, "repository", data); err != nil {
+122
pkg/appview/handlers/scan_result.go
··· 1 package handlers 2 3 import ( 4 "context" 5 "encoding/json" 6 "fmt" 7 "log/slog" 8 "net/http" 9 "net/url" 10 "strings" 11 "time" 12 13 "atcr.io/pkg/atproto" ··· 123 slog.Warn("Failed to render vuln badge", "error", err) 124 } 125 }
··· 1 package handlers 2 3 import ( 4 + "bytes" 5 "context" 6 "encoding/json" 7 "fmt" 8 + "html/template" 9 "log/slog" 10 "net/http" 11 "net/url" 12 "strings" 13 + "sync" 14 "time" 15 16 "atcr.io/pkg/atproto" ··· 126 slog.Warn("Failed to render vuln badge", "error", err) 127 } 128 } 129 + 130 + // fetchScanRecord fetches a scan record from a hold's PDS and returns badge data. 131 + func fetchScanRecord(ctx context.Context, holdEndpoint, holdDID, hexDigest string) vulnBadgeData { 132 + rkey := hexDigest 133 + fullDigest := "sha256:" + hexDigest 134 + 135 + scanURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s", 136 + holdEndpoint, 137 + url.QueryEscape(holdDID), 138 + url.QueryEscape(atproto.ScanCollection), 139 + url.QueryEscape(rkey), 140 + ) 141 + 142 + req, err := http.NewRequestWithContext(ctx, "GET", scanURL, nil) 143 + if err != nil { 144 + return vulnBadgeData{Error: true} 145 + } 146 + 147 + resp, err := http.DefaultClient.Do(req) 148 + if err != nil { 149 + return vulnBadgeData{Error: true} 150 + } 151 + defer resp.Body.Close() 152 + 153 + if resp.StatusCode != http.StatusOK { 154 + return vulnBadgeData{Error: true} 155 + } 156 + 157 + var envelope struct { 158 + Value json.RawMessage `json:"value"` 159 + } 160 + if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil { 161 + return vulnBadgeData{Error: true} 162 + } 163 + 164 + var scanRecord atproto.ScanRecord 165 + if err := json.Unmarshal(envelope.Value, &scanRecord); err != nil { 166 + return vulnBadgeData{Error: true} 167 + } 168 + 169 + return vulnBadgeData{ 170 + Critical: scanRecord.Critical, 171 + High: scanRecord.High, 172 + Medium: scanRecord.Medium, 173 + Low: scanRecord.Low, 174 + Total: scanRecord.Total, 175 + ScannedAt: scanRecord.ScannedAt, 176 + Found: true, 177 + Digest: fullDigest, 178 + HoldEndpoint: holdEndpoint, 179 + } 180 + } 181 + 182 + // BatchScanResultHandler handles a single HTMX request that fetches scan results 183 + // for 
multiple manifests concurrently and returns OOB swap fragments. 184 + type BatchScanResultHandler struct { 185 + BaseUIHandler 186 + } 187 + 188 + func (h *BatchScanResultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 189 + holdEndpoint := r.URL.Query().Get("holdEndpoint") 190 + digestsParam := r.URL.Query().Get("digests") 191 + 192 + if holdEndpoint == "" || digestsParam == "" { 193 + w.Header().Set("Content-Type", "text/html") 194 + return 195 + } 196 + 197 + digests := strings.Split(digestsParam, ",") 198 + if len(digests) > 50 { 199 + digests = digests[:50] 200 + } 201 + 202 + holdDID := atproto.ResolveHoldDIDFromURL(holdEndpoint) 203 + if holdDID == "" { 204 + // Can't resolve hold — render empty OOB spans 205 + w.Header().Set("Content-Type", "text/html") 206 + for _, d := range digests { 207 + fmt.Fprintf(w, `<span id="scan-badge-%s" hx-swap-oob="outerHTML"></span>`, template.HTMLEscapeString(d)) 208 + } 209 + return 210 + } 211 + 212 + // Fetch scan records concurrently with a semaphore to limit parallelism 213 + type result struct { 214 + hexDigest string 215 + data vulnBadgeData 216 + } 217 + results := make([]result, len(digests)) 218 + sem := make(chan struct{}, 10) 219 + 220 + var wg sync.WaitGroup 221 + for i, hexDigest := range digests { 222 + results[i].hexDigest = hexDigest 223 + wg.Add(1) 224 + go func(idx int, hex string) { 225 + defer wg.Done() 226 + sem <- struct{}{} 227 + defer func() { <-sem }() 228 + 229 + ctx, cancel := context.WithTimeout(r.Context(), 3*time.Second) 230 + defer cancel() 231 + 232 + results[idx].data = fetchScanRecord(ctx, holdEndpoint, holdDID, hex) 233 + }(i, hexDigest) 234 + } 235 + wg.Wait() 236 + 237 + // Render all OOB fragments 238 + w.Header().Set("Content-Type", "text/html") 239 + for _, res := range results { 240 + var buf bytes.Buffer 241 + if err := h.Templates.ExecuteTemplate(&buf, "vuln-badge", res.data); err != nil { 242 + slog.Warn("Failed to render vuln badge in batch", "digest", res.hexDigest, 
"error", err) 243 + } 244 + fmt.Fprintf(w, `<span id="scan-badge-%s" hx-swap-oob="outerHTML">%s</span>`, 245 + template.HTMLEscapeString(res.hexDigest), buf.String()) 246 + } 247 + }
+147
pkg/appview/handlers/scan_result_test.go
··· 253 t.Error("Should not contain 'L:0' for zero low count") 254 } 255 }
··· 253 t.Error("Should not contain 'L:0' for zero low count") 254 } 255 } 256 + 257 + // --- Batch scan result tests --- 258 + 259 + func setupBatchScanResultHandler(t *testing.T) *handlers.BatchScanResultHandler { 260 + t.Helper() 261 + templates, err := appview.Templates(nil) 262 + if err != nil { 263 + t.Fatalf("Failed to load templates: %v", err) 264 + } 265 + return &handlers.BatchScanResultHandler{ 266 + BaseUIHandler: handlers.BaseUIHandler{ 267 + Templates: templates, 268 + }, 269 + } 270 + } 271 + 272 + func TestBatchScanResult_MultipleDigests(t *testing.T) { 273 + // Mock hold that returns different results based on rkey 274 + hold := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 275 + rkey := r.URL.Query().Get("rkey") 276 + w.Header().Set("Content-Type", "application/json") 277 + switch rkey { 278 + case "abc123": 279 + w.Write([]byte(mockScanRecord(2, 5, 10, 3, 20))) 280 + case "def456": 281 + w.Write([]byte(mockScanRecord(0, 0, 0, 0, 0))) 282 + default: 283 + http.Error(w, "not found", http.StatusNotFound) 284 + } 285 + })) 286 + defer hold.Close() 287 + 288 + handler := setupBatchScanResultHandler(t) 289 + 290 + req := httptest.NewRequest("GET", 291 + "/api/scan-results?holdEndpoint="+hold.URL+"&digests=abc123,def456,unknown789", nil) 292 + rr := httptest.NewRecorder() 293 + handler.ServeHTTP(rr, req) 294 + 295 + if rr.Code != http.StatusOK { 296 + t.Errorf("Expected status %d, got %d", http.StatusOK, rr.Code) 297 + } 298 + 299 + body := rr.Body.String() 300 + 301 + // All three digests should have OOB spans 302 + if !strings.Contains(body, `id="scan-badge-abc123"`) { 303 + t.Error("Expected OOB span for abc123") 304 + } 305 + if !strings.Contains(body, `id="scan-badge-def456"`) { 306 + t.Error("Expected OOB span for def456") 307 + } 308 + if !strings.Contains(body, `id="scan-badge-unknown789"`) { 309 + t.Error("Expected OOB span for unknown789") 310 + } 311 + 312 + // All should have hx-swap-oob attribute 313 + 
if !strings.Contains(body, `hx-swap-oob="outerHTML"`) { 314 + t.Error("Expected hx-swap-oob attribute in response") 315 + } 316 + 317 + // abc123 should have vulnerability badges 318 + if !strings.Contains(body, "C:2") { 319 + t.Error("Expected body to contain 'C:2' for abc123") 320 + } 321 + // def456 should have clean badge 322 + if !strings.Contains(body, "Clean") { 323 + t.Error("Expected body to contain 'Clean' for def456") 324 + } 325 + } 326 + 327 + func TestBatchScanResult_EmptyParams(t *testing.T) { 328 + handler := setupBatchScanResultHandler(t) 329 + 330 + // No params 331 + req := httptest.NewRequest("GET", "/api/scan-results", nil) 332 + rr := httptest.NewRecorder() 333 + handler.ServeHTTP(rr, req) 334 + 335 + body := strings.TrimSpace(rr.Body.String()) 336 + if body != "" { 337 + t.Errorf("Expected empty body for missing params, got: %q", body) 338 + } 339 + } 340 + 341 + func TestBatchScanResult_MissingDigests(t *testing.T) { 342 + handler := setupBatchScanResultHandler(t) 343 + 344 + req := httptest.NewRequest("GET", "/api/scan-results?holdEndpoint=https://hold.example.com", nil) 345 + rr := httptest.NewRecorder() 346 + handler.ServeHTTP(rr, req) 347 + 348 + body := strings.TrimSpace(rr.Body.String()) 349 + if body != "" { 350 + t.Errorf("Expected empty body for missing digests, got: %q", body) 351 + } 352 + } 353 + 354 + func TestBatchScanResult_HoldUnreachable(t *testing.T) { 355 + hold := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) 356 + hold.Close() 357 + 358 + handler := setupBatchScanResultHandler(t) 359 + 360 + req := httptest.NewRequest("GET", 361 + "/api/scan-results?holdEndpoint="+hold.URL+"&digests=abc123,def456", nil) 362 + rr := httptest.NewRecorder() 363 + handler.ServeHTTP(rr, req) 364 + 365 + body := rr.Body.String() 366 + 367 + // Should still have OOB spans (empty content since hold is unreachable) 368 + if !strings.Contains(body, `id="scan-badge-abc123"`) { 369 + t.Error("Expected OOB span 
for abc123 even when hold is unreachable") 370 + } 371 + if !strings.Contains(body, `id="scan-badge-def456"`) { 372 + t.Error("Expected OOB span for def456 even when hold is unreachable") 373 + } 374 + // Should NOT contain vulnerability badges 375 + if strings.Contains(body, "badge-error") || strings.Contains(body, "Clean") { 376 + t.Error("Unreachable hold should not render badge content") 377 + } 378 + } 379 + 380 + func TestBatchScanResult_SingleDigest(t *testing.T) { 381 + hold := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 382 + w.Header().Set("Content-Type", "application/json") 383 + w.Write([]byte(mockScanRecord(1, 0, 0, 0, 1))) 384 + })) 385 + defer hold.Close() 386 + 387 + handler := setupBatchScanResultHandler(t) 388 + 389 + req := httptest.NewRequest("GET", 390 + "/api/scan-results?holdEndpoint="+hold.URL+"&digests=abc123", nil) 391 + rr := httptest.NewRecorder() 392 + handler.ServeHTTP(rr, req) 393 + 394 + body := rr.Body.String() 395 + 396 + if !strings.Contains(body, `id="scan-badge-abc123"`) { 397 + t.Error("Expected OOB span for abc123") 398 + } 399 + if !strings.Contains(body, "C:1") { 400 + t.Error("Expected body to contain 'C:1'") 401 + } 402 + }
+1
pkg/appview/routes/routes.go
··· 124 125 // Vulnerability scan result API endpoints (HTMX lazy loading + modal content) 126 router.Get("/api/scan-result", (&uihandlers.ScanResultHandler{BaseUIHandler: base}).ServeHTTP) 127 router.Get("/api/vuln-details", (&uihandlers.VulnDetailsHandler{BaseUIHandler: base}).ServeHTTP) 128 129 // Attestation details API endpoint (HTMX modal content)
··· 124 125 // Vulnerability scan result API endpoints (HTMX lazy loading + modal content) 126 router.Get("/api/scan-result", (&uihandlers.ScanResultHandler{BaseUIHandler: base}).ServeHTTP) 127 + router.Get("/api/scan-results", (&uihandlers.BatchScanResultHandler{BaseUIHandler: base}).ServeHTTP) 128 router.Get("/api/vuln-details", (&uihandlers.VulnDetailsHandler{BaseUIHandler: base}).ServeHTTP) 129 130 // Attestation details API endpoint (HTMX modal content)
+2 -2
pkg/appview/templates/components/star.html
··· 21 hx-on::before-request="this.disabled=true" 22 hx-on::after-request="if(event.detail.xhr.status===401) window.location='/auth/oauth/login'" 23 aria-label="{{ if .IsStarred }}Unstar{{ else }}Star{{ end }} {{ .Handle }}/{{ .Repository }}"> 24 - <svg class="icon size-4 text-amber-400 stroke-amber-400 transition-transform group-hover:scale-110{{ if .IsStarred }} fill-amber-400{{ end }}" id="star-icon" aria-hidden="true"><use href="/icons.svg#star"></use></svg> 25 <span id="star-count">{{ .StarCount }}</span> 26 </button> 27 {{ else }} 28 <span class="flex items-center gap-2 text-base-content/60"> 29 - <svg class="icon size-[1.1rem] text-amber-400 stroke-amber-400{{ if .IsStarred }} fill-amber-400{{ end }}" aria-hidden="true"><use href="/icons.svg#star"></use></svg> 30 <span class="font-semibold text-base-content">{{ .StarCount }}</span> 31 </span> 32 {{ end }}
··· 21 hx-on::before-request="this.disabled=true" 22 hx-on::after-request="if(event.detail.xhr.status===401) window.location='/auth/oauth/login'" 23 aria-label="{{ if .IsStarred }}Unstar{{ else }}Star{{ end }} {{ .Handle }}/{{ .Repository }}"> 24 + <svg class="icon size-4 text-amber-400 stroke-amber-400 transition-transform group-hover:scale-110{{ if .IsStarred }} fill-amber-400!{{ end }}" id="star-icon" aria-hidden="true"><use href="/icons.svg#star"></use></svg> 25 <span id="star-count">{{ .StarCount }}</span> 26 </button> 27 {{ else }} 28 <span class="flex items-center gap-2 text-base-content/60"> 29 + <svg class="icon size-[1.1rem] text-amber-400 stroke-amber-400{{ if .IsStarred }} fill-amber-400!{{ end }}" aria-hidden="true"><use href="/icons.svg#star"></use></svg> 30 <span class="font-semibold text-base-content">{{ .StarCount }}</span> 31 </span> 32 {{ end }}
+8 -5
pkg/appview/templates/pages/repository.html
··· 220 {{ else if not .Reachable }} 221 <span class="badge badge-sm badge-warning">{{ icon "alert-triangle" "size-3" }} Offline</span> 222 {{ end }} 223 - {{/* Vulnerability scan badge (lazy-loaded from hold) */}} 224 {{ if and (not .IsManifestList) .Manifest.HoldEndpoint }} 225 - <span hx-get="/api/scan-result?digest={{ .Manifest.Digest | urlquery }}&holdEndpoint={{ .Manifest.HoldEndpoint | urlquery }}" 226 - hx-trigger="load delay:1s" 227 - hx-swap="outerHTML"> 228 - </span> 229 {{ end }} 230 </div> 231 <div class="flex items-center gap-2"> ··· 270 </div> 271 {{ end }} 272 </div> 273 {{ else }} 274 <p class="text-base-content/60">No manifests available</p> 275 {{ end }}
··· 220 {{ else if not .Reachable }} 221 <span class="badge badge-sm badge-warning">{{ icon "alert-triangle" "size-3" }} Offline</span> 222 {{ end }} 223 + {{/* Vulnerability scan badge placeholder (batch-loaded via OOB swap) */}} 224 {{ if and (not .IsManifestList) .Manifest.HoldEndpoint }} 225 + <span id="scan-badge-{{ trimPrefix "sha256:" .Manifest.Digest }}"></span> 226 {{ end }} 227 </div> 228 <div class="flex items-center gap-2"> ··· 267 </div> 268 {{ end }} 269 </div> 270 + {{ if $.ScanBatchParams }} 271 + <div hx-get="/api/scan-results?{{ $.ScanBatchParams }}" 272 + hx-trigger="load delay:500ms" 273 + hx-swap="none" 274 + style="display:none"></div> 275 + {{ end }} 276 {{ else }} 277 <p class="text-base-content/60">No manifests available</p> 278 {{ end }}
+47 -18
scanner/cmd/scanner/main.go
··· 17 "atcr.io/scanner/internal/scan" 18 ) 19 20 var rootCmd = &cobra.Command{ 21 Use: "atcr-scanner", 22 Short: "ATCR Scanner - container image vulnerability scanner", ··· 31 generates SBOMs with Syft, scans for vulnerabilities with Grype, and sends 32 results back over the same WebSocket connection. 33 34 - Configuration via environment variables (SCANNER_ prefix): 35 - SCANNER_HOLD_URL Hold service URL (required) 36 - SCANNER_SHARED_SECRET Shared secret for auth (required) 37 - SCANNER_WORKERS Worker count (default: 2) 38 - SCANNER_QUEUE_SIZE Max queue depth (default: 100) 39 - SCANNER_VULN_ENABLED Enable Grype scanning (default: true) 40 - SCANNER_VULN_DB_PATH Grype DB location (default: /var/lib/atcr-scanner/vulndb) 41 - SCANNER_TMP_DIR Temp dir for extraction (default: /var/lib/atcr-scanner/tmp) 42 - SCANNER_ADDR Health endpoint addr (default: :9090)`, 43 Args: cobra.NoArgs, 44 RunE: func(cmd *cobra.Command, args []string) error { 45 - cfg, err := config.Load() 46 if err != nil { 47 return fmt.Errorf("failed to load config: %w", err) 48 } 49 50 slog.Info("Starting ATCR scanner", 51 - "hold_url", cfg.HoldURL, 52 - "workers", cfg.Workers, 53 - "queue_size", cfg.QueueSize, 54 - "vuln_enabled", cfg.VulnEnabled) 55 56 ctx, cancel := context.WithCancel(context.Background()) 57 defer cancel() 58 59 // Create priority queue 60 - q := queue.NewJobQueue(cfg.QueueSize) 61 62 // Create hold WebSocket client 63 - holdClient := client.NewHoldClient(cfg.HoldURL, cfg.SharedSecret, q) 64 65 // Start WebSocket connection (feeds queue) 66 go holdClient.Connect() ··· 75 w.WriteHeader(http.StatusOK) 76 w.Write([]byte("ok")) 77 }) 78 - healthServer := &http.Server{Addr: cfg.Addr, Handler: mux} 79 go func() { 80 - slog.Info("Health endpoint listening", "addr", cfg.Addr) 81 if err := healthServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { 82 slog.Error("Health server error", "error", err) 83 } ··· 101 }, 102 } 103 104 func init() { 105 
rootCmd.AddCommand(serveCmd) 106 } 107 108 func main() {
··· 17 "atcr.io/scanner/internal/scan" 18 ) 19 20 + var configFile string 21 + 22 var rootCmd = &cobra.Command{ 23 Use: "atcr-scanner", 24 Short: "ATCR Scanner - container image vulnerability scanner", ··· 33 generates SBOMs with Syft, scans for vulnerabilities with Grype, and sends 34 results back over the same WebSocket connection. 35 36 + Configuration is loaded in layers: defaults -> YAML file -> environment variables. 37 + Use --config to specify a YAML configuration file. 38 + Environment variables always override file values (SCANNER_ prefix).`, 39 Args: cobra.NoArgs, 40 RunE: func(cmd *cobra.Command, args []string) error { 41 + cfg, err := config.LoadConfig(configFile) 42 if err != nil { 43 return fmt.Errorf("failed to load config: %w", err) 44 } 45 46 slog.Info("Starting ATCR scanner", 47 + "hold_url", cfg.Hold.URL, 48 + "workers", cfg.Scanner.Workers, 49 + "queue_size", cfg.Scanner.QueueSize, 50 + "vuln_enabled", cfg.Vuln.Enabled) 51 52 ctx, cancel := context.WithCancel(context.Background()) 53 defer cancel() 54 55 // Create priority queue 56 + q := queue.NewJobQueue(cfg.Scanner.QueueSize) 57 58 // Create hold WebSocket client 59 + holdClient := client.NewHoldClient(cfg.Hold.URL, cfg.Hold.Secret, q) 60 61 // Start WebSocket connection (feeds queue) 62 go holdClient.Connect() ··· 71 w.WriteHeader(http.StatusOK) 72 w.Write([]byte("ok")) 73 }) 74 + healthServer := &http.Server{Addr: cfg.Server.Addr, Handler: mux} 75 go func() { 76 + slog.Info("Health endpoint listening", "addr", cfg.Server.Addr) 77 if err := healthServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { 78 slog.Error("Health server error", "error", err) 79 } ··· 97 }, 98 } 99 100 + var configCmd = &cobra.Command{ 101 + Use: "config", 102 + Short: "Configuration management commands", 103 + } 104 + 105 + var configInitCmd = &cobra.Command{ 106 + Use: "init [path]", 107 + Short: "Generate an example configuration file", 108 + Long: `Generate an example YAML configuration file with 
all available options. 109 + If path is provided, writes to that file. Otherwise writes to stdout.`, 110 + Args: cobra.MaximumNArgs(1), 111 + RunE: func(cmd *cobra.Command, args []string) error { 112 + yamlBytes, err := config.ExampleYAML() 113 + if err != nil { 114 + return fmt.Errorf("failed to generate example config: %w", err) 115 + } 116 + if len(args) == 1 { 117 + if err := os.WriteFile(args[0], yamlBytes, 0644); err != nil { 118 + return fmt.Errorf("failed to write config file: %w", err) 119 + } 120 + fmt.Fprintf(os.Stderr, "Wrote example config to %s\n", args[0]) 121 + return nil 122 + } 123 + fmt.Print(string(yamlBytes)) 124 + return nil 125 + }, 126 + } 127 + 128 func init() { 129 + serveCmd.Flags().StringVarP(&configFile, "config", "c", "", "path to YAML configuration file") 130 + 131 + configCmd.AddCommand(configInitCmd) 132 + 133 rootCmd.AddCommand(serveCmd) 134 + rootCmd.AddCommand(configCmd) 135 } 136 137 func main() {
+6 -1
scanner/go.mod
··· 9 github.com/spf13/cobra v1.10.2 10 ) 11 12 exclude google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a 13 14 require ( 15 cel.dev/expr v0.25.1 // indirect 16 cloud.google.com/go v0.123.0 // indirect 17 cloud.google.com/go/auth v0.18.1 // indirect ··· 248 github.com/spf13/afero v1.15.0 // indirect 249 github.com/spf13/cast v1.10.0 // indirect 250 github.com/spf13/pflag v1.0.10 // indirect 251 - github.com/spf13/viper v1.21.0 // indirect 252 github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect 253 github.com/subosito/gotenv v1.6.0 // indirect 254 github.com/sylabs/sif/v2 v2.23.0 // indirect ··· 304 modernc.org/memory v1.11.0 // indirect 305 modernc.org/sqlite v1.45.0 // indirect 306 )
··· 9 github.com/spf13/cobra v1.10.2 10 ) 11 12 + require go.yaml.in/yaml/v4 v4.0.0-rc.4 // indirect 13 + 14 exclude google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a 15 16 require ( 17 + atcr.io v0.0.0 18 cel.dev/expr v0.25.1 // indirect 19 cloud.google.com/go v0.123.0 // indirect 20 cloud.google.com/go/auth v0.18.1 // indirect ··· 251 github.com/spf13/afero v1.15.0 // indirect 252 github.com/spf13/cast v1.10.0 // indirect 253 github.com/spf13/pflag v1.0.10 // indirect 254 + github.com/spf13/viper v1.21.0 255 github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect 256 github.com/subosito/gotenv v1.6.0 // indirect 257 github.com/sylabs/sif/v2 v2.23.0 // indirect ··· 307 modernc.org/memory v1.11.0 // indirect 308 modernc.org/sqlite v1.45.0 // indirect 309 ) 310 + 311 + replace atcr.io => ../
+16 -14
scanner/go.sum
··· 243 github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= 244 github.com/bradleyjkemp/cupaloy/v2 v2.8.0 h1:any4BmKE+jGIaMpnU8YgH/I2LPiLBufr6oMMlVBbn9M= 245 github.com/bradleyjkemp/cupaloy/v2 v2.8.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= 246 - github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= 247 - github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= 248 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 249 github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 250 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= ··· 571 github.com/gpustack/gguf-parser-go v0.24.0/go.mod h1:y4TwTtDqFWTK+xvprOjRUh+dowgU2TKCX37vRKvGiZ0= 572 github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= 573 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 574 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= 575 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= 576 github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b h1:wDUNC2eKiL35DbLvsDhiblTUXHxcOPwQSCzi7xpQUN4= 577 github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b/go.mod h1:VzxiSdG6j1pi7rwGm/xYI5RbtpBgM8sARDXlvEvxlu0= 578 github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.70 h1:0HADrxxqaQkGycO1JoUUA+B4FnIkuo8d2bz/hSaTFFQ= ··· 845 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 846 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 847 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= 848 - 
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= 849 - github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= 850 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= 851 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= 852 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= ··· 1009 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= 1010 go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= 1011 go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= 1012 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= 1013 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= 1014 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= 1015 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= 1016 - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0 h1:5gn2urDL/FBnK8OkCfD1j3/ER79rUuTYmCvlXBKeYL8= 1017 - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0/go.mod h1:0fBG6ZJxhqByfFZDwSwpZGzJU671HkwpWaNe2t4VUPI= 1018 go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= 1019 go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= 1020 go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= ··· 1024 go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= 1025 go.opentelemetry.io/otel/trace v1.40.0/go.mod 
h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= 1026 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= 1027 - go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= 1028 - go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= 1029 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 1030 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 1031 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= ··· 1033 go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= 1034 go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= 1035 go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= 1036 go4.org v0.0.0-20260112195520-a5071408f32f h1:ziUVAjmTPwQMBmYR1tbdRFJPtTcQUI12fH9QQjfb0Sw= 1037 go4.org v0.0.0-20260112195520-a5071408f32f/go.mod h1:ZRJnO5ZI4zAwMFp+dS1+V6J6MSyAowhRqAE+DPa1Xp0= 1038 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
··· 243 github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= 244 github.com/bradleyjkemp/cupaloy/v2 v2.8.0 h1:any4BmKE+jGIaMpnU8YgH/I2LPiLBufr6oMMlVBbn9M= 245 github.com/bradleyjkemp/cupaloy/v2 v2.8.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= 246 + github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= 247 + github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= 248 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 249 github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 250 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= ··· 571 github.com/gpustack/gguf-parser-go v0.24.0/go.mod h1:y4TwTtDqFWTK+xvprOjRUh+dowgU2TKCX37vRKvGiZ0= 572 github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= 573 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 574 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8 h1:NpbJl/eVbvrGE0MJ6X16X9SAifesl6Fwxg/YmCvubRI= 575 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8/go.mod h1:mi7YA+gCzVem12exXy46ZespvGtX/lZmD/RLnQhVW7U= 576 github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b h1:wDUNC2eKiL35DbLvsDhiblTUXHxcOPwQSCzi7xpQUN4= 577 github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b/go.mod h1:VzxiSdG6j1pi7rwGm/xYI5RbtpBgM8sARDXlvEvxlu0= 578 github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.70 h1:0HADrxxqaQkGycO1JoUUA+B4FnIkuo8d2bz/hSaTFFQ= ··· 845 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 846 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 847 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= 848 + 
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= 849 + github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= 850 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= 851 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= 852 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= ··· 1009 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= 1010 go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= 1011 go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= 1012 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= 1013 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= 1014 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= 1015 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= 1016 + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 h1:ZrPRak/kS4xI3AVXy8F7pipuDXmDsrO8Lg+yQjBLjw0= 1017 + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0/go.mod h1:3y6kQCWztq6hyW8Z9YxQDDm0Je9AJoFar2G0yDcmhRk= 1018 go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= 1019 go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= 1020 go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= ··· 1024 go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= 1025 go.opentelemetry.io/otel/trace v1.40.0/go.mod 
h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= 1026 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= 1027 + go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= 1028 + go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= 1029 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 1030 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 1031 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= ··· 1033 go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= 1034 go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= 1035 go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= 1036 + go.yaml.in/yaml/v4 v4.0.0-rc.4 h1:UP4+v6fFrBIb1l934bDl//mmnoIZEDK0idg1+AIvX5U= 1037 + go.yaml.in/yaml/v4 v4.0.0-rc.4/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0= 1038 go4.org v0.0.0-20260112195520-a5071408f32f h1:ziUVAjmTPwQMBmYR1tbdRFJPtTcQUI12fH9QQjfb0Sw= 1039 go4.org v0.0.0-20260112195520-a5071408f32f/go.mod h1:ZRJnO5ZI4zAwMFp+dS1+V6J6MSyAowhRqAE+DPa1Xp0= 1040 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+94 -57
scanner/internal/config/config.go
··· 1 - // Package config provides environment-based configuration for the scanner service. 2 package config 3 4 import ( 5 "fmt" 6 - "os" 7 - "strconv" 8 ) 9 10 - // Config holds all scanner configuration 11 type Config struct { 12 - // Addr is the HTTP address for the health endpoint 13 - Addr string 14 15 - // HoldURL is the WebSocket URL of the hold service 16 - HoldURL string 17 18 - // SharedSecret is the shared secret for scanner authentication 19 - SharedSecret string 20 21 - // Workers is the number of concurrent scan workers 22 - Workers int 23 24 - // QueueSize is the maximum priority queue depth 25 - QueueSize int 26 27 - // VulnEnabled enables Grype vulnerability scanning 28 - VulnEnabled bool 29 30 - // VulnDBPath is the directory for the Grype vulnerability database 31 - VulnDBPath string 32 33 - // TmpDir is the directory for temporary layer extraction 34 - TmpDir string 35 } 36 37 - // Load reads configuration from environment variables with SCANNER_ prefix 38 - func Load() (*Config, error) { 39 - cfg := &Config{ 40 - Addr: envOr("SCANNER_ADDR", ":9090"), 41 - HoldURL: os.Getenv("SCANNER_HOLD_URL"), 42 - SharedSecret: os.Getenv("SCANNER_SHARED_SECRET"), 43 - Workers: envIntOr("SCANNER_WORKERS", 2), 44 - QueueSize: envIntOr("SCANNER_QUEUE_SIZE", 100), 45 - VulnEnabled: envBoolOr("SCANNER_VULN_ENABLED", true), 46 - VulnDBPath: envOr("SCANNER_VULN_DB_PATH", "/var/lib/atcr-scanner/vulndb"), 47 - TmpDir: envOr("SCANNER_TMP_DIR", "/var/lib/atcr-scanner/tmp"), 48 - } 49 50 - if cfg.HoldURL == "" { 51 - return nil, fmt.Errorf("SCANNER_HOLD_URL is required") 52 - } 53 - if cfg.SharedSecret == "" { 54 - return nil, fmt.Errorf("SCANNER_SHARED_SECRET is required") 55 - } 56 57 - return cfg, nil 58 } 59 60 - func envOr(key, fallback string) string { 61 - if v := os.Getenv(key); v != "" { 62 - return v 63 - } 64 - return fallback 65 } 66 67 - func envIntOr(key string, fallback int) int { 68 - if v := os.Getenv(key); v != "" { 69 - if n, err := strconv.Atoi(v); 
err == nil { 70 - return n 71 - } 72 } 73 - return fallback 74 - } 75 76 - func envBoolOr(key string, fallback bool) bool { 77 - if v := os.Getenv(key); v != "" { 78 - if b, err := strconv.ParseBool(v); err == nil { 79 - return b 80 - } 81 } 82 - return fallback 83 }
··· 1 + // Package config provides Viper-based configuration for the scanner service. 2 package config 3 4 import ( 5 "fmt" 6 + 7 + "github.com/spf13/viper" 8 + 9 + "atcr.io/pkg/config" 10 ) 11 12 + // Config holds all scanner configuration. 13 type Config struct { 14 + Version string `yaml:"version" comment:"Configuration format version."` 15 + LogLevel string `yaml:"log_level" comment:"Log level: debug, info, warn, error."` 16 + LogShipper config.LogShipperConfig `yaml:"log_shipper" comment:"Remote log shipping settings."` 17 + Server ServerConfig `yaml:"server" comment:"Health endpoint settings."` 18 + Hold HoldConfig `yaml:"hold" comment:"Hold service connection settings."` 19 + Scanner ScannerConfig `yaml:"scanner" comment:"Worker pool settings."` 20 + Vuln VulnConfig `yaml:"vuln" comment:"Vulnerability scanning (Grype) settings."` 21 + } 22 23 + // ServerConfig defines the health endpoint settings. 24 + type ServerConfig struct { 25 + // Listen address for the health endpoint. 26 + Addr string `yaml:"addr" comment:"Listen address for the health endpoint, e.g. \":9090\"."` 27 + } 28 29 + // HoldConfig defines the hold service connection. 30 + type HoldConfig struct { 31 + // WebSocket URL of the hold service. 32 + URL string `yaml:"url" comment:"WebSocket URL of the hold service (REQUIRED), e.g. \"ws://localhost:8080\"."` 33 34 + // Shared secret for scanner authentication. 35 + Secret string `yaml:"secret" comment:"Shared secret for scanner WebSocket auth (REQUIRED)."` 36 + } 37 38 + // ScannerConfig defines worker pool settings. 39 + type ScannerConfig struct { 40 + // Number of concurrent scan workers. 41 + Workers int `yaml:"workers" comment:"Number of concurrent scan workers."` 42 43 + // Maximum priority queue depth. 44 + QueueSize int `yaml:"queue_size" comment:"Maximum priority queue depth."` 45 + } 46 47 + // VulnConfig defines vulnerability scanning settings. 48 + type VulnConfig struct { 49 + // Enable Grype vulnerability scanning. 
50 + Enabled bool `yaml:"enabled" comment:"Enable Grype vulnerability scanning."` 51 52 + // Directory for the Grype vulnerability database. 53 + DBPath string `yaml:"db_path" comment:"Directory for the Grype vulnerability database."` 54 + 55 + // Directory for temporary layer extraction. 56 + TmpDir string `yaml:"tmp_dir" comment:"Directory for temporary layer extraction."` 57 } 58 59 + // setScannerDefaults registers all default values on the given Viper instance. 60 + func setScannerDefaults(v *viper.Viper) { 61 + v.SetDefault("version", "0.1") 62 + v.SetDefault("log_level", "info") 63 + 64 + // Server defaults 65 + v.SetDefault("server.addr", ":9090") 66 + 67 + // Hold defaults 68 + v.SetDefault("hold.url", "") 69 + v.SetDefault("hold.secret", "") 70 + 71 + // Scanner defaults 72 + v.SetDefault("scanner.workers", 2) 73 + v.SetDefault("scanner.queue_size", 100) 74 75 + // Vuln defaults 76 + v.SetDefault("vuln.enabled", true) 77 + v.SetDefault("vuln.db_path", "/var/lib/atcr-scanner/vulndb") 78 + v.SetDefault("vuln.tmp_dir", "/var/lib/atcr-scanner/tmp") 79 80 + // Log shipper defaults 81 + v.SetDefault("log_shipper.batch_size", 100) 82 + v.SetDefault("log_shipper.flush_interval", "5s") 83 + } 84 + 85 + // DefaultConfig returns a Config populated with all default values (no validation). 86 + func DefaultConfig() *Config { 87 + v := config.NewViper("SCANNER", "") 88 + setScannerDefaults(v) 89 + 90 + cfg := &Config{} 91 + _ = v.Unmarshal(cfg, config.UnmarshalOption()) 92 + return cfg 93 } 94 95 + // ExampleYAML returns a fully-commented YAML configuration with default values. 96 + func ExampleYAML() ([]byte, error) { 97 + return config.MarshalCommentedYAML("ATCR Scanner Configuration", DefaultConfig()) 98 } 99 100 + // LoadConfig builds a complete configuration using Viper layered loading: 101 + // defaults -> YAML file -> environment variables. 102 + // yamlPath is optional; empty string means env-only (backward compatible). 
103 + func LoadConfig(yamlPath string) (*Config, error) { 104 + v := config.NewViper("SCANNER", yamlPath) 105 + setScannerDefaults(v) 106 + 107 + cfg := &Config{} 108 + if err := v.Unmarshal(cfg, config.UnmarshalOption()); err != nil { 109 + return nil, fmt.Errorf("failed to unmarshal config: %w", err) 110 } 111 112 + if cfg.Hold.URL == "" { 113 + return nil, fmt.Errorf("hold.url is required (env: SCANNER_HOLD_URL)") 114 + } 115 + if cfg.Hold.Secret == "" { 116 + return nil, fmt.Errorf("hold.secret is required (env: SCANNER_HOLD_SECRET)") 117 } 118 + 119 + return cfg, nil 120 }
+8 -8
scanner/internal/scan/worker.go
··· 36 // Start launches worker goroutines 37 func (wp *WorkerPool) Start(ctx context.Context) { 38 // Initialize vuln database on startup if enabled 39 - if wp.cfg.VulnEnabled { 40 go func() { 41 - if err := initializeVulnDatabase(wp.cfg.VulnDBPath, wp.cfg.TmpDir); err != nil { 42 slog.Error("Failed to initialize vulnerability database", "error", err) 43 slog.Warn("Vulnerability scanning will be disabled until database is available") 44 } 45 }() 46 } 47 48 - for i := 0; i < wp.cfg.Workers; i++ { 49 wp.wg.Add(1) 50 go wp.worker(ctx, i) 51 } 52 53 - slog.Info("Scanner worker pool started", "workers", wp.cfg.Workers) 54 } 55 56 // Wait blocks until all workers finish ··· 100 startTime := time.Now() 101 102 // Ensure tmp dir exists 103 - if err := ensureDir(wp.cfg.TmpDir); err != nil { 104 return nil, fmt.Errorf("failed to create tmp dir: %w", err) 105 } 106 107 // Step 1: Extract image layers from hold via presigned URLs 108 slog.Info("Extracting image layers", "repository", job.Repository) 109 - imageDir, cleanup, err := extractLayers(job, wp.cfg.TmpDir) 110 if err != nil { 111 return nil, fmt.Errorf("failed to extract layers: %w", err) 112 } ··· 126 } 127 128 // Step 3: Scan SBOM with Grype (if enabled) 129 - if wp.cfg.VulnEnabled { 130 slog.Info("Scanning for vulnerabilities", "repository", job.Repository) 131 - vulnJSON, vulnDigest, summary, err := scanVulnerabilities(ctx, sbomResult, wp.cfg.VulnDBPath) 132 if err != nil { 133 return nil, fmt.Errorf("failed to scan vulnerabilities: %w", err) 134 }
··· 36 // Start launches worker goroutines 37 func (wp *WorkerPool) Start(ctx context.Context) { 38 // Initialize vuln database on startup if enabled 39 + if wp.cfg.Vuln.Enabled { 40 go func() { 41 + if err := initializeVulnDatabase(wp.cfg.Vuln.DBPath, wp.cfg.Vuln.TmpDir); err != nil { 42 slog.Error("Failed to initialize vulnerability database", "error", err) 43 slog.Warn("Vulnerability scanning will be disabled until database is available") 44 } 45 }() 46 } 47 48 + for i := 0; i < wp.cfg.Scanner.Workers; i++ { 49 wp.wg.Add(1) 50 go wp.worker(ctx, i) 51 } 52 53 + slog.Info("Scanner worker pool started", "workers", wp.cfg.Scanner.Workers) 54 } 55 56 // Wait blocks until all workers finish ··· 100 startTime := time.Now() 101 102 // Ensure tmp dir exists 103 + if err := ensureDir(wp.cfg.Vuln.TmpDir); err != nil { 104 return nil, fmt.Errorf("failed to create tmp dir: %w", err) 105 } 106 107 // Step 1: Extract image layers from hold via presigned URLs 108 slog.Info("Extracting image layers", "repository", job.Repository) 109 + imageDir, cleanup, err := extractLayers(job, wp.cfg.Vuln.TmpDir) 110 if err != nil { 111 return nil, fmt.Errorf("failed to extract layers: %w", err) 112 } ··· 126 } 127 128 // Step 3: Scan SBOM with Grype (if enabled) 129 + if wp.cfg.Vuln.Enabled { 130 slog.Info("Scanning for vulnerabilities", "repository", job.Repository) 131 + vulnJSON, vulnDigest, summary, err := scanVulnerabilities(ctx, sbomResult, wp.cfg.Vuln.DBPath) 132 if err != nil { 133 return nil, fmt.Errorf("failed to scan vulnerabilities: %w", err) 134 }