Monorepo for Tangled
package nixery

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"os"
	"path"
	"runtime"
	"sync"
	"time"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
	"gopkg.in/yaml.v3"
	"tangled.org/core/api/tangled"
	"tangled.org/core/log"
	"tangled.org/core/spindle/config"
	"tangled.org/core/spindle/engine"
	"tangled.org/core/spindle/models"
	"tangled.org/core/spindle/secrets"
)

const (
	workspaceDir = "/tangled/workspace"
	homeDir      = "/tangled/home"
)

type cleanupFunc func(context.Context) error

type Engine struct {
	docker client.APIClient
	l      *slog.Logger
	cfg    *config.Config

	cleanupMu sync.Mutex
	cleanup   map[string][]cleanupFunc
}

type Step struct {
	name        string
	kind        models.StepKind
	command     string
	environment map[string]string
}

func (s Step) Name() string {
	return s.name
}

func (s Step) Command() string {
	return s.command
}

func (s Step) Kind() models.StepKind {
	return s.kind
}

// setupSteps are prepended to a workflow's Steps.
type setupSteps []models.Step

// addStep appends a step to the setup list; the full list is later
// prepended to the workflow's steps.
func (ss *setupSteps) addStep(step models.Step) {
	*ss = append(*ss, step)
}

type addlFields struct {
	image     string
	container string
}

func (e *Engine) InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*models.Workflow, error) {
	swf := &models.Workflow{}
	addl := addlFields{}

	dwf := &struct {
		Steps []struct {
			Command     string            `yaml:"command"`
			Name        string            `yaml:"name"`
			Environment map[string]string `yaml:"environment"`
		} `yaml:"steps"`
		Dependencies map[string][]string `yaml:"dependencies"`
		Environment  map[string]string   `yaml:"environment"`
	}{}
	err := yaml.Unmarshal([]byte(twf.Raw), &dwf)
	if err != nil {
		return nil, err
	}

	for _, dstep := range dwf.Steps {
		sstep := Step{}
		sstep.environment = dstep.Environment
		sstep.command = dstep.Command
		sstep.name = dstep.Name
		sstep.kind = models.StepKindUser
		swf.Steps = append(swf.Steps, sstep)
	}
	swf.Name = twf.Name
	swf.Environment = dwf.Environment
	addl.image = workflowImage(dwf.Dependencies, e.cfg.NixeryPipelines.Nixery)

	setup := &setupSteps{}

	setup.addStep(nixConfStep())
	setup.addStep(models.BuildCloneStep(twf, *tpl.TriggerMetadata, e.cfg.Server.Dev))
	// dependencyStep may return nil (e.g. when there are no dependencies)
	if s := dependencyStep(dwf.Dependencies); s != nil {
		setup.addStep(*s)
	}

	// prepend the setup steps, in order, to the workflow's steps
	swf.Steps = append(*setup, swf.Steps...)
	swf.Data = addl

	return swf, nil
}

func (e *Engine) WorkflowTimeout() time.Duration {
	workflowTimeoutStr := e.cfg.NixeryPipelines.WorkflowTimeout
	workflowTimeout, err := time.ParseDuration(workflowTimeoutStr)
	if err != nil {
		e.l.Error("failed to parse workflow timeout", "error", err, "timeout", workflowTimeoutStr)
		workflowTimeout = 5 * time.Minute
	}

	return workflowTimeout
}

func workflowImage(deps map[string][]string, nixery string) string {
	var dependencies string
	for reg, ds := range deps {
		if reg == "nixpkgs" {
			dependencies = path.Join(ds...)
		}
	}

	// TODO: load these defaults from somewhere else
	dependencies = path.Join(dependencies, "bash", "git", "coreutils", "nix")

	if runtime.GOARCH == "arm64" {
		dependencies = path.Join("arm64", dependencies)
	}

	return path.Join(nixery, dependencies)
}
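// Illustrative sketch only; the values below are assumed, not taken from a
// real config. Given a Nixery instance at "nixery.dev" and a workflow whose
// raw YAML declares
//
//	dependencies:
//	  nixpkgs:
//	    - go
//	    - gnumake
//	steps:
//	  - name: build
//	    command: go build ./...
//	    environment:
//	      CGO_ENABLED: "0"
//
// InitWorkflow parses the steps as StepKindUser entries, and workflowImage
// yields "nixery.dev/go/gnumake/bash/git/coreutils/nix" (prefixed with
// "arm64/" on arm64 hosts), since Nixery encodes the requested package set
// directly in the image path.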
func New(ctx context.Context, cfg *config.Config) (*Engine, error) {
	dcli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return nil, err
	}

	l := log.FromContext(ctx).With("component", "spindle")

	e := &Engine{
		docker:  dcli,
		l:       l,
		cfg:     cfg,
		cleanup: make(map[string][]cleanupFunc),
	}

	return e, nil
}

func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId, wf *models.Workflow) error {
	e.l.Info("setting up workflow", "workflow", wid)

	_, err := e.docker.NetworkCreate(ctx, networkName(wid), network.CreateOptions{
		Driver: "bridge",
	})
	if err != nil {
		return err
	}
	e.registerCleanup(wid, func(ctx context.Context) error {
		err := e.docker.NetworkRemove(ctx, networkName(wid))
		if err != nil {
			return fmt.Errorf("removing network: %w", err)
		}
		return nil
	})

	addl := wf.Data.(addlFields)

	reader, err := e.docker.ImagePull(ctx, addl.image, image.PullOptions{})
	if err != nil {
		e.l.Error("pipeline image pull failed!", "image", addl.image, "workflowId", wid, "error", err.Error())

		return fmt.Errorf("pulling image: %w", err)
	}
	defer reader.Close()
	// the pull happens as the response body is read, so drain it fully
	if _, err := io.Copy(os.Stdout, reader); err != nil {
		return fmt.Errorf("reading image pull response: %w", err)
	}

	resp, err := e.docker.ContainerCreate(ctx, &container.Config{
		Image:      addl.image,
		Cmd:        []string{"cat"},
		OpenStdin:  true, // so cat stays alive :3
		Tty:        false,
		Hostname:   "spindle",
		WorkingDir: workspaceDir,
		Labels: map[string]string{
			"sh.tangled.pipeline/workflow_id": wid.String(),
		},
		// TODO(winter): investigate whether environment variables passed here
		// get propagated to ContainerExec processes
	}, &container.HostConfig{
		Mounts: []mount.Mount{
			{
				Type:     mount.TypeTmpfs,
				Target:   "/tmp",
				ReadOnly: false,
				TmpfsOptions: &mount.TmpfsOptions{
					Mode: 0o1777, // world-writable, sticky bit set
					Options: [][]string{
						{"exec"},
					},
				},
			},
		},
		ReadonlyRootfs: false,
		CapDrop:        []string{"ALL"},
		CapAdd:         []string{"CAP_DAC_OVERRIDE", "CAP_CHOWN", "CAP_FOWNER", "CAP_SETUID", "CAP_SETGID"},
		SecurityOpt:    []string{"no-new-privileges"},
		ExtraHosts:     []string{"host.docker.internal:host-gateway"},
	}, nil, nil, "")
	if err != nil {
		return fmt.Errorf("creating container: %w", err)
	}
	e.registerCleanup(wid, func(ctx context.Context) error {
		err := e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{})
		if err != nil {
			return fmt.Errorf("stopping container: %w", err)
		}

		err = e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{
			RemoveVolumes: true,
			RemoveLinks:   false,
			Force:         false,
		})
		if err != nil {
			return fmt.Errorf("removing container: %w", err)
		}
		return nil
	})

	err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
	if err != nil {
		return fmt.Errorf("starting container: %w", err)
	}

	mkExecResp, err := e.docker.ContainerExecCreate(ctx, resp.ID, container.ExecOptions{
		Cmd:          []string{"mkdir", "-p", workspaceDir, homeDir},
		AttachStdout: true, // NOTE(winter): attaching stdout should mean the read below only finishes once mkdir does
		AttachStderr: true, // for good measure, backed up by docker/cli ("If -d is not set, attach to everything by default")
	})
	if err != nil {
		return err
	}

	// This actually *starts* the command. Thanks, Docker!
	execResp, err := e.docker.ContainerExecAttach(ctx, mkExecResp.ID, container.ExecAttachOptions{})
	if err != nil {
		return err
	}
	defer execResp.Close()

	// Reading the attached stream to EOF is apparently the best way to wait
	// for the command to complete.
	_, err = io.ReadAll(execResp.Reader)
	if err != nil {
		return err
	}

	execInspectResp, err := e.docker.ContainerExecInspect(ctx, mkExecResp.ID)
	if err != nil {
		return err
	}

	if execInspectResp.ExitCode != 0 {
		return fmt.Errorf("mkdir exited with exit code %d", execInspectResp.ExitCode)
	} else if execInspectResp.Running {
		return errors.New("mkdir is somehow still running??")
	}

	addl.container = resp.ID
	wf.Data = addl

	return nil
}
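// For orientation, the container created above is roughly what the following
// docker CLI invocation would produce (a hand-written approximation, not
// generated by this code; $IMAGE and $WID stand in for the Nixery image and
// workflow id):
//
//	docker run -i --hostname spindle -w /tangled/workspace \
//	  --label sh.tangled.pipeline/workflow_id=$WID \
//	  --tmpfs /tmp:rw,exec,mode=1777 \
//	  --cap-drop ALL \
//	  --cap-add DAC_OVERRIDE --cap-add CHOWN --cap-add FOWNER \
//	  --cap-add SETUID --cap-add SETGID \
//	  --security-opt no-new-privileges \
//	  --add-host host.docker.internal:host-gateway \
//	  $IMAGE cat
//
// Keeping stdin open (-i / OpenStdin) is what keeps `cat`, and therefore the
// container, alive between execs.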
func (e *Engine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error {
	addl := w.Data.(addlFields)
	workflowEnvs := ConstructEnvs(w.Environment)
	// TODO(winter): should SetupWorkflow also have secret access?
	// IMO yes, but probably worth thinking on.
	for _, s := range secrets {
		workflowEnvs.AddEnv(s.Key, s.Value)
	}

	step := w.Steps[idx]

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	// copy workflowEnvs into a fresh slice so per-step additions don't
	// leak into later steps
	envs := append(EnvVars(nil), workflowEnvs...)
	if nixStep, ok := step.(Step); ok {
		for k, v := range nixStep.environment {
			envs.AddEnv(k, v)
		}
	}
	envs.AddEnv("HOME", homeDir)

	mkExecResp, err := e.docker.ContainerExecCreate(ctx, addl.container, container.ExecOptions{
		Cmd:          []string{"bash", "-c", step.Command()},
		AttachStdout: true,
		AttachStderr: true,
		Env:          envs,
	})
	if err != nil {
		return fmt.Errorf("creating exec: %w", err)
	}

	// start tailing logs in the background
	tailDone := make(chan error, 1)
	go func() {
		tailDone <- e.tailStep(ctx, wfLogger, mkExecResp.ID, wid, idx, step)
	}()

	select {
	case <-tailDone:

	case <-ctx.Done():
		// cleanup will be handled by DestroyWorkflow, since
		// Docker doesn't provide an API to kill an exec run
		// (sure, we could grab the PID and kill it ourselves,
		// but that's wasted effort)
		e.l.Warn("step timed out", "step", step.Name())

		<-tailDone

		return engine.ErrTimedOut
	}

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	execInspectResp, err := e.docker.ContainerExecInspect(ctx, mkExecResp.ID)
	if err != nil {
		return err
	}

	if execInspectResp.ExitCode != 0 {
		inspectResp, err := e.docker.ContainerInspect(ctx, addl.container)
		if err != nil {
			return err
		}

		e.l.Error("workflow failed!", "workflow_id", wid.String(), "exit_code", execInspectResp.ExitCode, "oom_killed", inspectResp.State.OOMKilled)

		if inspectResp.State.OOMKilled {
			return ErrOOMKilled
		}
		return engine.ErrWorkflowFailed
	}

	return nil
}
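// Env layering sketch with assumed values (none of these come from a real
// workflow): for a workflow environment {CI: "true"}, one unlocked secret
// API_KEY, and a step-level environment {CI: "false"}, the exec above is
// created with, in order:
//
//	CI=true            (workflow environment)
//	API_KEY=<secret>   (unlocked secret)
//	CI=false           (step environment)
//	HOME=/tangled/home (always appended last)
//
// More specific scopes are appended later, so in practice a step-level
// variable shadows a workflow-level variable of the same name.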
func (e *Engine) tailStep(ctx context.Context, wfLogger *models.WorkflowLogger, execID string, wid models.WorkflowId, stepIdx int, step models.Step) error {
	if wfLogger == nil {
		return nil
	}

	// This actually *starts* the command. Thanks, Docker!
	logs, err := e.docker.ContainerExecAttach(ctx, execID, container.ExecAttachOptions{})
	if err != nil {
		return err
	}
	defer logs.Close()

	// demultiplex Docker's combined output stream into the step's stdout
	// and stderr log writers
	_, err = stdcopy.StdCopy(
		wfLogger.DataWriter(stepIdx, "stdout"),
		wfLogger.DataWriter(stepIdx, "stderr"),
		logs.Reader,
	)
	if err != nil && err != io.EOF && !errors.Is(err, context.DeadlineExceeded) {
		return fmt.Errorf("failed to copy logs: %w", err)
	}

	return nil
}

func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
	fns := e.drainCleanups(wid)

	for _, fn := range fns {
		if err := fn(ctx); err != nil {
			e.l.Error("failed to clean up workflow resource", "workflowId", wid, "error", err)
		}
	}
	return nil
}

func (e *Engine) registerCleanup(wid models.WorkflowId, fn cleanupFunc) {
	e.cleanupMu.Lock()
	defer e.cleanupMu.Unlock()

	key := wid.String()
	e.cleanup[key] = append(e.cleanup[key], fn)
}

func (e *Engine) drainCleanups(wid models.WorkflowId) []cleanupFunc {
	e.cleanupMu.Lock()
	defer e.cleanupMu.Unlock()

	key := wid.String()
	fns := e.cleanup[key]
	delete(e.cleanup, key)

	return fns
}

func networkName(wid models.WorkflowId) string {
	return fmt.Sprintf("workflow-network-%s", wid)
}
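// Hypothetical caller sketch (not part of this package; eng, wid, wf,
// unlocked, and wfLogger are assumed names) showing the lifecycle the
// methods above are designed for: set up once, run steps in order, always
// destroy:
//
//	ctx, cancel := context.WithTimeout(ctx, eng.WorkflowTimeout())
//	defer cancel()
//
//	if err := eng.SetupWorkflow(ctx, wid, wf); err != nil {
//		return err
//	}
//	// use a non-cancelled context so cleanup still runs after a timeout
//	defer eng.DestroyWorkflow(context.WithoutCancel(ctx), wid)
//
//	for i := range wf.Steps {
//		if err := eng.RunStep(ctx, wid, wf, i, unlocked, wfLogger); err != nil {
//			return err
//		}
//	}
//
// DestroyWorkflow drains the registered cleanups (network, then container)
// in registration order, logging individual failures rather than aborting.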