tangled
alpha
login
or
join now
evan.jarrett.net
/
core
forked from
tangled.org/core
0
fork
atom
Monorepo for Tangled — https://tangled.org
0
fork
atom
overview
issues
pulls
pipelines
POC buildah engine
evan.jarrett.net
4 months ago
61256d17
1b21764c
verified
This commit was signed with the committer's
known signature
.
evan.jarrett.net
SSH Key Fingerprint:
SHA256:bznk0uVPp7XFOl67P0uTM1pCjf2A4ojeP/lsUE7uauQ=
+659
-3
7 changed files
expand all
collapse all
unified
split
.tangled
workflows
docker-build-example.yml
Dockerfile.example
spindle
config
config.go
engines
buildah
build.go
engine.go
errors.go
server.go
+38
.tangled/workflows/docker-build-example.yml
···
1
1
+
when:
  - event: ["push"]
    branch: master

engine: buildah

build:
  # Path to Dockerfile relative to repository root
  dockerfile: "./Dockerfile"

  # Build context directory
  context: "."

  # Optional: specify a target for multi-stage builds
  # target: "production"

  # Optional: build arguments
  build_args:
    VERSION: "1.0.0"
    GO_VERSION: "1.21"

  # Image destinations (will be tagged and pushed)
  # NOTE(review): the engine does not appear to expand ${GIT_SHA} itself;
  # it is interpolated into the build shell unescaped — confirm the
  # variable is exported inside the build container.
  destination:
    - "docker.io/myorg/tangled-app:latest"
    - "docker.io/myorg/tangled-app:${GIT_SHA}"

  # Registry credentials from Spindle secrets
  registry_credentials:
    username_secret: "DOCKER_USERNAME"
    password_secret: "DOCKER_PASSWORD"
    registry: "docker.io"

# Optional: additional steps to run after the build
# steps:
#   - name: "Verify image"
#     command: |
#       echo "Image built successfully!"
#       buildah images
+38
Dockerfile.example
···
1
1
+
# Example Dockerfile for building a Go application with the buildah engine
# This is just an example - adjust based on your needs

# Build stage
FROM golang:1.21 AS builder

WORKDIR /app

# Copy go mod files
COPY go.mod go.sum ./
RUN go mod download

# Copy source code
COPY . .

# Build the applications
# NOTE(review): the placeholder file presumably satisfies a go:embed of
# static assets in appview — confirm against the appview package.
RUN mkdir -p appview/pages/static && touch appview/pages/static/x
RUN CGO_ENABLED=1 go build -o /app/appview.out ./cmd/appview
RUN CGO_ENABLED=1 go build -o /app/knot.out ./cmd/knot
RUN CGO_ENABLED=1 go build -o /app/spindle.out ./cmd/spindle

# Runtime stage
FROM debian:bookworm-slim

# Install runtime dependencies
RUN apt-get update && apt-get install -y \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy binaries from builder
COPY --from=builder /app/appview.out /app/appview
COPY --from=builder /app/knot.out /app/knot
COPY --from=builder /app/spindle.out /app/spindle

# Default command
CMD ["/app/appview"]
+8
-2
spindle/config/config.go
···
40
40
WorkflowTimeout string `env:"WORKFLOW_TIMEOUT, default=5m"`
41
41
}
42
42
43
43
+
// BuildahPipelines configures the buildah pipeline engine.
type BuildahPipelines struct {
	// Image is the container image used to run buildah builds and
	// user-defined steps.
	Image string `env:"IMAGE, default=quay.io/buildah/stable:latest"`
	// WorkflowTimeout is parsed with time.ParseDuration; the engine
	// falls back to 15m when it fails to parse.
	WorkflowTimeout string `env:"WORKFLOW_TIMEOUT, default=15m"`
}
47
47
+
43
48
// Config is the top-level spindle configuration, populated from
// SPINDLE_*-prefixed environment variables.
type Config struct {
	Server           Server           `env:",prefix=SPINDLE_SERVER_"`
	NixeryPipelines  NixeryPipelines  `env:",prefix=SPINDLE_NIXERY_PIPELINES_"`
	BuildahPipelines BuildahPipelines `env:",prefix=SPINDLE_BUILDAH_PIPELINES_"`
}
47
53
48
54
func Load(ctx context.Context) (*Config, error) {
+127
spindle/engines/buildah/build.go
···
1
1
+
package buildah
2
2
+
3
3
+
import (
4
4
+
"context"
5
5
+
"fmt"
6
6
+
"io"
7
7
+
"path"
8
8
+
"strings"
9
9
+
10
10
+
"github.com/docker/docker/api/types/container"
11
11
+
"github.com/docker/docker/api/types/image"
12
12
+
"github.com/docker/docker/api/types/mount"
13
13
+
"tangled.org/core/api/tangled"
14
14
+
"tangled.org/core/spindle/models"
15
15
+
"tangled.org/core/workflow"
16
16
+
)
17
17
+
18
18
+
// populateBuildContext clones the repository into the build context volume
19
19
+
func (e *Engine) populateBuildContext(ctx context.Context, wid models.WorkflowId, volumeName string, twf tangled.Pipeline_Workflow, tr tangled.Pipeline_TriggerMetadata) error {
20
20
+
// Build git clone commands
21
21
+
var commands []string
22
22
+
23
23
+
// Initialize git repo in workspace
24
24
+
commands = append(commands, "git init")
25
25
+
26
26
+
// Add repo as git remote
27
27
+
scheme := "https://"
28
28
+
if e.cfg.Server.Dev {
29
29
+
scheme = "http://"
30
30
+
tr.Repo.Knot = strings.ReplaceAll(tr.Repo.Knot, "localhost", "host.docker.internal")
31
31
+
}
32
32
+
url := scheme + path.Join(tr.Repo.Knot, tr.Repo.Did, tr.Repo.Repo)
33
33
+
commands = append(commands, fmt.Sprintf("git remote add origin %s", url))
34
34
+
35
35
+
// Run git fetch
36
36
+
var fetchArgs []string
37
37
+
38
38
+
// Default clone depth is 1
39
39
+
depth := 1
40
40
+
if twf.Clone.Depth > 1 {
41
41
+
depth = int(twf.Clone.Depth)
42
42
+
}
43
43
+
fetchArgs = append(fetchArgs, fmt.Sprintf("--depth=%d", depth))
44
44
+
45
45
+
// Optionally recurse submodules
46
46
+
if twf.Clone.Submodules {
47
47
+
fetchArgs = append(fetchArgs, "--recurse-submodules=yes")
48
48
+
}
49
49
+
50
50
+
// Set remote to fetch from
51
51
+
fetchArgs = append(fetchArgs, "origin")
52
52
+
53
53
+
// Set revision to checkout based on trigger type
54
54
+
switch workflow.TriggerKind(tr.Kind) {
55
55
+
case workflow.TriggerKindManual:
56
56
+
// TODO: unimplemented
57
57
+
case workflow.TriggerKindPush:
58
58
+
fetchArgs = append(fetchArgs, tr.Push.NewSha)
59
59
+
case workflow.TriggerKindPullRequest:
60
60
+
fetchArgs = append(fetchArgs, tr.PullRequest.SourceSha)
61
61
+
}
62
62
+
63
63
+
commands = append(commands, fmt.Sprintf("git fetch %s", strings.Join(fetchArgs, " ")))
64
64
+
65
65
+
// Run git checkout
66
66
+
commands = append(commands, "git checkout FETCH_HEAD")
67
67
+
68
68
+
script := strings.Join(commands, "\n")
69
69
+
70
70
+
// Use alpine/git image to clone the repository
71
71
+
gitImage := "alpine/git:latest"
72
72
+
73
73
+
// Pull git image
74
74
+
reader, err := e.docker.ImagePull(ctx, gitImage, image.PullOptions{})
75
75
+
if err != nil {
76
76
+
return fmt.Errorf("pulling git image: %w", err)
77
77
+
}
78
78
+
defer reader.Close()
79
79
+
80
80
+
// Consume the pull output
81
81
+
io.Copy(io.Discard, reader)
82
82
+
83
83
+
// Create container to clone repo
84
84
+
resp, err := e.docker.ContainerCreate(ctx, &container.Config{
85
85
+
Image: gitImage,
86
86
+
Cmd: []string{"sh", "-c", script},
87
87
+
WorkingDir: workspaceDir,
88
88
+
}, &container.HostConfig{
89
89
+
Mounts: []mount.Mount{
90
90
+
{
91
91
+
Type: mount.TypeVolume,
92
92
+
Source: volumeName,
93
93
+
Target: workspaceDir,
94
94
+
},
95
95
+
},
96
96
+
}, nil, nil, "")
97
97
+
if err != nil {
98
98
+
return fmt.Errorf("creating git clone container: %w", err)
99
99
+
}
100
100
+
101
101
+
// Ensure cleanup
102
102
+
defer func() {
103
103
+
e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{Force: true})
104
104
+
}()
105
105
+
106
106
+
// Start container
107
107
+
err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
108
108
+
if err != nil {
109
109
+
return fmt.Errorf("starting git clone container: %w", err)
110
110
+
}
111
111
+
112
112
+
// Wait for completion
113
113
+
statusCh, errCh := e.docker.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
114
114
+
select {
115
115
+
case err := <-errCh:
116
116
+
return fmt.Errorf("git clone error: %w", err)
117
117
+
case status := <-statusCh:
118
118
+
if status.StatusCode != 0 {
119
119
+
e.l.Error("git clone failed", "workflow_id", wid, "exit_code", status.StatusCode)
120
120
+
return fmt.Errorf("git clone failed with exit code %d", status.StatusCode)
121
121
+
}
122
122
+
case <-ctx.Done():
123
123
+
return ctx.Err()
124
124
+
}
125
125
+
126
126
+
return nil
127
127
+
}
+430
spindle/engines/buildah/engine.go
···
1
1
+
package buildah
2
2
+
3
3
+
import (
4
4
+
"context"
5
5
+
"fmt"
6
6
+
"io"
7
7
+
"log/slog"
8
8
+
"os"
9
9
+
"path"
10
10
+
"strings"
11
11
+
"sync"
12
12
+
"time"
13
13
+
14
14
+
"github.com/docker/docker/api/types/container"
15
15
+
"github.com/docker/docker/api/types/image"
16
16
+
"github.com/docker/docker/api/types/mount"
17
17
+
"github.com/docker/docker/api/types/volume"
18
18
+
"github.com/docker/docker/client"
19
19
+
"github.com/docker/docker/pkg/stdcopy"
20
20
+
"gopkg.in/yaml.v3"
21
21
+
"tangled.org/core/api/tangled"
22
22
+
"tangled.org/core/log"
23
23
+
"tangled.org/core/spindle/config"
24
24
+
"tangled.org/core/spindle/engine"
25
25
+
"tangled.org/core/spindle/models"
26
26
+
"tangled.org/core/spindle/secrets"
27
27
+
)
28
28
+
29
29
+
const (
	// workspaceDir is the path inside every container where the
	// build-context volume is mounted.
	workspaceDir = "/workspace"
	// homeDir is the container user's home directory.
	// NOTE(review): not referenced anywhere in this file — confirm it
	// is needed.
	homeDir = "/root"
)

// cleanupFunc releases one resource (volume, container, ...) created
// for a workflow run.
type cleanupFunc func(context.Context) error

// Engine builds and pushes container images with buildah, driving a
// Docker daemon to run the build containers.
type Engine struct {
	docker client.APIClient
	l      *slog.Logger
	cfg    *config.Config

	// cleanupMu guards cleanup. Functions are registered per workflow
	// id by registerCleanup and drained by DestroyWorkflow.
	cleanupMu sync.Mutex
	cleanup   map[string][]cleanupFunc
}
44
44
+
45
45
+
// BuildConfig is the `build:` section of a buildah workflow file.
type BuildConfig struct {
	// Dockerfile path relative to the repository root; defaults to
	// "Dockerfile" in InitWorkflow.
	Dockerfile string `yaml:"dockerfile"`
	// Context is the build context directory; defaults to ".".
	Context string `yaml:"context"`
	// Target optionally selects a multi-stage build target.
	Target string `yaml:"target"`
	// BuildArgs are passed to buildah as --build-arg KEY=VALUE.
	BuildArgs map[string]string `yaml:"build_args"`
	// Destination lists image references to tag and push; at least one
	// is required.
	Destination []string `yaml:"destination"`
	// RegistryCredentials names the Spindle secrets holding registry
	// auth and the registry to log in to. Login is skipped unless all
	// three resolve to non-empty values.
	RegistryCredentials struct {
		UsernameSecret string `yaml:"username_secret"`
		PasswordSecret string `yaml:"password_secret"`
		Registry       string `yaml:"registry"`
	} `yaml:"registry_credentials"`
}
57
57
+
58
58
+
// Step is a user-defined workflow step: a named shell command run in a
// container based on the buildah image.
type Step struct {
	name    string
	kind    models.StepKind
	command string
}

// Name returns the step's display name.
func (s Step) Name() string {
	return s.name
}

// Command returns the shell command the step executes (via `sh -c`).
func (s Step) Command() string {
	return s.command
}

// Kind returns the step kind (always models.StepKindUser for steps
// created by InitWorkflow).
func (s Step) Kind() models.StepKind {
	return s.kind
}
75
75
+
76
76
+
// addlFields is engine-private state carried in models.Workflow.Data
// between InitWorkflow, SetupWorkflow and RunStep.
type addlFields struct {
	// volumeName is the Docker volume holding the cloned build context;
	// set by SetupWorkflow.
	volumeName      string
	buildCfg        BuildConfig
	workflow        tangled.Pipeline_Workflow
	triggerMetadata tangled.Pipeline_TriggerMetadata
}
82
82
+
83
83
+
func New(ctx context.Context, cfg *config.Config) (*Engine, error) {
84
84
+
dcli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
85
85
+
if err != nil {
86
86
+
return nil, err
87
87
+
}
88
88
+
89
89
+
l := log.FromContext(ctx).With("component", "buildah-engine")
90
90
+
91
91
+
e := &Engine{
92
92
+
docker: dcli,
93
93
+
l: l,
94
94
+
cfg: cfg,
95
95
+
}
96
96
+
97
97
+
e.cleanup = make(map[string][]cleanupFunc)
98
98
+
99
99
+
return e, nil
100
100
+
}
101
101
+
102
102
+
// InitWorkflow parses the raw workflow YAML for the buildah engine:
// it validates/defaults the build configuration and converts any
// user-defined steps into engine Steps.
//
// NOTE(review): only user steps are appended to swf.Steps, yet RunStep
// treats index 0 as the implicit build step — confirm the spindle core
// prepends a build step, otherwise step indexing is off by one.
func (e *Engine) InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*models.Workflow, error) {
	swf := &models.Workflow{}
	addl := addlFields{}

	// Shape of the engine-specific portion of the workflow file.
	dwf := &struct {
		Build BuildConfig `yaml:"build"`
		Steps []struct {
			Command string `yaml:"command"`
			Name    string `yaml:"name"`
		} `yaml:"steps"`
	}{}

	// dwf is already a pointer, so &dwf is a **struct; yaml.v3 follows
	// the extra indirection.
	err := yaml.Unmarshal([]byte(twf.Raw), &dwf)
	if err != nil {
		return nil, err
	}

	// Validate build configuration, defaulting dockerfile and context.
	if dwf.Build.Dockerfile == "" {
		dwf.Build.Dockerfile = "Dockerfile"
	}
	if dwf.Build.Context == "" {
		dwf.Build.Context = "."
	}
	if len(dwf.Build.Destination) == 0 {
		return nil, fmt.Errorf("%w: at least one destination required", ErrInvalidConfig)
	}

	// Convert user steps
	for _, dstep := range dwf.Steps {
		sstep := Step{}
		sstep.command = dstep.Command
		sstep.name = dstep.Name
		sstep.kind = models.StepKindUser
		swf.Steps = append(swf.Steps, sstep)
	}

	swf.Name = twf.Name
	addl.buildCfg = dwf.Build
	addl.workflow = twf
	if tpl.TriggerMetadata != nil {
		addl.triggerMetadata = *tpl.TriggerMetadata
	}
	swf.Data = addl

	return swf, nil
}
149
149
+
150
150
+
func (e *Engine) WorkflowTimeout() time.Duration {
151
151
+
workflowTimeoutStr := e.cfg.BuildahPipelines.WorkflowTimeout
152
152
+
workflowTimeout, err := time.ParseDuration(workflowTimeoutStr)
153
153
+
if err != nil {
154
154
+
e.l.Error("failed to parse workflow timeout", "error", err, "timeout", workflowTimeoutStr)
155
155
+
workflowTimeout = 15 * time.Minute
156
156
+
}
157
157
+
158
158
+
return workflowTimeout
159
159
+
}
160
160
+
161
161
+
// SetupWorkflow prepares per-run resources: it creates the
// build-context volume (registering its removal for DestroyWorkflow)
// and, unless the workflow skips cloning, populates it with the
// repository contents.
func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId, wf *models.Workflow) error {
	e.l.Info("setting up buildah workflow", "workflow", wid)

	// Panics if wf.Data was not set by InitWorkflow (engine invariant).
	addl := wf.Data.(addlFields)

	// Create Docker volume for build context
	volumeName := fmt.Sprintf("buildah-context-%s", wid.String())
	_, err := e.docker.VolumeCreate(ctx, volumeCreateOptions(volumeName))
	if err != nil {
		return fmt.Errorf("creating build context volume: %w", err)
	}
	// Force-remove the volume when the workflow is destroyed.
	e.registerCleanup(wid, func(ctx context.Context) error {
		return e.docker.VolumeRemove(ctx, volumeName, true)
	})

	// Clone repository into volume using git container
	if !addl.workflow.Clone.Skip {
		err = e.populateBuildContext(ctx, wid, volumeName, addl.workflow, addl.triggerMetadata)
		if err != nil {
			return fmt.Errorf("populating build context: %w", err)
		}
	}

	// Update workflow data with volume name (addl is a copy of the
	// interface value, so it must be written back).
	addl.volumeName = volumeName
	wf.Data = addl

	return nil
}
190
190
+
191
191
+
// RunStep dispatches one workflow step. Index 0 is treated as the
// implicit buildah build+push; higher indices run user-defined shell
// commands from w.Steps.
//
// NOTE(review): InitWorkflow only appends user steps to w.Steps —
// confirm the caller prepends a build step so w.Steps[idx] lines up
// for idx > 0.
func (e *Engine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error {
	// Panics unless InitWorkflow populated Data (engine invariant).
	addl := w.Data.(addlFields)

	// Check for timeout / cancellation before starting any work.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	// The main build step runs buildah
	if idx == 0 {
		return e.runBuildahBuild(ctx, wid, addl, secrets, wfLogger)
	}

	// User-defined steps run as regular bash commands in a buildah container
	step := w.Steps[idx].(Step)
	return e.runBashStep(ctx, wid, addl, step, secrets, wfLogger)
}
210
210
+
211
211
+
func (e *Engine) runBuildahBuild(ctx context.Context, wid models.WorkflowId, addl addlFields, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error {
212
212
+
buildCfg := addl.buildCfg
213
213
+
214
214
+
// Extract registry credentials from secrets
215
215
+
var registryUser, registryPass string
216
216
+
for _, s := range secrets {
217
217
+
if s.Key == buildCfg.RegistryCredentials.UsernameSecret {
218
218
+
registryUser = s.Value
219
219
+
}
220
220
+
if s.Key == buildCfg.RegistryCredentials.PasswordSecret {
221
221
+
registryPass = s.Value
222
222
+
}
223
223
+
}
224
224
+
225
225
+
// Build the command script
226
226
+
var scriptParts []string
227
227
+
228
228
+
// Login to registry if credentials provided
229
229
+
if registryUser != "" && registryPass != "" && buildCfg.RegistryCredentials.Registry != "" {
230
230
+
scriptParts = append(scriptParts, fmt.Sprintf(
231
231
+
`echo "$REGISTRY_PASSWORD" | buildah login -u "$REGISTRY_USERNAME" --password-stdin %s`,
232
232
+
buildCfg.RegistryCredentials.Registry,
233
233
+
))
234
234
+
}
235
235
+
236
236
+
// Build command
237
237
+
buildCmd := []string{"buildah", "bud"}
238
238
+
buildCmd = append(buildCmd, "--file", path.Join(workspaceDir, buildCfg.Dockerfile))
239
239
+
240
240
+
if buildCfg.Target != "" {
241
241
+
buildCmd = append(buildCmd, "--target", buildCfg.Target)
242
242
+
}
243
243
+
244
244
+
for k, v := range buildCfg.BuildArgs {
245
245
+
buildCmd = append(buildCmd, "--build-arg", fmt.Sprintf("%s=%s", k, v))
246
246
+
}
247
247
+
248
248
+
// Tag with all destinations
249
249
+
for _, dest := range buildCfg.Destination {
250
250
+
buildCmd = append(buildCmd, "--tag", dest)
251
251
+
}
252
252
+
253
253
+
// Use vfs storage driver (doesn't require privileges)
254
254
+
buildCmd = append(buildCmd, "--storage-driver", "vfs")
255
255
+
buildCmd = append(buildCmd, path.Join(workspaceDir, buildCfg.Context))
256
256
+
257
257
+
scriptParts = append(scriptParts, strings.Join(buildCmd, " "))
258
258
+
259
259
+
// Push to all destinations
260
260
+
for _, dest := range buildCfg.Destination {
261
261
+
scriptParts = append(scriptParts, fmt.Sprintf("buildah push %s", dest))
262
262
+
}
263
263
+
264
264
+
script := strings.Join(scriptParts, "\n")
265
265
+
266
266
+
// Pull buildah image
267
267
+
reader, err := e.docker.ImagePull(ctx, e.cfg.BuildahPipelines.Image, image.PullOptions{})
268
268
+
if err != nil {
269
269
+
return fmt.Errorf("pulling buildah image: %w", err)
270
270
+
}
271
271
+
io.Copy(os.Stdout, reader)
272
272
+
reader.Close()
273
273
+
274
274
+
// Create buildah container
275
275
+
resp, err := e.docker.ContainerCreate(ctx, &container.Config{
276
276
+
Image: e.cfg.BuildahPipelines.Image,
277
277
+
Cmd: []string{"sh", "-c", script},
278
278
+
Env: []string{
279
279
+
fmt.Sprintf("REGISTRY_USERNAME=%s", registryUser),
280
280
+
fmt.Sprintf("REGISTRY_PASSWORD=%s", registryPass),
281
281
+
},
282
282
+
WorkingDir: workspaceDir,
283
283
+
Labels: map[string]string{
284
284
+
"sh.tangled.pipeline/workflow_id": wid.String(),
285
285
+
},
286
286
+
}, &container.HostConfig{
287
287
+
Mounts: []mount.Mount{
288
288
+
{
289
289
+
Type: mount.TypeVolume,
290
290
+
Source: addl.volumeName,
291
291
+
Target: workspaceDir,
292
292
+
},
293
293
+
},
294
294
+
CapAdd: []string{"CAP_SETUID", "CAP_SETGID"},
295
295
+
SecurityOpt: []string{"no-new-privileges"},
296
296
+
}, nil, nil, "")
297
297
+
if err != nil {
298
298
+
return fmt.Errorf("creating buildah container: %w", err)
299
299
+
}
300
300
+
301
301
+
// Ensure cleanup
302
302
+
defer func() {
303
303
+
e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{Force: true})
304
304
+
}()
305
305
+
306
306
+
// Start container
307
307
+
err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
308
308
+
if err != nil {
309
309
+
return fmt.Errorf("starting buildah container: %w", err)
310
310
+
}
311
311
+
312
312
+
// Stream logs
313
313
+
if wfLogger != nil {
314
314
+
go e.streamLogs(ctx, resp.ID, wfLogger)
315
315
+
}
316
316
+
317
317
+
// Wait for completion
318
318
+
statusCh, errCh := e.docker.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
319
319
+
select {
320
320
+
case err := <-errCh:
321
321
+
return err
322
322
+
case status := <-statusCh:
323
323
+
if status.StatusCode != 0 {
324
324
+
e.l.Error("buildah build failed", "workflow_id", wid, "exit_code", status.StatusCode)
325
325
+
return ErrBuildFailed
326
326
+
}
327
327
+
case <-ctx.Done():
328
328
+
return engine.ErrTimedOut
329
329
+
}
330
330
+
331
331
+
return nil
332
332
+
}
333
333
+
334
334
+
// runBashStep runs one user-defined step's command via `sh -c` in a
// fresh container based on the buildah image, with the build-context
// volume mounted at workspaceDir.
//
// NOTE(review): the secrets parameter is accepted but never exposed to
// the step (no env injection) — confirm whether user steps are meant
// to see workflow secrets.
func (e *Engine) runBashStep(ctx context.Context, wid models.WorkflowId, addl addlFields, step Step, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error {
	// Similar to runBuildahBuild but just runs the user's command
	resp, err := e.docker.ContainerCreate(ctx, &container.Config{
		Image:      e.cfg.BuildahPipelines.Image,
		Cmd:        []string{"sh", "-c", step.Command()},
		WorkingDir: workspaceDir,
	}, &container.HostConfig{
		Mounts: []mount.Mount{
			{
				Type:   mount.TypeVolume,
				Source: addl.volumeName,
				Target: workspaceDir,
			},
		},
		// buildah needs setuid/setgid for user-namespace setup, but no
		// further privilege escalation.
		CapAdd:      []string{"CAP_SETUID", "CAP_SETGID"},
		SecurityOpt: []string{"no-new-privileges"},
	}, nil, nil, "")
	if err != nil {
		return fmt.Errorf("creating container for step: %w", err)
	}

	// Ensure cleanup of the step container regardless of outcome.
	defer func() {
		e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{Force: true})
	}()

	err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
	if err != nil {
		return fmt.Errorf("starting container: %w", err)
	}

	// Stream logs in the background; the goroutine ends when the
	// container's log stream closes.
	if wfLogger != nil {
		go e.streamLogs(ctx, resp.ID, wfLogger)
	}

	// Wait for completion (or context cancellation).
	statusCh, errCh := e.docker.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
	select {
	case err := <-errCh:
		return err
	case status := <-statusCh:
		if status.StatusCode != 0 {
			return engine.ErrWorkflowFailed
		}
	case <-ctx.Done():
		return engine.ErrTimedOut
	}

	return nil
}
382
382
+
383
383
+
// streamLogs follows the container's log stream and demultiplexes it
// into the workflow logger's stdout/stderr writers. It blocks until the
// stream ends (container exit) or ctx is cancelled.
func (e *Engine) streamLogs(ctx context.Context, containerID string, wfLogger *models.WorkflowLogger) error {
	logs, err := e.docker.ContainerLogs(ctx, containerID, container.LogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     true,
	})
	if err != nil {
		return err
	}
	defer logs.Close()

	// Docker multiplexes stdout and stderr over one connection;
	// StdCopy splits them back out to the two writers.
	_, err = stdcopy.StdCopy(
		wfLogger.DataWriter("stdout"),
		wfLogger.DataWriter("stderr"),
		logs,
	)
	return err
}
401
401
+
402
402
+
func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
403
403
+
e.cleanupMu.Lock()
404
404
+
key := wid.String()
405
405
+
406
406
+
fns := e.cleanup[key]
407
407
+
delete(e.cleanup, key)
408
408
+
e.cleanupMu.Unlock()
409
409
+
410
410
+
for _, fn := range fns {
411
411
+
if err := fn(ctx); err != nil {
412
412
+
e.l.Error("failed to cleanup workflow resource", "workflowId", wid, "error", err)
413
413
+
}
414
414
+
}
415
415
+
return nil
416
416
+
}
417
417
+
418
418
+
func (e *Engine) registerCleanup(wid models.WorkflowId, fn cleanupFunc) {
419
419
+
e.cleanupMu.Lock()
420
420
+
defer e.cleanupMu.Unlock()
421
421
+
422
422
+
key := wid.String()
423
423
+
e.cleanup[key] = append(e.cleanup[key], fn)
424
424
+
}
425
425
+
426
426
+
func volumeCreateOptions(name string) volume.CreateOptions {
427
427
+
return volume.CreateOptions{
428
428
+
Name: name,
429
429
+
}
430
430
+
}
+11
spindle/engines/buildah/errors.go
···
1
1
+
package buildah
2
2
+
3
3
+
import "errors"
4
4
+
5
5
+
// Sentinel errors returned by the buildah engine; compare with
// errors.Is.
var (
	// ErrBuildFailed indicates the buildah container exited non-zero.
	ErrBuildFailed = errors.New("build failed")
	// ErrPushFailed indicates pushing an image to a registry failed.
	// NOTE(review): not referenced in the visible engine code.
	ErrPushFailed = errors.New("push failed")
	// ErrLoginFailed indicates registry authentication failed.
	// NOTE(review): not referenced in the visible engine code.
	ErrLoginFailed = errors.New("registry login failed")
	// ErrInvalidConfig wraps workflow-file validation failures.
	ErrInvalidConfig = errors.New("invalid build configuration")
	// ErrMissingDockerfile indicates the configured Dockerfile was not
	// found. NOTE(review): not referenced in the visible engine code.
	ErrMissingDockerfile = errors.New("dockerfile not found")
)
+7
-1
spindle/server.go
···
20
20
"tangled.org/core/spindle/config"
21
21
"tangled.org/core/spindle/db"
22
22
"tangled.org/core/spindle/engine"
23
23
+
"tangled.org/core/spindle/engines/buildah"
23
24
"tangled.org/core/spindle/engines/nixery"
24
25
"tangled.org/core/spindle/models"
25
26
"tangled.org/core/spindle/queue"
···
100
101
return err
101
102
}
102
103
104
104
+
buildahEng, err := buildah.New(ctx, cfg)
105
105
+
if err != nil {
106
106
+
return err
107
107
+
}
108
108
+
103
109
jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
104
110
logger.Info("initialized queue", "queueSize", cfg.Server.QueueSize, "numWorkers", cfg.Server.MaxJobCount)
105
111
···
131
137
db: d,
132
138
l: logger,
133
139
n: &n,
134
134
-
engs: map[string]models.Engine{"nixery": nixeryEng},
140
140
+
engs: map[string]models.Engine{"nixery": nixeryEng, "buildah": buildahEng},
135
141
jq: jq,
136
142
cfg: cfg,
137
143
res: resolver,