forked from tilt-dev/tilt
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdocker_builder.go
676 lines (586 loc) · 20.4 KB
/
docker_builder.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
package build
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/jsonmessage"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/session/filesync"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
fsutiltypes "github.com/tonistiigi/fsutil/types"
"golang.org/x/sync/errgroup"
ktypes "k8s.io/apimachinery/pkg/types"
"github.com/tilt-dev/tilt/internal/container"
"github.com/tilt-dev/tilt/internal/docker"
"github.com/tilt-dev/tilt/internal/dockerfile"
"github.com/tilt-dev/tilt/internal/k8s"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/logger"
"github.com/tilt-dev/tilt/pkg/model"
)
// DockerBuilder builds and tags Docker images through a docker.Client.
//
// It implements DockerKubeConnection by delegating to the client's
// environment (see WillBuildToKubeContext).
type DockerBuilder struct {
	// dCli is the docker client used for all build/tag/push/inspect calls.
	dCli docker.Client

	// A set of extra labels to attach to all builds
	// created by this image builder.
	//
	// By default, all builds are labeled with a build mode.
	extraLabels dockerfile.Labels
}
// DockerKubeConnection describes how a docker instance connects to kubernetes instances.
type DockerKubeConnection interface {
	// WillBuildToKubeContext returns whether this docker builder is going to
	// build to the given kubernetes context.
	WillBuildToKubeContext(kctx k8s.KubeContext) bool
}
// NewDockerBuilder creates a DockerBuilder that builds through dCli and
// attaches extraLabels to every image it builds.
func NewDockerBuilder(dCli docker.Client, extraLabels dockerfile.Labels) *DockerBuilder {
	return &DockerBuilder{
		dCli:        dCli,
		extraLabels: extraLabels,
	}
}
// WillBuildToKubeContext returns whether this docker builder is going to build
// to the given kubernetes context. Delegates to the docker client's environment.
func (d *DockerBuilder) WillBuildToKubeContext(kctx k8s.KubeContext) bool {
	return d.dCli.Env().WillBuildToKubeContext(kctx)
}
// DumpImageDeployRef resolves ref against the local docker daemon and
// returns it re-tagged with the tilt tag derived from the image's digest.
func (d *DockerBuilder) DumpImageDeployRef(ctx context.Context, ref string) (reference.NamedTagged, error) {
	// Parse the incoming ref so we can attach a tag to it below.
	named, err := container.ParseNamed(ref)
	if err != nil {
		return nil, errors.Wrap(err, "DumpImageDeployRef")
	}

	// Look up the image to get its content-addressable ID.
	inspect, _, err := d.dCli.ImageInspectWithRaw(ctx, ref)
	if err != nil {
		return nil, errors.Wrap(err, "DumpImageDeployRef")
	}

	// Derive the deterministic tilt tag from the image digest.
	deployTag, err := digestAsTag(digest.Digest(inspect.ID))
	if err != nil {
		return nil, errors.Wrap(err, "DumpImageDeployRef")
	}

	tagged, err := reference.WithTag(named, deployTag)
	if err != nil {
		return nil, errors.Wrap(err, "DumpImageDeployRef")
	}
	return tagged, nil
}
// TagRefs tags the image identified by dig with the given ref set, using a
// tag suffix derived from the digest.
func (d *DockerBuilder) TagRefs(ctx context.Context, refs container.RefSet, dig digest.Digest) (container.TaggedRefs, error) {
	suffix, err := digestAsTag(dig)
	if err != nil {
		return container.TaggedRefs{}, errors.Wrap(err, "TagImage")
	}

	taggedRefs, err := refs.AddTagSuffix(suffix)
	if err != nil {
		return container.TaggedRefs{}, errors.Wrap(err, "TagImage")
	}

	// Docker client only needs to care about the localImage
	if err := d.dCli.ImageTag(ctx, dig.String(), taggedRefs.LocalRef.String()); err != nil {
		return container.TaggedRefs{}, errors.Wrap(err, "TagImage#ImageTag")
	}

	return taggedRefs, nil
}
// PushImage pushes the specified ref up to the docker registry specified in the name.
//
// TODO(nick) In the future, I would like us to be smarter about checking if the kubernetes cluster
// we're running in has access to the given registry. And if it doesn't, we should either emit an
// error, or push to a registry that kubernetes does have access to (e.g., a local registry).
func (d *DockerBuilder) PushImage(ctx context.Context, ref reference.NamedTagged) error {
	log := logger.Get(ctx)

	pushBody, err := d.dCli.ImagePush(ctx, ref)
	if err != nil {
		return errors.Wrap(err, "PushImage#ImagePush")
	}
	defer func() {
		// Closing the response stream is best-effort; just log on failure.
		if closeErr := pushBody.Close(); closeErr != nil {
			log.Infof("unable to close imagePushResponse: %s", closeErr)
		}
	}()

	// Drain the response stream; it reports push progress and any errors.
	if _, _, err := readDockerOutput(ctx, pushBody); err != nil {
		return errors.Wrapf(err, "pushing image %q", ref.Name())
	}

	return nil
}
// ImageExists reports whether the docker daemon has an image for the given ref.
func (d *DockerBuilder) ImageExists(ctx context.Context, ref reference.NamedTagged) (bool, error) {
	_, _, err := d.dCli.ImageInspectWithRaw(ctx, ref.String())
	if err == nil {
		return true, nil
	}
	// A not-found response means "no", anything else is a real error.
	if client.IsErrNotFound(err) {
		return false, nil
	}
	return false, errors.Wrapf(err, "error checking if %s exists", ref.String())
}
// BuildImage builds the image described by spec and tags it with refs.
//
// Returns the tagged refs plus per-stage build statuses. If the build fails
// with a known class of buildkit cache-corruption errors, it is retried once
// with the legacy (non-buildkit) builder.
func (d *DockerBuilder) BuildImage(ctx context.Context, ps *PipelineState, refs container.RefSet,
	spec v1alpha1.DockerImageSpec,
	cluster *v1alpha1.Cluster,
	imageMaps map[ktypes.NamespacedName]*v1alpha1.ImageMap,
	filter model.PathMatcher) (container.TaggedRefs, []v1alpha1.DockerImageStageStatus, error) {
	// Resolve the cluster's platform and the refs of images this build
	// depends on into the spec before building.
	spec = InjectClusterPlatform(spec, cluster)
	spec, err := InjectImageDependencies(spec, imageMaps)
	if err != nil {
		return container.TaggedRefs{}, nil, err
	}

	platformSuffix := ""
	if spec.Platform != "" {
		platformSuffix = fmt.Sprintf(" for platform %s", spec.Platform)
	}
	logger.Get(ctx).Infof("Building Dockerfile%s:\n%s\n", platformSuffix, indent(spec.DockerfileContents, " "))

	ps.StartBuildStep(ctx, "Building image")
	allowBuildkit := true
	ctx = ps.AttachLogger(ctx)
	digest, stages, err := d.buildToDigest(ctx, spec, filter, allowBuildkit)
	if err != nil {
		// Heuristic string-match for the buildkit corruption failure described below.
		isMysteriousCorruption := strings.Contains(err.Error(), "failed precondition") &&
			strings.Contains(err.Error(), "failed commit on ref")
		if isMysteriousCorruption {
			// We've seen weird corruption issues on buildkit
			// that look like
			//
			// Build Failed: ImageBuild: failed to create LLB definition:
			// failed commit on ref "unknown-sha256:b72fa303a3a5fbf52c723bfcfb93948bb53b3d7e8d22418e9d171a27ad7dcd84":
			// "unknown-sha256:b72fa303a3a5fbf52c723bfcfb93948bb53b3d7e8d22418e9d171a27ad7dcd84"
			// failed size validation: 80941 != 80929: failed precondition
			//
			// Build Failed: ImageBuild: failed to load cache key: failed commit on
			// ref
			// "unknown-sha256:d8ad5905555e3af3fa9122515f2b3d4762d4e8734b7ed12f1271bcdee3541267":
			// unexpected commit size 69764, expected 76810: failed precondition
			//
			// If this happens, just try again without buildkit.
			allowBuildkit = false
			logger.Get(ctx).Infof("Detected Buildkit corruption. Rebuilding without Buildkit")
			digest, stages, err = d.buildToDigest(ctx, spec, filter, allowBuildkit)
		}

		if err != nil {
			return container.TaggedRefs{}, stages, err
		}
	}

	tagged, err := d.TagRefs(ctx, refs, digest)
	if err != nil {
		return container.TaggedRefs{}, stages, errors.Wrap(err, "docker tag")
	}

	return tagged, stages, nil
}
// buildToDigest runs the docker build described by spec and returns the
// output image digest plus per-stage statuses.
//
// When buildkit is allowed and the daemon supports it, the build context is
// served lazily via buildkit's filesync server; otherwise the (filtered)
// context is tarred and streamed to the daemon through a pipe.
func (d *DockerBuilder) buildToDigest(ctx context.Context, spec v1alpha1.DockerImageSpec, filter model.PathMatcher, allowBuildkit bool) (digest.Digest, []v1alpha1.DockerImageStageStatus, error) {
	// The build session is canceled when the response-reader goroutine
	// finishes (see the deferred cancel inside g.Go below) or this function
	// returns.
	ctx, cancelBuildSession := context.WithCancel(ctx)
	defer cancelBuildSession()

	g, ctx := errgroup.WithContext(ctx)

	var contextReader io.Reader
	buildContext := spec.Context

	// Treat context: "-" as an empty context.
	if buildContext == "-" {
		emptyContextDir, err := os.MkdirTemp("", "tilt-dockercontext-")
		if err != nil {
			return "", nil, fmt.Errorf("creating context directory: %v", err)
		}
		defer func() {
			_ = os.RemoveAll(emptyContextDir)
		}()
		buildContext = emptyContextDir
	}

	_, err := os.Stat(buildContext)
	if err != nil {
		return "", nil, fmt.Errorf("reading build context: %v", err)
	}

	builderVersion, err := d.dCli.BuilderVersion(ctx)
	if err != nil {
		return "", nil, err
	}

	// Buildkit allows us to use a fs sync server instead of uploading up-front.
	useFSSync := allowBuildkit && builderVersion == types.BuilderBuildKit
	if !useFSSync {
		// Legacy path: tar the filtered context in a goroutine, streaming it
		// to the build through a pipe.
		pipeReader, pipeWriter := io.Pipe()
		w := NewProgressWriter(ctx, pipeWriter)
		w.Init()

		// TODO(nick): Express tarring as a build stage.
		g.Go(func() error {
			paths := []PathMapping{
				{
					LocalPath:     buildContext,
					ContainerPath: "/",
				},
			}
			err := tarContextAndUpdateDf(ctx, w, dockerfile.Dockerfile(spec.DockerfileContents), paths, filter)
			if err != nil {
				_ = pipeWriter.CloseWithError(err)
			} else {
				_ = pipeWriter.Close()
			}
			w.Close() // Print the final progress message
			return nil
		})

		contextReader = pipeReader
		defer func() {
			_ = pipeReader.Close()
		}()
	}

	options := Options(contextReader, spec)
	if useFSSync {
		// FSSync path: write the Dockerfile (and a fake dockerignore) to a
		// temp dir, and serve both dirs to buildkit's filesync server.
		dockerfileDir, err := writeTempDockerfileSyncdir(spec.DockerfileContents)
		if err != nil {
			return "", nil, err
		}
		options.DirSource, err = toDirSource(buildContext, dockerfileDir, filter)
		if err != nil {
			return "", nil, err
		}
		options.Dockerfile = DockerfileName
		defer func() {
			_ = os.RemoveAll(dockerfileDir)
		}()
	}

	if !allowBuildkit {
		options.ForceLegacyBuilder = true
	}

	var digest digest.Digest
	var status []v1alpha1.DockerImageStageStatus
	g.Go(func() error {
		// Once the build response has been fully consumed (or failed),
		// cancel the session so the tar goroutine doesn't block forever.
		defer cancelBuildSession()
		imageBuildResponse, err := d.dCli.ImageBuild(
			ctx,
			g,
			contextReader,
			options,
		)
		if err != nil {
			return err
		}

		defer func() {
			err := imageBuildResponse.Body.Close()
			if err != nil {
				logger.Get(ctx).Infof("unable to close imageBuildResponse: %s", err)
			}
		}()
		digest, status, err = d.getDigestFromBuildOutput(ctx, imageBuildResponse.Body)
		return err
	})

	err = g.Wait()
	return digest, status, err
}
// getDigestFromBuildOutput consumes a docker build response stream and
// returns the resulting image digest plus per-stage statuses.
func (d *DockerBuilder) getDigestFromBuildOutput(ctx context.Context, reader io.Reader) (digest.Digest, []v1alpha1.DockerImageStageStatus, error) {
	output, stages, err := readDockerOutput(ctx, reader)
	if err != nil {
		return "", stages, errors.Wrap(err, "ImageBuild")
	}

	dig, err := d.getDigestFromDockerOutput(ctx, output)
	if err != nil {
		return "", stages, errors.Wrap(err, "getDigestFromBuildOutput")
	}

	return dig, stages, nil
}
// dockerBuildCleanupRexes strip redundant framing from buildkit error
// strings, keeping only the capture group ($1) that users care about.
var dockerBuildCleanupRexes = []*regexp.Regexp{
	// the "runc did not terminate sucessfully" just seems redundant on top of "executor failed running"
	// nolint
	regexp.MustCompile("(executor failed running.*): runc did not terminate sucessfully"), // sucessfully (sic)
	// when a file is missing, it generates an error like "failed to compute cache key: foo.txt not found: not found"
	// most of that seems redundant and/or confusing
	regexp.MustCompile("failed to compute cache key: (.* not found): not found"),
	regexp.MustCompile("failed to compute cache key: (?:failed to walk [^ ]+): lstat (?:/.*buildkit-[^/]*/)?(.*: no such file or directory)"),
}

// cleanupDockerBuildError strips buildkit-internal framing from a build
// error string.
//
// buildkit emits errors that might be useful for people who are into buildkit internals, but aren't really
// at the optimal level for people who just wanna build something.
// ideally we'll get buildkit to emit errors with more structure so that we don't have to rely on string manipulation,
// but to have impact via that route, we've got to get the change in and users have to upgrade to a version of docker
// that has that change. So let's clean errors up here until that's in a good place.
func cleanupDockerBuildError(err string) string {
	// These prefixes are pretty much always the same, and meaningless noise
	// to most users. Each is stripped at most once, in order.
	msg := err
	for _, prefix := range []string{
		"failed to solve with frontend dockerfile.v0: ",
		"failed to solve with frontend gateway.v0: ",
		"rpc error: code = Unknown desc = ",
		"failed to build LLB: ",
	} {
		msg = strings.TrimPrefix(msg, prefix)
	}

	for _, re := range dockerBuildCleanupRexes {
		msg = re.ReplaceAllString(msg, "$1")
	}
	return msg
}
// dockerMessageID identifies a progress stream within docker's JSON message
// protocol (the message's "id" field).
type dockerMessageID string

// Docker API commands stream back a sequence of JSON messages.
//
// The result of the command is in a JSON object with field "aux".
//
// Errors are reported in a JSON object with field "errorDetail"
//
// NOTE(nick): I haven't found a good document describing this protocol
// but you can find it implemented in Docker here:
// https://github.com/moby/moby/blob/1da7d2eebf0a7a60ce585f89a05cebf7f631019c/pkg/jsonmessage/jsonmessage.go#L139
func readDockerOutput(ctx context.Context, reader io.Reader) (dockerOutput, []v1alpha1.DockerImageStageStatus, error) {
	// Tracks when each progress stream last printed, for the throttle below.
	progressLastPrinted := make(map[dockerMessageID]time.Time)

	result := dockerOutput{}
	decoder := json.NewDecoder(reader)
	b := newBuildkitPrinter(logger.Get(ctx))

	for decoder.More() {
		message := jsonmessage.JSONMessage{}
		err := decoder.Decode(&message)
		if err != nil {
			return dockerOutput{}, b.toStageStatuses(), errors.Wrap(err, "decoding docker output")
		}

		// Plain stream output: echo to the log, and scrape the legacy
		// "Successfully built <id>" line if present.
		if len(message.Stream) > 0 {
			msg := message.Stream
			builtDigestMatch := oldDigestRegexp.FindStringSubmatch(msg)
			if len(builtDigestMatch) >= 2 {
				// Old versions of docker (pre 1.30) didn't send down an aux message.
				result.shortDigest = builtDigestMatch[1]
			}
			logger.Get(ctx).Write(logger.InfoLvl, []byte(msg))
		}

		if message.ErrorMessage != "" {
			return dockerOutput{}, b.toStageStatuses(), errors.New(cleanupDockerBuildError(message.ErrorMessage))
		}

		if message.Error != nil {
			return dockerOutput{}, b.toStageStatuses(), errors.New(cleanupDockerBuildError(message.Error.Message))
		}

		id := dockerMessageID(message.ID)
		if id != "" && message.Progress != nil {
			// Add a small 2-second backoff so that we don't overwhelm the logstore.
			lastPrinted, hasBeenPrinted := progressLastPrinted[id]
			shouldPrint := !hasBeenPrinted ||
				message.Progress.Current == message.Progress.Total ||
				time.Since(lastPrinted) > 2*time.Second
			// Zero-progress "Waiting"/"Preparing" lines are noise; skip them.
			shouldSkip := message.Progress.Current == 0 &&
				(message.Status == "Waiting" || message.Status == "Preparing")
			if shouldPrint && !shouldSkip {
				fields := logger.Fields{logger.FieldNameProgressID: message.ID}
				if message.Progress.Current == message.Progress.Total {
					// Completion lines must always reach the log.
					fields[logger.FieldNameProgressMustPrint] = "1"
				}
				logger.Get(ctx).WithFields(fields).
					Infof("%s: %s %s", id, message.Status, message.Progress.String())
				progressLastPrinted[id] = time.Now()
			}
		}

		// Buildkit multiplexes its trace over aux messages with a special ID;
		// route those to the buildkit printer instead of treating them as the
		// build result.
		if messageIsFromBuildkit(message) {
			err := toBuildkitStatus(message.Aux, b)
			if err != nil {
				return dockerOutput{}, b.toStageStatuses(), err
			}
		}

		if message.Aux != nil && !messageIsFromBuildkit(message) {
			result.aux = message.Aux
		}
	}

	if ctx.Err() != nil {
		return dockerOutput{}, b.toStageStatuses(), ctx.Err()
	}
	return result, b.toStageStatuses(), nil
}
// toBuildkitStatus decodes a buildkit trace payload (a JSON-wrapped,
// protobuf-encoded StatusResponse) and forwards it to the printer.
func toBuildkitStatus(aux *json.RawMessage, b *buildkitPrinter) error {
	// The aux payload is a JSON string holding the raw protobuf bytes.
	var raw []byte
	if err := json.Unmarshal(*aux, &raw); err != nil {
		return err
	}

	var resp controlapi.StatusResponse
	if err := resp.Unmarshal(raw); err != nil {
		return err
	}

	return b.parseAndPrint(toVertexes(resp))
}
// toVertexes converts a buildkit StatusResponse into the printer's internal
// vertex, log, and status representations.
func toVertexes(resp controlapi.StatusResponse) ([]*vertex, []*vertexLog, []*vertexStatus) {
	vertexes := make([]*vertex, 0, len(resp.Vertexes))
	for _, v := range resp.Vertexes {
		hasStarted := v.Started != nil
		hasCompleted := v.Completed != nil

		// Duration is only meaningful once the vertex has both started and finished.
		var elapsed time.Duration
		if hasStarted && hasCompleted {
			elapsed = v.Completed.Sub(*v.Started)
		}

		vertexes = append(vertexes, &vertex{
			digest:        v.Digest,
			name:          v.Name,
			error:         v.Error,
			started:       hasStarted,
			completed:     hasCompleted,
			cached:        v.Cached,
			duration:      elapsed,
			startedTime:   v.Started,
			completedTime: v.Completed,
		})
	}

	logs := make([]*vertexLog, 0, len(resp.Logs))
	for _, l := range resp.Logs {
		logs = append(logs, &vertexLog{
			vertex: l.Vertex,
			msg:    l.Msg,
		})
	}

	statuses := make([]*vertexStatus, 0, len(resp.Statuses))
	for _, s := range resp.Statuses {
		statuses = append(statuses, &vertexStatus{
			vertex:    s.Vertex,
			id:        s.ID,
			total:     s.Total,
			current:   s.Current,
			timestamp: s.Timestamp,
		})
	}

	return vertexes, logs, statuses
}
// messageIsFromBuildkit reports whether the message carries a buildkit trace
// payload; buildkit multiplexes its status updates over the docker JSON
// stream under this well-known ID.
func messageIsFromBuildkit(msg jsonmessage.JSONMessage) bool {
	return msg.ID == "moby.buildkit.trace"
}
// getDigestFromDockerOutput extracts the built image's digest from the
// collected docker output: it prefers the modern "aux" payload, falling back
// to inspecting the short digest scraped from legacy stream output.
func (d *DockerBuilder) getDigestFromDockerOutput(ctx context.Context, output dockerOutput) (digest.Digest, error) {
	if output.aux != nil {
		return getDigestFromAux(*output.aux)
	}

	if output.shortDigest != "" {
		// Resolve the short id to a full digest via the daemon.
		data, _, err := d.dCli.ImageInspectWithRaw(ctx, output.shortDigest)
		if err != nil {
			return "", err
		}
		return digest.Digest(data.ID), nil
	}

	// Neither output form arrived, so the daemon never produced a result.
	return "", fmt.Errorf("Docker is not responding. Maybe Docker is out of disk space? Try running `docker system prune`")
}
// getDigestFromAux extracts the image ID from a docker "aux" result payload.
func getDigestFromAux(aux json.RawMessage) (digest.Digest, error) {
	var fields map[string]string
	if err := json.Unmarshal(aux, &fields); err != nil {
		return "", errors.Wrap(err, "getDigestFromAux")
	}

	id, found := fields["ID"]
	if !found {
		return "", fmt.Errorf("getDigestFromAux: ID not found")
	}
	return digest.Digest(id), nil
}
// digestAsTag converts an image digest into a tilt tag: the tag prefix
// followed by the first 16 hex characters of the digest.
func digestAsTag(d digest.Digest) (string, error) {
	encoded := d.Encoded()
	if len(encoded) < 16 {
		return "", fmt.Errorf("digest too short: %s", encoded)
	}
	return fmt.Sprintf("%s%s", ImageTagPrefix, encoded[:16]), nil
}
// digestMatchesRef reports whether ref's tilt tag was derived from dig
// (i.e., the tag's hash portion is a prefix of the digest's hex encoding).
func digestMatchesRef(ref reference.NamedTagged, dig digest.Digest) bool {
	tag := ref.Tag()
	// A matching tag must be longer than the bare prefix.
	if len(tag) <= len(ImageTagPrefix) {
		return false
	}
	return strings.HasPrefix(dig.Encoded(), tag[len(ImageTagPrefix):])
}
// oldDigestRegexp matches the "Successfully built <id>" stream line emitted
// by older docker daemons (pre 1.30) that don't send an aux result message.
var oldDigestRegexp = regexp.MustCompile(`^Successfully built ([0-9a-f]+)\s*$`)

// dockerOutput collects the build result scraped from a docker output stream.
type dockerOutput struct {
	// aux is the final "aux" JSON payload (contains the image ID), if any.
	aux *json.RawMessage
	// shortDigest is the short image id scraped from the legacy
	// "Successfully built" stream line, if any.
	shortDigest string
}
// indent prefixes every line of text with the given indent string.
//
// A trailing newline in text is preserved; text without one gets no trailing
// newline in the result. An empty text returns just the indent.
//
// The original implementation had two near-duplicate branches (the second
// branch's TrimRight was a no-op, since that branch only runs when text has
// no trailing newline); this version unifies them.
func indent(text, indent string) string {
	if text == "" {
		return indent + text
	}

	trailingNewline := strings.HasSuffix(text, "\n")
	lines := strings.Split(strings.TrimSuffix(text, "\n"), "\n")

	var b strings.Builder
	for i, line := range lines {
		b.WriteString(indent)
		b.WriteString(line)
		// Only the last line's newline is conditional on the input's shape.
		if i < len(lines)-1 || trailingNewline {
			b.WriteString("\n")
		}
	}
	return b.String()
}
// DockerfileName is the Dockerfile filename written into the temporary
// "dockerfile" sync dir served to buildkit (see writeTempDockerfileSyncdir).
const DockerfileName = "Dockerfile"
// Creates a specification for the buildkit filesyncer
//
// Welcome to the magnificent complexity of the fssync protocol!
//
// Originally, the Docker CLI was responsible for creating a context (basically a tarball)
// and sending it to the build server. The Docker CLI used .dockerignore to exclude things
// from that tarball.
//
// Soon, people realized that tarballing the docker context was a huge bottleneck
// for monorepos.
//
// Buildkit solves this problem with the fssync server. You create a fssync.SyncedDir
// for two directories:
// - the "context" dir (with the main build contents)
// - the "dockerfile" dir (with build instructions, i.e., my.Dockerfile and my.Dockerfile.dockerignore)
// and Buildkit requests the files it needs lazily.
//
// As part of this, they decided to do all .dockerignore interpretation
// server-side. There's a little dance Buildkit does to determine whether to
// grab my.Dockerfile.dockerignore from the dockerfile dir, or whether to grab
// .dockerignore from the context dir.
//
// Tilt has its own context filtering rules (ignore= and only= in particular).
// So Tilt can't rely on Buildkit's logic. Instead, Tilt
// - creates a "context" dir (with the main build contents filtered client-side)
// - the "dockerfile" dir with a Dockerfile and a fake Dockerfile.dockerignore
//
// The fake Dockerfile.dockerignore tells buildkit not do to its server-side
// filtering dance.
// toDirSource builds the filesync.DirSource that serves the "context" and
// "dockerfile" dirs to buildkit, applying Tilt's client-side path filter to
// the context dir. See the comment above for why Tilt filters client-side.
func toDirSource(context string, dockerfileSyncDir string, filter model.PathMatcher) (filesync.DirSource, error) {
	// fileMap is called by fsutil for each entry under the context dir. It
	// applies the filter and normalizes ownership fields.
	fileMap := func(path string, s *fsutiltypes.Stat) fsutil.MapResult {
		if !filepath.IsAbs(path) {
			path = filepath.Join(context, path)
		}

		if s != nil && s.IsDir() {
			// Skip the whole subtree when the filter excludes the entire dir.
			if entireDir, _ := filter.MatchesEntireDir(path); entireDir {
				return fsutil.MapResultSkipDir
			}
		} else if matches, _ := filter.Matches(path); matches {
			return fsutil.MapResultExclude
		}

		// Zero the ownership fields on the synced stat. Guard against a nil
		// Stat to avoid a panic — the original dereferenced s unconditionally
		// even though the directory check above allowed for s == nil.
		if s != nil {
			s.Uid = 0
			s.Gid = 0
		}
		return fsutil.MapResultKeep
	}

	contextFS, err := fsutil.NewFS(context)
	if err != nil {
		return nil, err
	}
	contextFS, err = fsutil.NewFilterFS(contextFS, &fsutil.FilterOpt{
		Map: fileMap,
	})
	if err != nil {
		return nil, err
	}

	dockerfileFS, err := fsutil.NewFS(dockerfileSyncDir)
	if err != nil {
		return nil, err
	}

	return filesync.StaticDirSource{
		"context":    contextFS,
		"dockerfile": dockerfileFS,
	}, nil
}
// Writes Dockerfile and Dockerfile.dockerignore to a temporary directory.
func writeTempDockerfileSyncdir(contents string) (string, error) {
// err is a named return value, due to the defer call below.
dockerfileDir, err := os.MkdirTemp("", "tilt-tempdockerfile-")
if err != nil {
return "", fmt.Errorf("creating temp dockerfile directory: %v", err)
}
err = os.WriteFile(filepath.Join(dockerfileDir, "Dockerfile"), []byte(contents), 0777)
if err != nil {
_ = os.RemoveAll(dockerfileDir)
return "", fmt.Errorf("creating temp dockerfile: %v", err)
}
dockerignoreContents := `# Tilt's fake dockerignore file`
err = os.WriteFile(filepath.Join(dockerfileDir, "Dockerfile.dockerignore"), []byte(dockerignoreContents), 0777)
if err != nil {
_ = os.RemoveAll(dockerfileDir)
return "", fmt.Errorf("creating temp dockerignore file: %v", err)
}
return dockerfileDir, nil
}