forked from tilt-dev/tilt
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathimage_builder.go
211 lines (181 loc) · 6.36 KB
/
image_builder.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
package build
import (
"context"
"errors"
"fmt"
"github.com/distribution/reference"
"k8s.io/apimachinery/pkg/types"
"github.com/tilt-dev/clusterid"
"github.com/tilt-dev/tilt/internal/container"
"github.com/tilt-dev/tilt/internal/ignore"
"github.com/tilt-dev/tilt/internal/k8s"
"github.com/tilt-dev/tilt/pkg/apis"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/model"
)
// ImageBuilder dispatches image builds to the appropriate sub-builder and
// handles getting the built image into the cluster (push or KIND load).
type ImageBuilder struct {
	db    *DockerBuilder // builds Dockerfile-based targets; also used for push and kube-context checks
	custb *CustomBuilder // builds custom_build() targets by running a user-supplied command
	kl    KINDLoader     // side-loads images into KIND clusters that have no local registry
}
// NewImageBuilder assembles an ImageBuilder from its sub-builders.
func NewImageBuilder(db *DockerBuilder, custb *CustomBuilder, kl KINDLoader) *ImageBuilder {
	ib := &ImageBuilder{db: db, custb: custb, kl: kl}
	return ib
}
// CanReuseRef reports whether a previously built ref can be reused for this
// image target instead of rebuilding.
func (ib *ImageBuilder) CanReuseRef(ctx context.Context, iTarget model.ImageTarget, ref reference.NamedTagged) (bool, error) {
	if _, ok := iTarget.BuildDetails.(model.DockerBuild); ok {
		return ib.db.ImageExists(ctx, ref)
	}
	if _, ok := iTarget.BuildDetails.(model.CustomBuild); ok {
		// Custom build doesn't have a good way to check if the ref still exists in the image
		// store, so just assume we can.
		return true, nil
	}
	return false, fmt.Errorf("image %q has no valid buildDetails (neither "+
		"DockerBuild nor CustomBuild)", iTarget.ImageMapSpec.Selector)
}
// Build builds the image, and pushes it if necessary.
//
// Note that this function can return partial results on an error.
//
// The error is simply the "main" build failure reason.
func (ib *ImageBuilder) Build(ctx context.Context,
	iTarget model.ImageTarget,
	customBuildCmd *v1alpha1.Cmd,
	cluster *v1alpha1.Cluster,
	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
	ps *PipelineState) (container.TaggedRefs, []v1alpha1.DockerImageStageStatus, error) {
	refs, stages, err := ib.buildOnly(ctx, iTarget, customBuildCmd, cluster, imageMaps, ps)
	if err != nil {
		return refs, stages, err
	}

	// A nil stage means the push was skipped entirely (e.g., Docker Compose).
	if pushStage := ib.push(ctx, refs, ps, iTarget, cluster); pushStage != nil {
		stages = append(stages, *pushStage)
		if pushStage.Error != "" {
			err = errors.New(pushStage.Error)
		}
	}
	return refs, stages, err
}
// Build the image, but don't do any push.
//
// Dispatches on the target's BuildDetails: Docker builds go through the
// DockerBuilder (and may report per-stage statuses); custom builds run the
// user's command and report no stage statuses.
func (ib *ImageBuilder) buildOnly(ctx context.Context,
	iTarget model.ImageTarget,
	customBuildCmd *v1alpha1.Cmd,
	cluster *v1alpha1.Cluster,
	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
	ps *PipelineState,
) (container.TaggedRefs, []v1alpha1.DockerImageStageStatus, error) {
	// Resolve the target's local/cluster refs against this cluster's config.
	refs, err := iTarget.Refs(cluster)
	if err != nil {
		return container.TaggedRefs{}, nil, err
	}

	// Human-readable ref name for pipeline log output.
	userFacingRefName := container.FamiliarString(refs.ConfigurationRef)

	switch bd := iTarget.BuildDetails.(type) {
	case model.DockerBuild:
		ps.StartPipelineStep(ctx, "Building Dockerfile: [%s]", userFacingRefName)
		defer ps.EndPipelineStep(ctx)

		filter := ignore.CreateBuildContextFilter(bd.DockerImageSpec.ContextIgnores)
		return ib.db.BuildImage(ctx, ps, refs, bd.DockerImageSpec,
			cluster,
			imageMaps,
			filter)
	case model.CustomBuild:
		ps.StartPipelineStep(ctx, "Building Custom Build: [%s]", userFacingRefName)
		defer ps.EndPipelineStep(ctx)

		// Intentionally shadows the outer refs: the custom builder returns the
		// final tagged refs for the image it produced.
		refs, err := ib.custb.Build(ctx, refs, bd.CmdImageSpec, customBuildCmd, imageMaps)
		return refs, nil, err
	}

	// Theoretically this should never trip b/c we `validate` the manifest beforehand...?
	// If we get here, something is very wrong.
	return container.TaggedRefs{}, nil, fmt.Errorf("image %q has no valid buildDetails (neither "+
		"DockerBuild nor CustomBuild)", refs.ConfigurationRef)
}
// push pushes the image to the cluster if the cluster requires it, returning
// a stage status for the push/load attempt, or nil if the push was skipped.
//
// Fix: the original pre-declared `var err error` and then accidentally
// shadowed it with `err :=` inside the KIND branch; each branch now declares
// its own err at the point of use, which is clearer and shadow-lint clean.
func (ib *ImageBuilder) push(ctx context.Context, refs container.TaggedRefs, ps *PipelineState, iTarget model.ImageTarget, cluster *v1alpha1.Cluster) *v1alpha1.DockerImageStageStatus {
	// Skip the push phase entirely if we're on Docker Compose.
	isDC := cluster != nil &&
		cluster.Spec.Connection != nil &&
		cluster.Spec.Connection.Docker != nil
	if isDC {
		return nil
	}

	// On Kubernetes, we count each push() as a stage, and need to print why
	// we're skipping if we don't need to push.
	ps.StartPipelineStep(ctx, "Pushing %s", container.FamiliarString(refs.LocalRef))
	defer ps.EndPipelineStep(ctx)

	cbSkip := false
	if iTarget.IsCustomBuild() {
		cbSkip = iTarget.CustomBuildInfo().SkipsPush()
	}

	if cbSkip {
		ps.Printf(ctx, "Skipping push: custom_build() configured to handle push itself")
		return nil
	}

	// We can also skip the push of the image if it isn't used
	// in any k8s resources! (e.g., it's consumed by another image).
	if iTarget.ClusterNeeds() != v1alpha1.ClusterImageNeedsPush {
		ps.Printf(ctx, "Skipping push: base image does not need deploy")
		return nil
	}

	if ib.db.WillBuildToKubeContext(k8s.KubeContext(k8sConnStatus(cluster).Context)) {
		ps.Printf(ctx, "Skipping push: building on cluster's container runtime")
		return nil
	}

	startTime := apis.NowMicro()

	if ib.shouldUseKINDLoad(refs, cluster) {
		ps.Printf(ctx, "Loading image to KIND")
		err := ib.kl.LoadToKIND(ps.AttachLogger(ctx), cluster, refs.LocalRef)
		endTime := apis.NowMicro()
		stage := &v1alpha1.DockerImageStageStatus{
			Name:       "kind load",
			StartedAt:  &startTime,
			FinishedAt: &endTime,
		}
		if err != nil {
			stage.Error = fmt.Sprintf("Error loading image to KIND: %v", err)
		}
		return stage
	}

	ps.Printf(ctx, "Pushing with Docker client")
	err := ib.db.PushImage(ps.AttachLogger(ctx), refs.LocalRef)
	endTime := apis.NowMicro()
	stage := &v1alpha1.DockerImageStageStatus{
		Name:       "docker push",
		StartedAt:  &startTime,
		FinishedAt: &endTime,
	}
	if err != nil {
		stage.Error = fmt.Sprintf("docker push: %v", err)
	}
	return stage
}
// shouldUseKINDLoad reports whether the built image should be side-loaded
// into a KIND cluster via `kind load` rather than pushed to a registry.
func (ib *ImageBuilder) shouldUseKINDLoad(refs container.TaggedRefs, cluster *v1alpha1.Cluster) bool {
	if k8sConnStatus(cluster).Product != string(clusterid.ProductKIND) {
		return false
	}

	// if we're using KIND and the image has a separate ref by which it's referred to
	// in the cluster, that implies that we have a local registry in place, and should
	// push to that instead of using KIND load.
	if refs.LocalRef.String() != refs.ClusterRef.String() {
		return false
	}

	// KIND load only applies when the cluster has no registry configured.
	registry := cluster.Status.Registry
	return registry == nil || registry.Host == ""
}
// k8sConnStatus returns the cluster's Kubernetes connection status, falling
// back to a zero-value status when the cluster (or its connection info) is
// absent, so callers never have to nil-check.
func k8sConnStatus(cluster *v1alpha1.Cluster) *v1alpha1.KubernetesClusterConnectionStatus {
	if cluster == nil || cluster.Status.Connection == nil || cluster.Status.Connection.Kubernetes == nil {
		return &v1alpha1.KubernetesClusterConnectionStatus{}
	}
	return cluster.Status.Connection.Kubernetes
}