diff --git a/go.mod b/go.mod index c1955c0..fa9f40e 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( github.com/docker/docker v24.0.7+incompatible github.com/google/uuid v1.5.0 github.com/sirupsen/logrus v1.9.3 + github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 @@ -21,6 +22,7 @@ require ( github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -47,6 +49,7 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect diff --git a/go.sum b/go.sum index f2a516b..8216c1e 100644 --- a/go.sum +++ b/go.sum @@ -22,6 +22,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= 
diff --git a/pkg/builder/builder.go b/pkg/builder/builder.go
new file mode 100644
index 0000000..e44e2e5
--- /dev/null
+++ b/pkg/builder/builder.go
@@ -0,0 +1,25 @@
+package builder
+
+import "context"
+
+// Builder builds a container image from the given options and returns the
+// builder's log output.
+type Builder interface {
+	Build(ctx context.Context, b *BuilderOptions) (logs string, err error)
+}
+
+// CacheOptions configures layer caching for a build.
+type CacheOptions struct {
+	Enabled bool   // turn layer caching on/off
+	Dir     string // optional local cache directory
+	Repo    string // remote repository used as the layer cache
+}
+
+// BuilderOptions describes a single image build.
+type BuilderOptions struct {
+	ImageName    string
+	BuildContext string
+	Args         []string
+	Destination  string
+	Cache        *CacheOptions
+}
diff --git a/pkg/builder/kaniko/errors.go b/pkg/builder/kaniko/errors.go
new file mode 100644
index 0000000..fd0ce04
--- /dev/null
+++ b/pkg/builder/kaniko/errors.go
@@ -0,0 +1,60 @@
+package kaniko
+
+import (
+	"fmt"
+)
+
+// Error is a kaniko builder error with a stable Code, a human-readable
+// Message, and an optional wrapped cause.
+type Error struct {
+	Code    string
+	Message string
+	Err     error
+}
+
+// Error renders the message, appending the wrapped cause when present.
+func (e *Error) Error() string {
+	if e.Err != nil {
+		return fmt.Sprintf("%s: %v", e.Message, e.Err)
+	}
+	return e.Message
+}
+
+// Wrap returns a copy of e that carries err as its cause. Returning a copy
+// instead of mutating e keeps the package-level sentinel errors below
+// immutable, so concurrent builds cannot race on (or permanently alter) them.
+func (e *Error) Wrap(err error) error {
+	return &Error{Code: e.Code, Message: e.Message, Err: err}
+}
+
+// Unwrap exposes the wrapped cause to errors.Is / errors.As.
+func (e *Error) Unwrap() error {
+	return e.Err
+}
+
+// Is matches kaniko errors by Code so that errors.Is(err, ErrCreatingJob)
+// holds for the copies produced by Wrap.
+func (e *Error) Is(target error) bool {
+	t, ok := target.(*Error)
+	return ok && t.Code == e.Code
+}
+
+var (
+	ErrBuildContextEmpty                = &Error{Code: "BuildContextEmpty", Message: "build context cannot be empty"}
+	ErrCleaningUp                       = &Error{Code: "CleaningUp", Message: "error cleaning up"}
+	ErrCreatingJob                      = &Error{Code: "CreatingJob", Message: "error creating Job"}
+	ErrDeletingJob                      = &Error{Code: "DeletingJob", Message: "error deleting Job"}
+	ErrDeletingPods                     = &Error{Code: "DeletingPods", Message: "error deleting Pods"}
+	ErrGeneratingUUID                   = &Error{Code: "GeneratingUUID", Message: "error generating UUID"}
+	ErrGettingContainerLogs             = &Error{Code: "GettingContainerLogs", Message: "error getting container logs"}
+	ErrGettingPodFromJob                = &Error{Code: "GettingPodFromJob", Message: "error getting Pod from Job"}
+	ErrListingJobs                      = &Error{Code: "ListingJobs", Message: "error listing Jobs"}
+	ErrListingPods                      = &Error{Code: "ListingPods", Message: "error listing Pods"}
+	ErrNoContainersFound                = &Error{Code: "NoContainersFound", Message: "no containers found"}
+	ErrNoPodsFound                      = &Error{Code: "NoPodsFound", Message: "no Pods found"}
+	ErrPreparingJob                     = &Error{Code: "PreparingJob", Message: "error preparing Job"}
+	ErrWaitingJobCompletion             = &Error{Code: "WaitingJobCompletion", Message: "error waiting for Job completion"}
+	ErrWatchingChannelCloseUnexpectedly = &Error{Code: "WatchingChannelCloseUnexpectedly", Message: "watch channel closed unexpectedly"}
+	ErrWatchingJob                      = &Error{Code: "WatchingJob", Message: "error watching Job"}
+	ErrContextCancelled                 = &Error{Code: "ContextCancelled", Message: "context cancelled"}
+)
diff --git a/pkg/builder/kaniko/git.go b/pkg/builder/kaniko/git.go
new file mode 100644
index 0000000..c7d3298
--- /dev/null
+++ b/pkg/builder/kaniko/git.go
@@ -0,0 +1,53 @@
+package kaniko
+
+import (
+	"regexp"
+	"strings"
+)
+
+const (
+	regexpGitRepoProtocol = `^(https?|git|ssh|ftp)://`
+	regexpGitRepoDotGit   = `\.git$`
+	gitProtocol           = "git://"
+)
+
+// Compiled once at package scope: the patterns are constants, so compiling
+// them on every BuildContext call was wasted work that could never fail.
+var (
+	gitRepoProtocolRegexp = regexp.MustCompile(regexpGitRepoProtocol)
+	gitRepoDotGitRegexp   = regexp.MustCompile(regexpGitRepoDotGit)
+)
+
+// GitContext describes a git repository used as a kaniko build context.
+type GitContext struct {
+	Repo     string
+	Commit   string
+	Username string
+	Password string
+}
+
+// BuildContext normalizes g into kaniko's git build-context form
+// git://[user[:password]@]repo[#commit]. The error return is kept for
+// interface stability; it is always nil.
+func (g *GitContext) BuildContext() (string, error) {
+	// strip any protocol prefix, a trailing ".git" and a trailing "/"
+	g.Repo = gitRepoProtocolRegexp.ReplaceAllString(g.Repo, "")
+	g.Repo = gitRepoDotGitRegexp.ReplaceAllString(g.Repo, "")
+	g.Repo = strings.TrimSuffix(g.Repo, "/")
+
+	bCtx := gitProtocol
+	if g.Username != "" {
+		bCtx += g.Username
+		if g.Password != "" {
+			bCtx += ":" + g.Password
+		}
+		bCtx += "@"
+	}
+
+	bCtx += g.Repo
+	if g.Commit != "" {
+		bCtx += "#" + g.Commit
+	}
+
+	return bCtx, nil
+}
diff --git a/pkg/builder/kaniko/kaniko.go b/pkg/builder/kaniko/kaniko.go
new file mode 100644
index 0000000..5d07b96
--- /dev/null
+++ b/pkg/builder/kaniko/kaniko.go
@@ -0,0 +1,225 @@
+package kaniko
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+
+	"github.com/celestiaorg/knuu/pkg/builder"
+	"github.com/celestiaorg/knuu/pkg/names"
+	batchv1 "k8s.io/api/batch/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+const (
+	kanikoImage         = "gcr.io/kaniko-project/executor:debug" // debug variant ships a shell
+	kanikoContainerName = "kaniko-container"
+	kanikoJobNamePrefix = "kaniko-build-job"
+
+	// DefaultParallelism is 1 so each build Job runs exactly one Pod at a
+	// time; a larger value would execute the same build (and image push)
+	// that many times concurrently against the same destination.
+	DefaultParallelism = int32(1)
+	// DefaultBackoffLimit retries a failed build Job at most 5 times.
+	DefaultBackoffLimit = int32(5)
+)
+
+// Kaniko builds container images in-cluster by running the kaniko executor
+// as a Kubernetes Job and collecting its logs.
+type Kaniko struct {
+	K8sClientset kubernetes.Interface
+	K8sNamespace string
+}
+
+var _ builder.Builder = &Kaniko{}
+
+// Build creates a kaniko Job for the given options, waits until the Job
+// succeeds or fails, collects the executor container's logs, and finally
+// deletes the Job together with its Pods.
+func (k *Kaniko) Build(ctx context.Context, b *builder.BuilderOptions) (logs string, err error) {
+	job, err := prepareJob(b)
+	if err != nil {
+		return "", ErrPreparingJob.Wrap(err)
+	}
+
+	cJob, err := k.K8sClientset.BatchV1().Jobs(k.K8sNamespace).Create(ctx, job, metav1.CreateOptions{})
+	if err != nil {
+		return "", ErrCreatingJob.Wrap(err)
+	}
+
+	kJob, err := k.waitForJobCompletion(ctx, cJob)
+	if err != nil {
+		return "", ErrWaitingJobCompletion.Wrap(err)
+	}
+
+	pod, err := k.firstPodFromJob(ctx, kJob)
+	if err != nil {
+		return "", ErrGettingPodFromJob.Wrap(err)
+	}
+
+	logs, err = k.containerLogs(ctx, pod)
+	if err != nil {
+		return "", ErrGettingContainerLogs.Wrap(err)
+	}
+
+	if err := k.cleanup(ctx, kJob); err != nil {
+		return "", ErrCleaningUp.Wrap(err)
+	}
+
+	return logs, nil
+}
+
+// waitForJobCompletion watches the Job until it reports a succeeded or a
+// failed Pod and returns the observed Job object. It aborts when the watch
+// channel closes unexpectedly or ctx is cancelled.
+func (k *Kaniko) waitForJobCompletion(ctx context.Context, job *batchv1.Job) (*batchv1.Job, error) {
+	watcher, err := k.K8sClientset.BatchV1().Jobs(k.K8sNamespace).Watch(ctx, metav1.ListOptions{
+		FieldSelector: fmt.Sprintf("metadata.name=%s", job.Name),
+	})
+	if err != nil {
+		return nil, ErrWatchingJob.Wrap(err)
+	}
+	defer watcher.Stop()
+
+	for {
+		select {
+		case event, ok := <-watcher.ResultChan():
+			if !ok {
+				return nil, ErrWatchingChannelCloseUnexpectedly
+			}
+
+			j, ok := event.Object.(*batchv1.Job)
+			if !ok {
+				// not a Job event (e.g. a Status object); keep watching
+				continue
+			}
+
+			if j.Status.Succeeded > 0 || j.Status.Failed > 0 {
+				// Job completed (successfully or failed)
+				return j, nil
+			}
+		case 
<-ctx.Done():
+			return nil, ErrContextCancelled
+		}
+	}
+}
+
+// firstPodFromJob returns the first Pod spawned by the given Job, located
+// via the job-name label the Job controller stamps on its Pods.
+func (k *Kaniko) firstPodFromJob(ctx context.Context, job *batchv1.Job) (*v1.Pod, error) {
+	podList, err := k.K8sClientset.CoreV1().Pods(k.K8sNamespace).List(ctx, metav1.ListOptions{
+		LabelSelector: fmt.Sprintf("job-name=%s", job.Name),
+	})
+	if err != nil {
+		return nil, ErrListingPods.Wrap(err)
+	}
+
+	if len(podList.Items) == 0 {
+		return nil, ErrNoPodsFound.Wrap(fmt.Errorf("job: %s", job.Name))
+	}
+
+	return &podList.Items[0], nil
+}
+
+// containerLogs fetches the complete logs of the Pod's first container.
+func (k *Kaniko) containerLogs(ctx context.Context, pod *v1.Pod) (string, error) {
+	if len(pod.Spec.Containers) == 0 {
+		return "", ErrNoContainersFound.Wrap(fmt.Errorf("pod: %s", pod.Name))
+	}
+
+	containerName := pod.Spec.Containers[0].Name
+
+	logOptions := v1.PodLogOptions{
+		Container: containerName,
+	}
+
+	req := k.K8sClientset.CoreV1().Pods(k.K8sNamespace).GetLogs(pod.Name, &logOptions)
+	logs, err := req.DoRaw(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	return string(logs), nil
+}
+
+// cleanup deletes the build Job (letting Kubernetes garbage-collect its
+// dependents in the background) and then any Pods still carrying the
+// Job's job-name label.
+func (k *Kaniko) cleanup(ctx context.Context, job *batchv1.Job) error {
+	propagation := metav1.DeletePropagationBackground
+	err := k.K8sClientset.BatchV1().Jobs(k.K8sNamespace).
+		Delete(ctx, job.Name, metav1.DeleteOptions{
+			PropagationPolicy: &propagation,
+		})
+	if err != nil {
+		return ErrDeletingJob.Wrap(err)
+	}
+
+	// Delete the associated Pods
+	err = k.K8sClientset.CoreV1().Pods(k.K8sNamespace).
+		DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
+			LabelSelector: fmt.Sprintf("job-name=%s", job.Name),
+		})
+	if err != nil {
+		return ErrDeletingPods.Wrap(err)
+	}
+
+	return nil
+}
+
+// DefaultCacheOptions derives deterministic cache settings from the build
+// context: the cache repo is an ephemeral ttl.sh image named after the
+// sha256 of the context, so identical contexts share a cache for 24h.
+func DefaultCacheOptions(buildContext string) (*builder.CacheOptions, error) {
+	if buildContext == "" {
+		return nil, ErrBuildContextEmpty
+	}
+	hash := sha256.New()
+	_, err := hash.Write([]byte(buildContext))
+	if err != nil {
+		return nil, err
+	}
+	hashStr := hex.EncodeToString(hash.Sum(nil))
+
+	return &builder.CacheOptions{
+		Enabled: true,
+		Dir:     "",
+		Repo:    fmt.Sprintf("ttl.sh/%s:24h", hashStr),
+	}, nil
+}
+
+// prepareJob assembles the kaniko executor Job spec for the given build
+// options; the Job name gets a random suffix so concurrent builds cannot
+// collide on the Job resource name.
+func prepareJob(b *builder.BuilderOptions) (*batchv1.Job, error) {
+	jobName, err := names.NewRandomK8(kanikoJobNamePrefix)
+	if err != nil {
+		return nil, ErrGeneratingUUID.Wrap(err)
+	}
+
+	parallelism := DefaultParallelism
+	backoffLimit := DefaultBackoffLimit
+	job := &batchv1.Job{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: jobName,
+		},
+		Spec: batchv1.JobSpec{
+			Parallelism:  &parallelism,  // how many Pods run the build at once (see DefaultParallelism)
+			BackoffLimit: &backoffLimit, // how often a failed build is retried (see DefaultBackoffLimit)
+			Template: v1.PodTemplateSpec{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  kanikoContainerName,
+							Image: kanikoImage, // debug has a shell
+							Args: []string{
+								`--context=` + b.BuildContext,
+								// TODO: see if we need it or not
+								// --git gitoptions Branch to clone if build context is a git repository (default branch=,single-branch=false,recurse-submodules=false)
+
+								// TODO: we might need to add some options to get the auth token for the registry
+								"--destination=" + b.Destination,
+							},
+						},
+					},
+					RestartPolicy: "Never", // Ensure that the Pod does not restart
+				},
+			},
+		},
+	}
+
+	// TODO: we need to add some configs to get the auth token for the cache repo
+	if b.Cache != nil && b.Cache.Enabled {
+		cacheArgs := []string{"--cache=true"}
+		if b.Cache.Dir != "" {
+			cacheArgs = append(cacheArgs, "--cache-dir="+b.Cache.Dir)
+		}
+		if 
b.Cache.Repo != "" {
+			cacheArgs = append(cacheArgs, "--cache-repo="+b.Cache.Repo)
+		}
+		job.Spec.Template.Spec.Containers[0].Args = append(job.Spec.Template.Spec.Containers[0].Args, cacheArgs...)
+	}
+
+	// Add extra args
+	job.Spec.Template.Spec.Containers[0].Args = append(job.Spec.Template.Spec.Containers[0].Args, b.Args...)
+
+	return job, nil
+
+}
diff --git a/pkg/builder/kaniko/kaniko_test.go b/pkg/builder/kaniko/kaniko_test.go
new file mode 100644
index 0000000..bf4fc7a
--- /dev/null
+++ b/pkg/builder/kaniko/kaniko_test.go
@@ -0,0 +1,160 @@
+package kaniko
+
+import (
+	"context"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/celestiaorg/knuu/pkg/builder"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	batchv1 "k8s.io/api/batch/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/fake"
+)
+
+const (
+	k8sNamespace = "test-namespace"
+)
+
+func TestKanikoBuilder(t *testing.T) {
+	k8sCS := fake.NewSimpleClientset()
+	kb := &Kaniko{
+		K8sClientset: k8sCS,
+		K8sNamespace: k8sNamespace,
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	t.Run("BuildSuccess", func(t *testing.T) {
+		blCtx := "git://github.com/mojtaba-esk/sample-docker"
+		cacheOpts, err := DefaultCacheOptions(blCtx)
+		require.NoError(t, err, "GetDefaultCacheOptions should succeed")
+
+		buildOptions := &builder.BuilderOptions{
+			ImageName:    "test-image",
+			BuildContext: blCtx,
+			Destination:  "registry.example.com/test-image:latest",
+			Args:         []string{"--build-arg=value"},
+			Cache:        cacheOpts,
+		}
+
+		var (
+			logs string
+			wg   = &sync.WaitGroup{}
+		)
+		wg.Add(1) // must run before the goroutine starts, or wg.Wait below may not wait
+		go func() {
+			defer wg.Done()
+			logs, err = kb.Build(context.Background(), buildOptions)
+		}()
+
+		// Simulate the successful completion of the Job after a short delay
+		time.Sleep(2 * time.Second)
+		completeAllJobInFakeClientset(t, k8sCS, k8sNamespace)
+
+		wg.Wait()
+
+		assert.NoError(t, err, "Build should 
succeed")
+		assert.NotEmpty(t, logs, "Build logs should not be empty")
+	})
+
+	t.Run("BuildFailure", func(t *testing.T) {
+		buildOptions := &builder.BuilderOptions{
+			ImageName:    "test-image",
+			BuildContext: "invalid-context", // Simulate an invalid context
+			Destination:  "registry.example.com/test-image:latest",
+		}
+
+		logs, err := kb.Build(ctx, buildOptions)
+
+		assert.Error(t, err, "Build should fail")
+		assert.Empty(t, logs, "Build logs should be empty")
+	})
+
+	t.Run("BuildWithContextCancellation", func(t *testing.T) {
+		buildOptions := &builder.BuilderOptions{
+			ImageName:    "test-image",
+			BuildContext: "git://example.com/repo",
+			Destination:  "registry.example.com/test-image:latest",
+		}
+
+		// Cancel the context to simulate cancellation during the build
+		cancel()
+
+		logs, err := kb.Build(ctx, buildOptions)
+
+		assert.Error(t, err, "Build should fail due to context cancellation")
+		assert.Empty(t, logs, "Build logs should be empty")
+	})
+
+}
+
+// completeAllJobInFakeClientset marks every Job in the namespace as
+// succeeded and creates a matching fake Pod so Build can find it.
+func completeAllJobInFakeClientset(t *testing.T, clientset *fake.Clientset, namespace string) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	jobs, err := clientset.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{})
+	require.NoError(t, err) // require, not assert: continuing with a failed List would panic below
+
+	for _, j := range jobs.Items {
+		j.Status.Succeeded = 1
+		_, err = clientset.BatchV1().Jobs(namespace).Update(ctx, &j, metav1.UpdateOptions{})
+		require.NoError(t, err)
+
+		// Create a Pod with the same name as the Job
+		pod := createPodFromJob(&j)
+		_, err = clientset.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
+		require.NoError(t, err)
+	}
+}
+
+// createPodFromJob fabricates a Pod labeled as belonging to the given Job,
+// mimicking what the Job controller would create.
+func createPodFromJob(job *batchv1.Job) *v1.Pod {
+	return &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      job.Name,
+			Namespace: job.Namespace,
+			Labels: map[string]string{
+				"job-name": job.Name,
+			},
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "fake-container", // Adjust as needed
+					Image: "fake-image",     // Adjust as needed
+				},
+			},
+		},
+	}
+}
+
+func TestGetDefaultCacheOptions(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		buildContext  string
+		expectedRepo  string
+		expectedError bool
+	}{
+		{"git://example.com/repo", "ttl.sh/fd46c51aa5aff87d0f8a329fc578ffcb3b43f8db8aff920d0d01429e15eb9850:24h", false},
+		{"", "", true},
+	}
+
+	for _, test := range tests {
+		t.Run(test.buildContext, func(t *testing.T) {
+			cacheOptions, err := DefaultCacheOptions(test.buildContext)
+
+			if test.expectedError {
+				assert.Error(t, err, "Expected an error, but got none")
+				assert.Nil(t, cacheOptions, "Cache options should be nil on error")
+			} else {
+				assert.NoError(t, err, "Unexpected error")
+				assert.NotNil(t, cacheOptions, "Cache options should not be nil")
+				assert.Equal(t, test.expectedRepo, cacheOptions.Repo, "Unexpected cache repo value")
+			}
+		})
+	}
+}
diff --git a/pkg/names/names.go b/pkg/names/names.go
new file mode 100644
index 0000000..7b63f7d
--- /dev/null
+++ b/pkg/names/names.go
@@ -0,0 +1,17 @@
+package names
+
+import (
+	"fmt"
+
+	"github.com/google/uuid"
+)
+
+// NewRandomK8 returns a random k8s compatible name with the given prefix.
+func NewRandomK8(prefix string) (string, error) {
+	// named "id" rather than "uuid" to avoid shadowing the uuid package
+	id, err := uuid.NewRandom()
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s-%s", prefix, id.String()[:8]), nil
+}