diff --git a/levant/deploy.go b/levant/deploy.go
index 01b0c33eb..b59cd3331 100644
--- a/levant/deploy.go
+++ b/levant/deploy.go
@@ -155,16 +155,7 @@ func (l *levantDeployment) deploy() (success bool) {
 
 	switch *l.config.Template.Job.Type {
 	case nomad.JobTypeService:
-
-		// If the service job doesn't have an update stanza, the job will not use
-		// Nomad deployments.
-		if l.config.Template.Job.Update == nil {
-			log.Info().Msg("levant/deploy: job is not configured with update stanza, consider adding to use deployments")
-			return l.jobStatusChecker(&eval.EvalID)
-		}
-
 		log.Info().Msgf("levant/deploy: beginning deployment watcher for job")
-
 		// Get the deploymentID from the evaluationID so that we can watch the
 		// deployment for end status.
 		depID, err := l.getDeploymentID(eval.EvalID)
@@ -173,6 +164,12 @@ func (l *levantDeployment) deploy() (success bool) {
 			return
 		}
 
+		if depID == "" {
+			log.Info().Msgf("levant/deploy: no deploy ID found for evaluation %s", eval.EvalID)
+			return l.jobStatusChecker(&eval.EvalID)
+		}
+
+		log.Info().Msgf("levant/deploy: watching deployment %s for job", depID)
 		// Get the success of the deployment and return if we have success.
 		if success = l.deploymentWatcher(depID); success {
 			return
@@ -184,15 +181,17 @@ func (l *levantDeployment) deploy() (success bool) {
 			return
 		}
 
-		// If the job is not a canary job, then run the auto-revert checker, the
-		// current checking mechanism is slightly hacky and should be updated.
-		// The reason for this is currently the config.Job is populate from the
-		// rendered job and so a user could potentially not set canary meaning
-		// the field shows a null.
-		if l.config.Template.Job.Update.Canary == nil {
-			l.checkAutoRevert(dep)
-		} else if *l.config.Template.Job.Update.Canary == 0 {
-			l.checkAutoRevert(dep)
+		if l.config.Template.Job.Update != nil {
+			// If the job is not a canary job, run the auto-revert checker. The
+			// current checking mechanism is slightly hacky and should be updated.
+			// The reason for this is that config.Job is currently populated from the
+			// rendered job, so a user could potentially not set canary, meaning
+			// the field shows a null.
+			if l.config.Template.Job.Update.Canary == nil {
+				l.checkAutoRevert(dep)
+			} else if *l.config.Template.Job.Update.Canary == 0 {
+				l.checkAutoRevert(dep)
+			}
 		}
 
 	case nomad.JobTypeBatch:
diff --git a/test/deploy_test.go b/test/deploy_test.go
index f30e0e499..e2f031f4c 100644
--- a/test/deploy_test.go
+++ b/test/deploy_test.go
@@ -135,6 +135,21 @@ func TestDeploy_canary(t *testing.T) {
 	})
 }
 
+func TestDeploy_failed_deploy_with_no_update(t *testing.T) {
+	acctest.Test(t, acctest.TestCase{
+		Steps: []acctest.TestStep{
+			{
+				Runner: acctest.DeployTestStepRunner{
+					FixtureName: "deploy_fail_with_no_update.nomad",
+				},
+				ExpectErr: true,
+				Check:     acctest.CheckDeploymentStatus("failed"),
+			},
+		},
+		CleanupFunc: acctest.CleanupPurgeJob,
+	})
+}
+
 func TestDeploy_lifecycle(t *testing.T) {
 	acctest.Test(t, acctest.TestCase{
 		Steps: []acctest.TestStep{
diff --git a/test/fixtures/deploy_fail_with_no_update.nomad b/test/fixtures/deploy_fail_with_no_update.nomad
new file mode 100644
index 000000000..6b4a01a33
--- /dev/null
+++ b/test/fixtures/deploy_fail_with_no_update.nomad
@@ -0,0 +1,58 @@
+job "[[.job_name]]" {
+  datacenters = ["dc1"]
+  type        = "service"
+
+  group "test" {
+    count = 1
+
+    restart {
+      attempts = 1
+      interval = "5s"
+      delay    = "1s"
+      mode     = "fail"
+    }
+
+    ephemeral_disk {
+      size = 300
+    }
+
+    update {
+      max_parallel     = 1
+      min_healthy_time = "10s"
+      healthy_deadline = "1m"
+    }
+
+    network {
+      port "http" {
+        to = 80
+      }
+    }
+
+    service {
+      name = "fake-service"
+      port = "http"
+
+      check {
+        name     = "alive"
+        type     = "tcp"
+        interval = "10s"
+        timeout  = "2s"
+      }
+    }
+
+    task "alpine" {
+      driver = "docker"
+      config {
+        image   = "alpine"
+        command = "sleep 1 && exit 1"
+      }
+      resources {
+        cpu    = 100
+        memory = 20
+        network {
+          mbits = 10
+        }
+      }
+    }
+  }
+}