Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixed intermittent testcase failures #275

Merged
merged 1 commit into from
Mar 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -51,12 +51,15 @@ spec:
displayName: Server Image
path: image
- description: K8sNodeBlockList is a list of Kubernetes nodes which are not
used for Aerospike pods.
used for Aerospike pods. Pods are not scheduled on these nodes. Pods are
migrated from these nodes if already present. This is useful for the maintenance
of Kubernetes nodes.
displayName: Kubernetes Node BlockList
path: k8sNodeBlockList
- description: MaxUnavailable is the percentage/number of pods that can be allowed
to go down or unavailable before application disruption. This value is used
to create PodDisruptionBudget. Defaults to 1.
to create PodDisruptionBudget. Defaults to 1. Refer Aerospike documentation
for more details.
displayName: Max Unavailable
path: maxUnavailable
- description: Certificates to connect to Aerospike.
Expand Down
20 changes: 18 additions & 2 deletions test/cluster_helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -761,7 +761,7 @@ func deployClusterWithTO(
// Wait for aerocluster to reach the desired cluster size.
return waitForAerospikeCluster(
k8sClient, ctx, aeroCluster, int(aeroCluster.Spec.Size), retryInterval,
timeout,
timeout, []asdbv1.AerospikeClusterPhase{asdbv1.AerospikeClusterCompleted},
)
}

Expand All @@ -780,6 +780,21 @@ func updateCluster(
return updateClusterWithTO(k8sClient, ctx, aeroCluster, getTimeout(aeroCluster.Spec.Size))
}

// updateClusterWithExpectedPhases pushes the given AerospikeCluster spec to the
// API server and then blocks until the cluster status reports the spec'd size
// and its phase matches one of expectedPhases. It returns any error from the
// update call or from waiting on the cluster state.
func updateClusterWithExpectedPhases(
	k8sClient client.Client, ctx goctx.Context,
	aeroCluster *asdbv1.AerospikeCluster, expectedPhases []asdbv1.AerospikeClusterPhase,
) error {
	// Apply the spec change first; bail out immediately on failure.
	if err := k8sClient.Update(ctx, aeroCluster); err != nil {
		return err
	}

	// Poll until the cluster converges to the desired size in an accepted phase.
	return waitForAerospikeCluster(
		k8sClient, ctx, aeroCluster, int(aeroCluster.Spec.Size), retryInterval,
		getTimeout(aeroCluster.Spec.Size), expectedPhases,
	)
}

func updateClusterWithTO(
k8sClient client.Client, ctx goctx.Context,
aeroCluster *asdbv1.AerospikeCluster, timeout time.Duration,
Expand All @@ -791,7 +806,7 @@ func updateClusterWithTO(

return waitForAerospikeCluster(
k8sClient, ctx, aeroCluster, int(aeroCluster.Spec.Size), retryInterval,
timeout,
timeout, []asdbv1.AerospikeClusterPhase{asdbv1.AerospikeClusterCompleted},
)
}

Expand Down Expand Up @@ -1442,6 +1457,7 @@ func aerospikeClusterCreateUpdateWithTO(

return waitForAerospikeCluster(
k8sClient, ctx, desired, int(desired.Spec.Size), retryInterval, timeout,
[]asdbv1.AerospikeClusterPhase{asdbv1.AerospikeClusterCompleted},
)
}

Expand Down
24 changes: 16 additions & 8 deletions test/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ func ScaleDownWithMigrateFillDelay(ctx goctx.Context) {

err = waitForAerospikeCluster(
k8sClient, ctx, aeroCluster, int(aeroCluster.Spec.Size), retryInterval,
getTimeout(2),
getTimeout(2), []asdbv1.AerospikeClusterPhase{asdbv1.AerospikeClusterCompleted},
)
Expect(err).ToNot(HaveOccurred())

Expand All @@ -134,10 +134,12 @@ func ScaleDownWithMigrateFillDelay(ctx goctx.Context) {

func clusterWithMaxIgnorablePod(ctx goctx.Context) {
var (
aeroCluster *asdbv1.AerospikeCluster
err error
nodeList = &v1.NodeList{}
podList = &v1.PodList{}
aeroCluster *asdbv1.AerospikeCluster
err error
nodeList = &v1.NodeList{}
podList = &v1.PodList{}
expectedPhases = []asdbv1.AerospikeClusterPhase{
asdbv1.AerospikeClusterInProgress, asdbv1.AerospikeClusterCompleted}
)

clusterNamespacedName := getNamespacedName(
Expand Down Expand Up @@ -184,15 +186,19 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) {
aeroCluster.Spec.AerospikeConfig.Value["service"].(map[string]interface{})["proto-fd-max"] =
int64(18000)

return updateCluster(k8sClient, ctx, aeroCluster)
// As the pod is in a pending state, the CR object won't reach the final phase.
// So expectedPhases can be InProgress or Completed.
return updateClusterWithExpectedPhases(k8sClient, ctx, aeroCluster, expectedPhases)
}, 1*time.Minute).ShouldNot(HaveOccurred())

By("Upgrade version")
aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName)
Expect(err).ToNot(HaveOccurred())
newImage := baseImage + ":7.0.0.0_2"
aeroCluster.Spec.Image = newImage
err = updateCluster(k8sClient, ctx, aeroCluster)
// As the pod is in a pending state, the CR object won't reach the final phase.
// So expectedPhases can be InProgress or Completed.
err = updateClusterWithExpectedPhases(k8sClient, ctx, aeroCluster, expectedPhases)
Expect(err).ToNot(HaveOccurred())

By("Verify pending pod")
Expand All @@ -212,7 +218,9 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) {
aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName)
Expect(err).ToNot(HaveOccurred())
aeroCluster.Spec.Size--
err = updateCluster(k8sClient, ctx, aeroCluster)
// As the pod is in a pending state, the CR object won't reach the final phase.
// So expectedPhases can be InProgress or Completed.
err = updateClusterWithExpectedPhases(k8sClient, ctx, aeroCluster, expectedPhases)
Expect(err).ToNot(HaveOccurred())

By("Verify if all pods are running")
Expand Down
9 changes: 6 additions & 3 deletions test/large_reconcile_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,8 @@ func waitForClusterScaleDown(
return false, err
}

return isClusterStateValid(aeroCluster, newCluster, replicas), nil
return isClusterStateValid(aeroCluster, newCluster, replicas,
[]asdbv1.AerospikeClusterPhase{asdbv1.AerospikeClusterCompleted}), nil
},
)
if err != nil {
Expand Down Expand Up @@ -360,7 +361,8 @@ func waitForClusterRollingRestart(
return false, err
}

return isClusterStateValid(aeroCluster, newCluster, replicas), nil
return isClusterStateValid(aeroCluster, newCluster, replicas,
[]asdbv1.AerospikeClusterPhase{asdbv1.AerospikeClusterCompleted}), nil
},
)
if err != nil {
Expand Down Expand Up @@ -401,7 +403,8 @@ func waitForClusterUpgrade(
return false, err
}

return isClusterStateValid(aeroCluster, newCluster, replicas), nil
return isClusterStateValid(aeroCluster, newCluster, replicas,
[]asdbv1.AerospikeClusterPhase{asdbv1.AerospikeClusterCompleted}), nil
},
)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion test/sample_files_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ func deployClusterUsingFile(ctx context.Context, filePath string) (*asdbv1.Aeros

if err := waitForAerospikeCluster(
k8sClient, ctx, aeroCluster, int(aeroCluster.Spec.Size), retryInterval,
getTimeout(aeroCluster.Spec.Size),
getTimeout(aeroCluster.Spec.Size), []asdbv1.AerospikeClusterPhase{asdbv1.AerospikeClusterCompleted},
); err != nil {
return aeroCluster, err
}
Expand Down
18 changes: 11 additions & 7 deletions test/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import (
"strings"
"time"

set "github.com/deckarep/golang-set/v2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
Expand Down Expand Up @@ -236,7 +237,7 @@ func getLabels() map[string]string {
func waitForAerospikeCluster(
k8sClient client.Client, ctx goctx.Context,
aeroCluster *asdbv1.AerospikeCluster, replicas int,
retryInterval, timeout time.Duration,
retryInterval, timeout time.Duration, expectedPhases []asdbv1.AerospikeClusterPhase,
) error {
var isValid bool

Expand All @@ -260,7 +261,7 @@ func waitForAerospikeCluster(
return false, err
}

isValid = isClusterStateValid(aeroCluster, newCluster, replicas)
isValid = isClusterStateValid(aeroCluster, newCluster, replicas, expectedPhases)
return isValid, nil
},
)
Expand All @@ -277,7 +278,7 @@ func waitForAerospikeCluster(

func isClusterStateValid(
aeroCluster *asdbv1.AerospikeCluster,
newCluster *asdbv1.AerospikeCluster, replicas int,
newCluster *asdbv1.AerospikeCluster, replicas int, expectedPhases []asdbv1.AerospikeClusterPhase,
) bool {
if int(newCluster.Status.Size) != replicas {
pkgLog.Info("Cluster size is not correct")
Expand Down Expand Up @@ -315,8 +316,9 @@ func isClusterStateValid(
}

pkgLog.Info(
"Cluster pod's image %s not same as spec %s", newCluster.Status.Pods[podName].Image,
aeroCluster.Spec.Image,
fmt.Sprintf("Cluster pod's image %s not same as spec %s", newCluster.Status.Pods[podName].Image,
aeroCluster.Spec.Image,
),
)
}

Expand All @@ -325,8 +327,10 @@ func isClusterStateValid(
return false
}

if newCluster.Status.Phase != asdbv1.AerospikeClusterCompleted {
pkgLog.Info("Cluster phase is not set to Completed")
// Validate phase
phaseSet := set.NewSet(expectedPhases...)
if !phaseSet.Contains(newCluster.Status.Phase) {
pkgLog.Info("Cluster phase is not correct")
return false
}

Expand Down
Loading