Skip to content

Commit

Permalink
Merge branch 'main' into dependabot/go_modules/testing/e2e/skr/provis…
Browse files Browse the repository at this point in the history
…ioning-service-test/golang.org/x/net-0.33.0
  • Loading branch information
jaroslaw-pieszka authored Jan 23, 2025
2 parents 7bb9408 + 87cc226 commit c8a5698
Show file tree
Hide file tree
Showing 14 changed files with 294 additions and 58 deletions.
17 changes: 16 additions & 1 deletion common/runtime/model.go
Original file line number Diff line number Diff line change
Expand Up @@ -391,13 +391,28 @@ type ModuleDTO struct {
// AdditionalWorkerNodePool describes one extra worker node pool requested for
// a runtime on top of the default pool. It is deserialized from the
// provisioning/update request parameters.
type AdditionalWorkerNodePool struct {
// Name identifies the pool; names must be unique within a request.
Name string `json:"name"`
// MachineType is the cloud-provider machine type for the pool's nodes.
MachineType string `json:"machineType"`
// HAZones, when true, spreads the pool across availability zones
// (which requires AutoScalerMin >= 3 — see Validate).
HAZones bool `json:"haZones"`
// AutoScalerMin is the lower bound of the pool's node autoscaler.
AutoScalerMin int `json:"autoScalerMin"`
// AutoScalerMax is the upper bound of the pool's node autoscaler.
AutoScalerMax int `json:"autoScalerMax"`
}

// Validate checks the autoscaler configuration of the additional worker node
// pool. It returns an error when AutoScalerMin exceeds AutoScalerMax, or when
// HA zones are enabled but AutoScalerMin is below 3 (at least one node per
// zone is needed for high availability). It returns nil otherwise.
func (a AdditionalWorkerNodePool) Validate() error {
	if a.AutoScalerMin > a.AutoScalerMax {
		// Note: the scrape retained both the pre- and post-change return lines
		// here; only the current message ("additional worker node pool") is kept.
		return fmt.Errorf("AutoScalerMax %v should be larger than AutoScalerMin %v for %s additional worker node pool", a.AutoScalerMax, a.AutoScalerMin, a.Name)
	}
	if a.HAZones && a.AutoScalerMin < 3 {
		return fmt.Errorf("AutoScalerMin %v should be at least 3 when HA zones are enabled for %s additional worker node pool", a.AutoScalerMin, a.Name)
	}
	return nil
}

// ValidateDisablingHAZones rejects an update that turns HA zones off for an
// already existing additional worker node pool: once a pool has HA zones
// enabled they must stay enabled. currentAdditionalWorkerNodePools holds the
// pools stored for the instance before this update.
func (a AdditionalWorkerNodePool) ValidateDisablingHAZones(currentAdditionalWorkerNodePools []AdditionalWorkerNodePool) error {
	if a.HAZones {
		// The requested state keeps HA zones on, so nothing can be disabled.
		return nil
	}
	for _, current := range currentAdditionalWorkerNodePools {
		if current.Name == a.Name && current.HAZones {
			return fmt.Errorf("HA zones cannot be disabled for %s additional worker node pool", a.Name)
		}
	}
	return nil
}
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ require (
github.com/Azure/go-autorest/autorest/adal v0.9.24
github.com/Masterminds/sprig v2.22.0+incompatible
github.com/dlmiddlecote/sqlstats v1.0.2
github.com/docker/docker v27.5.0+incompatible
github.com/docker/docker v27.5.1+incompatible
github.com/docker/go-connections v0.5.0
github.com/gardener/gardener v1.110.4
github.com/go-co-op/gocron v1.37.0
Expand Down
4 changes: 2 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -87,8 +87,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dlmiddlecote/sqlstats v1.0.2 h1:gSU11YN23D/iY50A2zVYwgXgy072khatTsIW6UPjUtI=
github.com/dlmiddlecote/sqlstats v1.0.2/go.mod h1:0CWaIh/Th+z2aI6Q9Jpfg/o21zmGxWhbByHgQSCUQvY=
github.com/docker/docker v27.5.0+incompatible h1:um++2NcQtGRTz5eEgO6aJimo6/JxrTXC941hd05JO6U=
github.com/docker/docker v27.5.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
Expand Down
14 changes: 14 additions & 0 deletions internal/broker/instance_create.go
Original file line number Diff line number Diff line change
Expand Up @@ -305,6 +305,9 @@ func (b *ProvisionEndpoint) validateAndExtract(details domain.ProvisionDetails,
if !supportsAdditionalWorkerNodePools(details.PlanID) {
return ersContext, parameters, fmt.Errorf("additional worker node pools are not supported for plan ID: %s", details.PlanID)
}
if !AreNamesUnique(parameters.AdditionalWorkerNodePools) {
return ersContext, parameters, fmt.Errorf("names of additional worker node pools must be unique")
}
for _, additionalWorkerNodePool := range parameters.AdditionalWorkerNodePools {
if err := additionalWorkerNodePool.Validate(); err != nil {
return ersContext, parameters, apiresponses.NewFailureResponse(err, http.StatusUnprocessableEntity, err.Error())
Expand Down Expand Up @@ -412,6 +415,17 @@ func supportsAdditionalWorkerNodePools(planID string) bool {
return false
}

// AreNamesUnique reports whether every additional worker node pool in pools
// carries a distinct Name. An empty slice is trivially unique.
func AreNamesUnique(pools []pkg.AdditionalWorkerNodePool) bool {
	seen := make(map[string]bool, len(pools))
	for _, p := range pools {
		if seen[p.Name] {
			return false
		}
		seen[p.Name] = true
	}
	return true
}

// Rudimentary kubeconfig validation
func validateKubeconfig(kubeconfig string) error {
config, err := clientcmd.Load([]byte(kubeconfig))
Expand Down
75 changes: 64 additions & 11 deletions internal/broker/instance_create_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1499,47 +1499,55 @@ func TestAdditionalWorkerNodePools(t *testing.T) {
expectedError bool
}{
"Valid additional worker node pools": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-2", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-2", "machineType": "m6i.large", "haZones": false, "autoScalerMin": 1, "autoScalerMax": 20}]`,
expectedError: false,
},
"Empty additional worker node pools": {
additionalWorkerNodePools: `[]`,
expectedError: false,
},
"Empty name": {
additionalWorkerNodePools: `[{"name": "", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`,
additionalWorkerNodePools: `[{"name": "", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Missing name": {
additionalWorkerNodePools: `[{"machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`,
additionalWorkerNodePools: `[{"machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Not unique names": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Empty machine type": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "", "autoScalerMin": 3, "autoScalerMax": 20}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Missing machine type": {
additionalWorkerNodePools: `[{"name": "name-1", "autoScalerMin": 3, "autoScalerMax": 20}]`,
additionalWorkerNodePools: `[{"name": "name-1", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Missing HA zones": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Missing autoScalerMin": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMax": 3}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMax": 3}]`,
expectedError: true,
},
"Missing autoScalerMax": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 20}]`,
expectedError: true,
},
"AutoScalerMin smaller than 3": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 2, "autoScalerMax": 300}]`,
"AutoScalerMin smaller than 3 when HA zones are enabled": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 2, "autoScalerMax": 300}]`,
expectedError: true,
},
"AutoScalerMax bigger than 300": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 301}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 301}]`,
expectedError: true,
},
"AutoScalerMin bigger than autoScalerMax": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20, "autoScalerMax": 3}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 20, "autoScalerMax": 3}]`,
expectedError: true,
},
} {
Expand Down Expand Up @@ -1685,6 +1693,51 @@ func TestAdditionalWorkerNodePoolsForUnsupportedPlans(t *testing.T) {
}
}

// TestAreNamesUnique exercises AreNamesUnique with unique, duplicated, empty
// and single-element additional worker node pool lists.
func TestAreNamesUnique(t *testing.T) {
	for name, tc := range map[string]struct {
		pools    []pkg.AdditionalWorkerNodePool
		expected bool
	}{
		"Unique names": {
			pools: []pkg.AdditionalWorkerNodePool{
				{Name: "name-1", MachineType: "m6i.large", HAZones: true, AutoScalerMin: 5, AutoScalerMax: 5},
				{Name: "name-2", MachineType: "m6i.large", HAZones: false, AutoScalerMin: 2, AutoScalerMax: 10},
				{Name: "name-3", MachineType: "m6i.large", HAZones: true, AutoScalerMin: 3, AutoScalerMax: 15},
			},
			expected: true,
		},
		"Duplicate names": {
			pools: []pkg.AdditionalWorkerNodePool{
				{Name: "name-1", MachineType: "m6i.large", HAZones: true, AutoScalerMin: 5, AutoScalerMax: 5},
				{Name: "name-2", MachineType: "m6i.large", HAZones: false, AutoScalerMin: 2, AutoScalerMax: 10},
				{Name: "name-1", MachineType: "m6i.large", HAZones: true, AutoScalerMin: 3, AutoScalerMax: 5},
			},
			expected: false,
		},
		"Empty list": {
			pools:    []pkg.AdditionalWorkerNodePool{},
			expected: true,
		},
		"Single pool": {
			pools: []pkg.AdditionalWorkerNodePool{
				{Name: "name-1", MachineType: "m6i.large", HAZones: false, AutoScalerMin: 1, AutoScalerMax: 5},
			},
			expected: true,
		},
	} {
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, tc.expected, broker.AreNamesUnique(tc.pools))
		})
	}
}

func TestNetworkingValidation(t *testing.T) {
for tn, tc := range map[string]struct {
givenNetworking string
Expand Down
6 changes: 6 additions & 0 deletions internal/broker/instance_update.go
Original file line number Diff line number Diff line change
Expand Up @@ -268,10 +268,16 @@ func (b *UpdateEndpoint) processUpdateParameters(instance *internal.Instance, de
if !supportsAdditionalWorkerNodePools(details.PlanID) {
return domain.UpdateServiceSpec{}, fmt.Errorf("additional worker node pools are not supported for plan ID: %s", details.PlanID)
}
if !AreNamesUnique(params.AdditionalWorkerNodePools) {
return domain.UpdateServiceSpec{}, fmt.Errorf("names of additional worker node pools must be unique")
}
for _, additionalWorkerNodePool := range params.AdditionalWorkerNodePools {
if err := additionalWorkerNodePool.Validate(); err != nil {
return domain.UpdateServiceSpec{}, apiresponses.NewFailureResponse(err, http.StatusBadRequest, err.Error())
}
if err := additionalWorkerNodePool.ValidateDisablingHAZones(instance.Parameters.Parameters.AdditionalWorkerNodePools); err != nil {
return domain.UpdateServiceSpec{}, apiresponses.NewFailureResponse(err, http.StatusBadRequest, err.Error())
}
}
}

Expand Down
118 changes: 107 additions & 11 deletions internal/broker/instance_update_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -599,47 +599,55 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) {
expectedError bool
}{
"Valid additional worker node pools": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-2", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-2", "machineType": "m6i.large", "haZones": false, "autoScalerMin": 1, "autoScalerMax": 20}]`,
expectedError: false,
},
"Empty additional worker node pools": {
additionalWorkerNodePools: `[]`,
expectedError: false,
},
"Empty name": {
additionalWorkerNodePools: `[{"name": "", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`,
additionalWorkerNodePools: `[{"name": "", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Missing name": {
additionalWorkerNodePools: `[{"machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`,
additionalWorkerNodePools: `[{"machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Not unique names": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Empty machine type": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "", "autoScalerMin": 3, "autoScalerMax": 20}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Missing machine type": {
additionalWorkerNodePools: `[{"name": "name-1", "autoScalerMin": 3, "autoScalerMax": 20}]`,
additionalWorkerNodePools: `[{"name": "name-1", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Missing HA zones": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`,
expectedError: true,
},
"Missing autoScalerMin": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMax": 3}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMax": 3}]`,
expectedError: true,
},
"Missing autoScalerMax": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 20}]`,
expectedError: true,
},
"AutoScalerMin smaller than 3": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 2, "autoScalerMax": 300}]`,
"AutoScalerMin smaller than 3 when HA zones are enabled": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 2, "autoScalerMax": 300}]`,
expectedError: true,
},
"AutoScalerMax bigger than 300": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 301}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 301}]`,
expectedError: true,
},
"AutoScalerMin bigger than autoScalerMax": {
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20, "autoScalerMax": 3}]`,
additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 20, "autoScalerMax": 3}]`,
expectedError: true,
},
} {
Expand Down Expand Up @@ -679,6 +687,94 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) {
}
}

// TestHAZones verifies the update-path rules for the haZones flag of
// additional worker node pools: turning HA zones OFF on an existing pool must
// be rejected, while turning them ON must be accepted.
func TestHAZones(t *testing.T) {
	t.Run("should fail when attempting to disable HA zones for additional worker node pool", func(t *testing.T) {
		// given
		// Existing instance whose "name-1" pool already has HA zones enabled.
		instance := fixture.FixInstance(instanceID)
		instance.ServicePlanID = PreviewPlanID
		instance.Parameters.Parameters.AdditionalWorkerNodePools = []pkg.AdditionalWorkerNodePool{
			{
				Name:          "name-1",
				MachineType:   "m6i.large",
				HAZones:       true,
				AutoScalerMin: 3,
				AutoScalerMax: 20,
			},
		}
		st := storage.NewMemoryStorage()
		err := st.Instances().Insert(instance)
		require.NoError(t, err)
		err = st.Operations().InsertProvisioningOperation(fixProvisioningOperation("provisioning01"))
		require.NoError(t, err)

		// Mocks/stubs for the update endpoint's collaborators.
		handler := &handler{}
		q := &automock.Queue{}
		q.On("Add", mock.AnythingOfType("string"))
		planDefaults := func(planID string, platformProvider pkg.CloudProvider, provider *pkg.CloudProvider) (*gqlschema.ClusterConfigInput, error) {
			return &gqlschema.ClusterConfigInput{}, nil
		}
		kcBuilder := &kcMock.KcBuilder{}
		svc := NewUpdate(Config{}, st.Instances(), st.RuntimeStates(), st.Operations(), handler, true, true, false, q, PlansConfig{},
			planDefaults, fixLogger(), dashboardConfig, kcBuilder, &OneForAllConvergedCloudRegionsProvider{}, fakeKcpK8sClient)

		// when
		// The update request sets "haZones": false for the same pool name.
		_, err = svc.Update(context.Background(), instanceID, domain.UpdateDetails{
			ServiceID:       "",
			PlanID:          PreviewPlanID,
			RawParameters:   json.RawMessage(`{"additionalWorkerNodePools": [{"name": "name-1", "machineType": "m6i.large", "haZones": false, "autoScalerMin": 3, "autoScalerMax": 20}]}`),
			PreviousValues:  domain.PreviousValues{},
			RawContext:      json.RawMessage("{\"globalaccount_id\":\"globalaccount_id_1\", \"active\":true}"),
			MaintenanceInfo: nil,
		}, true)

		// then
		// Disabling HA zones must be refused with the pool-specific message.
		assert.EqualError(t, err, "HA zones cannot be disabled for name-1 additional worker node pool")
	})

	t.Run("should succeed when enabling HA zones for additional worker node pool", func(t *testing.T) {
		// given
		// Existing instance whose "name-1" pool has HA zones disabled.
		instance := fixture.FixInstance(instanceID)
		instance.ServicePlanID = PreviewPlanID
		instance.Parameters.Parameters.AdditionalWorkerNodePools = []pkg.AdditionalWorkerNodePool{
			{
				Name:          "name-1",
				MachineType:   "m6i.large",
				HAZones:       false,
				AutoScalerMin: 3,
				AutoScalerMax: 20,
			},
		}
		st := storage.NewMemoryStorage()
		err := st.Instances().Insert(instance)
		require.NoError(t, err)
		err = st.Operations().InsertProvisioningOperation(fixProvisioningOperation("provisioning01"))
		require.NoError(t, err)

		// Mocks/stubs for the update endpoint's collaborators.
		handler := &handler{}
		q := &automock.Queue{}
		q.On("Add", mock.AnythingOfType("string"))
		planDefaults := func(planID string, platformProvider pkg.CloudProvider, provider *pkg.CloudProvider) (*gqlschema.ClusterConfigInput, error) {
			return &gqlschema.ClusterConfigInput{}, nil
		}
		kcBuilder := &kcMock.KcBuilder{}
		svc := NewUpdate(Config{}, st.Instances(), st.RuntimeStates(), st.Operations(), handler, true, true, false, q, PlansConfig{},
			planDefaults, fixLogger(), dashboardConfig, kcBuilder, &OneForAllConvergedCloudRegionsProvider{}, fakeKcpK8sClient)

		// when
		// The update request sets "haZones": true for the same pool name.
		_, err = svc.Update(context.Background(), instanceID, domain.UpdateDetails{
			ServiceID:       "",
			PlanID:          PreviewPlanID,
			RawParameters:   json.RawMessage(`{"additionalWorkerNodePools": [{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]}`),
			PreviousValues:  domain.PreviousValues{},
			RawContext:      json.RawMessage("{\"globalaccount_id\":\"globalaccount_id_1\", \"active\":true}"),
			MaintenanceInfo: nil,
		}, true)

		// then
		// Enabling HA zones is a valid transition and must not error.
		assert.NoError(t, err)
	})
}

func TestUpdateAdditionalWorkerNodePoolsForUnsupportedPlans(t *testing.T) {
for tn, tc := range map[string]struct {
planID string
Expand Down
Loading

0 comments on commit c8a5698

Please sign in to comment.