From 2b86c80e9260d258dcc411ef38f5984d9aa370b1 Mon Sep 17 00:00:00 2001
From: Marek Michali <56163696+MarekMichali@users.noreply.github.com>
Date: Thu, 23 Jan 2025 11:52:41 +0100
Subject: [PATCH 1/4] SKR-tester: custom updates (#1669)

---
 testing/e2e/skr-tester/pkg/command/update.go | 47 +++++++++++++++++++-
 1 file changed, 46 insertions(+), 1 deletion(-)

diff --git a/testing/e2e/skr-tester/pkg/command/update.go b/testing/e2e/skr-tester/pkg/command/update.go
index 5168e0a646..7cfc435463 100644
--- a/testing/e2e/skr-tester/pkg/command/update.go
+++ b/testing/e2e/skr-tester/pkg/command/update.go
@@ -1,6 +1,7 @@
 package command
 
 import (
+	"encoding/json"
 	"fmt"
 	broker "skr-tester/pkg/broker"
 	kcp "skr-tester/pkg/kcp"
@@ -17,6 +18,9 @@ type UpdateCommand struct {
 	updateMachineType    bool
 	updateOIDC           bool
 	updateAdministrators bool
+	customMachineType    string
+	customOIDC           string
+	customAdministrators []string
 }
 
 func NewUpdateCommand() *cobra.Command {
@@ -28,7 +32,10 @@
 		Long: "Update the instance with a new machine type, OIDC configuration, or administrators.",
 		Example: `	skr-tester update -i instanceID -p planID --updateMachineType	Update the instance with a new machine type.
 	skr-tester update -i instanceID -p planID --updateOIDC	Update the instance with a new OIDC configuration.
-	skr-tester update -i instanceID -p planID --updateAdministrators	Update the instance with new administrators.`,
+	skr-tester update -i instanceID -p planID --updateAdministrators	Update the instance with new administrators.
+	skr-tester update -i instanceID -p planID --updateMachineType --customMachineType newMachineType	Update the instance with a custom machine type.
+	skr-tester update -i instanceID -p planID --updateOIDC --customOIDC '{"clientID":"foo-bar","issuerURL":"https://new.custom.ias.com"}'	Update the instance with a custom OIDC configuration.
+ skr-tester update -i instanceID -p planID --updateAdministrators --customAdministrators admin1@acme.com,admin2@acme.com Update the instance with custom administrators.`, PreRunE: func(_ *cobra.Command, _ []string) error { return cmd.Validate() }, RunE: func(_ *cobra.Command, _ []string) error { return cmd.Run() }, } @@ -38,6 +45,9 @@ func NewUpdateCommand() *cobra.Command { cobraCmd.Flags().BoolVarP(&cmd.updateMachineType, "updateMachineType", "m", false, "Update machine type.") cobraCmd.Flags().BoolVarP(&cmd.updateOIDC, "updateOIDC", "o", false, "Update OIDC configuration.") cobraCmd.Flags().BoolVarP(&cmd.updateAdministrators, "updateAdministrators", "a", false, "Update administrators.") + cobraCmd.Flags().StringVar(&cmd.customMachineType, "customMachineType", "", "Machine type to update to (optional).") + cobraCmd.Flags().StringVar(&cmd.customOIDC, "customOIDC", "", "Custom OIDC configuration in JSON format (optional).") + cobraCmd.Flags().StringSliceVar(&cmd.customAdministrators, "customAdministrators", nil, "Custom administrators (optional).") return cobraCmd } @@ -50,6 +60,16 @@ func (cmd *UpdateCommand) Run() error { return fmt.Errorf("failed to create KCP client: %v", err) } if cmd.updateMachineType { + if cmd.customMachineType != "" { + fmt.Printf("User provided machine type: %s\n", cmd.customMachineType) + resp, _, err := brokerClient.UpdateInstance(cmd.instanceID, map[string]interface{}{"machineType": cmd.customMachineType}) + if err != nil { + return fmt.Errorf("error updating instance: %v", err) + } + fmt.Printf("Update operationID: %s\n", resp["operation"].(string)) + return nil + } + catalog, _, err := brokerClient.GetCatalog() if err != nil { return fmt.Errorf("failed to get catalog: %v", err) @@ -104,6 +124,21 @@ func (cmd *UpdateCommand) Run() error { } } } else if cmd.updateOIDC { + if cmd.customOIDC != "" { + var newOIDCConfig map[string]interface{} + err := json.Unmarshal([]byte(cmd.customOIDC), &newOIDCConfig) + if err != nil { + return fmt.Errorf("failed to parse custom OIDC config: %v", err) + } + fmt.Printf("User provided custom OIDC config: %v\n", newOIDCConfig) + resp, _, err := brokerClient.UpdateInstance(cmd.instanceID, map[string]interface{}{"oidc": newOIDCConfig}) + if err != nil { + return fmt.Errorf("error updating instance: %v", err) + } + fmt.Printf("Update operationID: %s\n", resp["operation"].(string)) + return nil + } + currentOIDCConfig, err := kcpClient.GetCurrentOIDCConfig(cmd.instanceID) if err != nil { return fmt.Errorf("failed to get current OIDC config: %v", err) @@ -124,6 +159,16 @@ func (cmd *UpdateCommand) Run() error { } fmt.Printf("Update operationID: %s\n", resp["operation"].(string)) } else if cmd.updateAdministrators { + if len(cmd.customAdministrators) > 0 { + fmt.Printf("User provided custom administrators: %v\n", cmd.customAdministrators) + resp, _, err := brokerClient.UpdateInstance(cmd.instanceID, map[string]interface{}{"administrators": cmd.customAdministrators}) + if err != nil { + return fmt.Errorf("error updating instance: %v", err) + } + fmt.Printf("Update operationID: %s\n", resp["operation"].(string)) + return nil + } + newAdministrators := []string{"admin1@acme.com", "admin2@acme.com"} fmt.Printf("Determined administrators to update: %v\n", newAdministrators) resp, _, err := brokerClient.UpdateInstance(cmd.instanceID, map[string]interface{}{"administrators": newAdministrators}) From 1b36ad3b9fcf07617cf89ae19ff500537cd2d6fc Mon Sep 17 00:00:00 2001 From: KsaweryZietara <91937141+KsaweryZietara@users.noreply.github.com> Date: 
Thu, 23 Jan 2025 13:08:37 +0100 Subject: [PATCH 2/4] Introduce HA zones (#1668) * Introduce HA zones * Add more unit tests * Make names more descriptive --- common/runtime/model.go | 17 +++- internal/broker/instance_create.go | 14 +++ internal/broker/instance_create_test.go | 75 ++++++++++++--- internal/broker/instance_update.go | 6 ++ internal/broker/instance_update_test.go | 118 +++++++++++++++++++++--- internal/broker/plans_schema.go | 17 +++- 6 files changed, 219 insertions(+), 28 deletions(-) diff --git a/common/runtime/model.go b/common/runtime/model.go index bd0fee02f7..ac42c0f5e9 100644 --- a/common/runtime/model.go +++ b/common/runtime/model.go @@ -391,13 +391,28 @@ type ModuleDTO struct { type AdditionalWorkerNodePool struct { Name string `json:"name"` MachineType string `json:"machineType"` + HAZones bool `json:"haZones"` AutoScalerMin int `json:"autoScalerMin"` AutoScalerMax int `json:"autoScalerMax"` } func (a AdditionalWorkerNodePool) Validate() error { if a.AutoScalerMin > a.AutoScalerMax { - return fmt.Errorf("AutoScalerMax %v should be larger than AutoScalerMin %v for %s worker node pool", a.AutoScalerMax, a.AutoScalerMin, a.Name) + return fmt.Errorf("AutoScalerMax %v should be larger than AutoScalerMin %v for %s additional worker node pool", a.AutoScalerMax, a.AutoScalerMin, a.Name) + } + if a.HAZones && a.AutoScalerMin < 3 { + return fmt.Errorf("AutoScalerMin %v should be at least 3 when HA zones are enabled for %s additional worker node pool", a.AutoScalerMin, a.Name) + } + return nil +} + +func (a AdditionalWorkerNodePool) ValidateDisablingHAZones(currentAdditionalWorkerNodePools []AdditionalWorkerNodePool) error { + for _, currentAdditionalWorkerNodePool := range currentAdditionalWorkerNodePools { + if a.Name == currentAdditionalWorkerNodePool.Name { + if !a.HAZones && currentAdditionalWorkerNodePool.HAZones { + return fmt.Errorf("HA zones cannot be disabled for %s additional worker node pool", a.Name) + } + } } return nil } diff --git a/internal/broker/instance_create.go b/internal/broker/instance_create.go index 1a7672a816..9e6b8c0946 100644 --- a/internal/broker/instance_create.go +++ b/internal/broker/instance_create.go @@ -305,6 +305,9 @@ func (b *ProvisionEndpoint) validateAndExtract(details domain.ProvisionDetails, if !supportsAdditionalWorkerNodePools(details.PlanID) { return ersContext, parameters, fmt.Errorf("additional worker node pools are not supported for plan ID: %s", details.PlanID) } + if !AreNamesUnique(parameters.AdditionalWorkerNodePools) { + return ersContext, parameters, fmt.Errorf("names of additional worker node pools must be unique") + } for _, additionalWorkerNodePool := range parameters.AdditionalWorkerNodePools { if err := additionalWorkerNodePool.Validate(); err != nil { return ersContext, parameters, apiresponses.NewFailureResponse(err, http.StatusUnprocessableEntity, err.Error()) @@ -412,6 +415,17 @@ func supportsAdditionalWorkerNodePools(planID string) bool { return false } +func AreNamesUnique(pools []pkg.AdditionalWorkerNodePool) bool { + nameSet := make(map[string]struct{}) + for _, pool := range pools { + if _, exists := nameSet[pool.Name]; exists { + return false + } + nameSet[pool.Name] = struct{}{} + } + return true +} + // Rudimentary kubeconfig validation func validateKubeconfig(kubeconfig string) error { config, err := clientcmd.Load([]byte(kubeconfig)) diff --git a/internal/broker/instance_create_test.go b/internal/broker/instance_create_test.go index 30cf98ffb7..726e29f177 100644 --- a/internal/broker/instance_create_test.go 
+++ b/internal/broker/instance_create_test.go @@ -1499,7 +1499,7 @@ func TestAdditionalWorkerNodePools(t *testing.T) { expectedError bool }{ "Valid additional worker node pools": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-2", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-2", "machineType": "m6i.large", "haZones": false, "autoScalerMin": 1, "autoScalerMax": 20}]`, expectedError: false, }, "Empty additional worker node pools": { @@ -1507,39 +1507,47 @@ func TestAdditionalWorkerNodePools(t *testing.T) { expectedError: false, }, "Empty name": { - additionalWorkerNodePools: `[{"name": "", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`, + additionalWorkerNodePools: `[{"name": "", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`, expectedError: true, }, "Missing name": { - additionalWorkerNodePools: `[{"machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`, + additionalWorkerNodePools: `[{"machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`, + expectedError: true, + }, + "Not unique names": { + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`, expectedError: true, }, "Empty machine type": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "", "autoScalerMin": 3, "autoScalerMax": 20}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`, expectedError: true, }, "Missing machine type": { - additionalWorkerNodePools: `[{"name": "name-1", "autoScalerMin": 3, "autoScalerMax": 20}]`, + additionalWorkerNodePools: `[{"name": "name-1", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`, + expectedError: true, + }, + "Missing HA zones": { + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`, expectedError: true, }, "Missing autoScalerMin": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMax": 3}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMax": 3}]`, expectedError: true, }, "Missing autoScalerMax": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 20}]`, expectedError: true, }, - "AutoScalerMin smaller than 3": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 2, "autoScalerMax": 300}]`, + "AutoScalerMin smaller than 3 when HA zones are enabled": { + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 2, "autoScalerMax": 300}]`, expectedError: true, }, "AutoScalerMax bigger than 300": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 301}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": 
true, "autoScalerMin": 3, "autoScalerMax": 301}]`, expectedError: true, }, "AutoScalerMin bigger than autoScalerMax": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20, "autoScalerMax": 3}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 20, "autoScalerMax": 3}]`, expectedError: true, }, } { @@ -1685,6 +1693,51 @@ func TestAdditionalWorkerNodePoolsForUnsupportedPlans(t *testing.T) { } } +func TestAreNamesUnique(t *testing.T) { + tests := []struct { + name string + pools []pkg.AdditionalWorkerNodePool + expected bool + }{ + { + name: "Unique names", + pools: []pkg.AdditionalWorkerNodePool{ + {Name: "name-1", MachineType: "m6i.large", HAZones: true, AutoScalerMin: 5, AutoScalerMax: 5}, + {Name: "name-2", MachineType: "m6i.large", HAZones: false, AutoScalerMin: 2, AutoScalerMax: 10}, + {Name: "name-3", MachineType: "m6i.large", HAZones: true, AutoScalerMin: 3, AutoScalerMax: 15}, + }, + expected: true, + }, + { + name: "Duplicate names", + pools: []pkg.AdditionalWorkerNodePool{ + {Name: "name-1", MachineType: "m6i.large", HAZones: true, AutoScalerMin: 5, AutoScalerMax: 5}, + {Name: "name-2", MachineType: "m6i.large", HAZones: false, AutoScalerMin: 2, AutoScalerMax: 10}, + {Name: "name-1", MachineType: "m6i.large", HAZones: true, AutoScalerMin: 3, AutoScalerMax: 5}, + }, + expected: false, + }, + { + name: "Empty list", + pools: []pkg.AdditionalWorkerNodePool{}, + expected: true, + }, + { + name: "Single pool", + pools: []pkg.AdditionalWorkerNodePool{ + {Name: "name-1", MachineType: "m6i.large", HAZones: false, AutoScalerMin: 1, AutoScalerMax: 5}, + }, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, broker.AreNamesUnique(tt.pools)) + }) + } +} + func TestNetworkingValidation(t *testing.T) { for tn, tc := range map[string]struct { givenNetworking string diff --git a/internal/broker/instance_update.go b/internal/broker/instance_update.go index 8ae3477ba2..875f08bfe4 100644 --- a/internal/broker/instance_update.go +++ b/internal/broker/instance_update.go @@ -268,10 +268,16 @@ func (b *UpdateEndpoint) processUpdateParameters(instance *internal.Instance, de if !supportsAdditionalWorkerNodePools(details.PlanID) { return domain.UpdateServiceSpec{}, fmt.Errorf("additional worker node pools are not supported for plan ID: %s", details.PlanID) } + if !AreNamesUnique(params.AdditionalWorkerNodePools) { + return domain.UpdateServiceSpec{}, fmt.Errorf("names of additional worker node pools must be unique") + } for _, additionalWorkerNodePool := range params.AdditionalWorkerNodePools { if err := additionalWorkerNodePool.Validate(); err != nil { return domain.UpdateServiceSpec{}, apiresponses.NewFailureResponse(err, http.StatusBadRequest, err.Error()) } + if err := additionalWorkerNodePool.ValidateDisablingHAZones(instance.Parameters.Parameters.AdditionalWorkerNodePools); err != nil { + return domain.UpdateServiceSpec{}, apiresponses.NewFailureResponse(err, http.StatusBadRequest, err.Error()) + } } } diff --git a/internal/broker/instance_update_test.go b/internal/broker/instance_update_test.go index 0e6ea81487..527094e368 100644 --- a/internal/broker/instance_update_test.go +++ b/internal/broker/instance_update_test.go @@ -599,7 +599,7 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) { expectedError bool }{ "Valid additional worker node pools": { - additionalWorkerNodePools: `[{"name": "name-1", 
"machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-2", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-2", "machineType": "m6i.large", "haZones": false, "autoScalerMin": 1, "autoScalerMax": 20}]`, expectedError: false, }, "Empty additional worker node pools": { @@ -607,39 +607,47 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) { expectedError: false, }, "Empty name": { - additionalWorkerNodePools: `[{"name": "", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`, + additionalWorkerNodePools: `[{"name": "", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`, expectedError: true, }, "Missing name": { - additionalWorkerNodePools: `[{"machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`, + additionalWorkerNodePools: `[{"machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`, + expectedError: true, + }, + "Not unique names": { + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`, expectedError: true, }, "Empty machine type": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "", "autoScalerMin": 3, "autoScalerMax": 20}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`, expectedError: true, }, "Missing machine type": { - additionalWorkerNodePools: `[{"name": "name-1", "autoScalerMin": 3, "autoScalerMax": 20}]`, + additionalWorkerNodePools: `[{"name": "name-1", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`, + expectedError: true, + }, + "Missing HA zones": { + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`, expectedError: true, }, "Missing autoScalerMin": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMax": 3}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMax": 3}]`, expectedError: true, }, "Missing autoScalerMax": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 20}]`, expectedError: true, }, - "AutoScalerMin smaller than 3": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 2, "autoScalerMax": 300}]`, + "AutoScalerMin smaller than 3 when HA zones are enabled": { + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 2, "autoScalerMax": 300}]`, expectedError: true, }, "AutoScalerMax bigger than 300": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 301}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 301}]`, expectedError: true, }, "AutoScalerMin bigger than autoScalerMax": { - additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20, 
"autoScalerMax": 3}]`, + additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 20, "autoScalerMax": 3}]`, expectedError: true, }, } { @@ -679,6 +687,94 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) { } } +func TestHAZones(t *testing.T) { + t.Run("should fail when attempting to disable HA zones for additional worker node pool", func(t *testing.T) { + // given + instance := fixture.FixInstance(instanceID) + instance.ServicePlanID = PreviewPlanID + instance.Parameters.Parameters.AdditionalWorkerNodePools = []pkg.AdditionalWorkerNodePool{ + { + Name: "name-1", + MachineType: "m6i.large", + HAZones: true, + AutoScalerMin: 3, + AutoScalerMax: 20, + }, + } + st := storage.NewMemoryStorage() + err := st.Instances().Insert(instance) + require.NoError(t, err) + err = st.Operations().InsertProvisioningOperation(fixProvisioningOperation("provisioning01")) + require.NoError(t, err) + + handler := &handler{} + q := &automock.Queue{} + q.On("Add", mock.AnythingOfType("string")) + planDefaults := func(planID string, platformProvider pkg.CloudProvider, provider *pkg.CloudProvider) (*gqlschema.ClusterConfigInput, error) { + return &gqlschema.ClusterConfigInput{}, nil + } + kcBuilder := &kcMock.KcBuilder{} + svc := NewUpdate(Config{}, st.Instances(), st.RuntimeStates(), st.Operations(), handler, true, true, false, q, PlansConfig{}, + planDefaults, fixLogger(), dashboardConfig, kcBuilder, &OneForAllConvergedCloudRegionsProvider{}, fakeKcpK8sClient) + + // when + _, err = svc.Update(context.Background(), instanceID, domain.UpdateDetails{ + ServiceID: "", + PlanID: PreviewPlanID, + RawParameters: json.RawMessage(`{"additionalWorkerNodePools": [{"name": "name-1", "machineType": "m6i.large", "haZones": false, "autoScalerMin": 3, "autoScalerMax": 20}]}`), + PreviousValues: domain.PreviousValues{}, + RawContext: json.RawMessage("{\"globalaccount_id\":\"globalaccount_id_1\", \"active\":true}"), + MaintenanceInfo: nil, + }, true) + + // then + assert.EqualError(t, err, "HA zones cannot be disabled for name-1 additional worker node pool") + }) + + t.Run("should succeed when enabling HA zones for additional worker node pool", func(t *testing.T) { + // given + instance := fixture.FixInstance(instanceID) + instance.ServicePlanID = PreviewPlanID + instance.Parameters.Parameters.AdditionalWorkerNodePools = []pkg.AdditionalWorkerNodePool{ + { + Name: "name-1", + MachineType: "m6i.large", + HAZones: false, + AutoScalerMin: 3, + AutoScalerMax: 20, + }, + } + st := storage.NewMemoryStorage() + err := st.Instances().Insert(instance) + require.NoError(t, err) + err = st.Operations().InsertProvisioningOperation(fixProvisioningOperation("provisioning01")) + require.NoError(t, err) + + handler := &handler{} + q := &automock.Queue{} + q.On("Add", mock.AnythingOfType("string")) + planDefaults := func(planID string, platformProvider pkg.CloudProvider, provider *pkg.CloudProvider) (*gqlschema.ClusterConfigInput, error) { + return &gqlschema.ClusterConfigInput{}, nil + } + kcBuilder := &kcMock.KcBuilder{} + svc := NewUpdate(Config{}, st.Instances(), st.RuntimeStates(), st.Operations(), handler, true, true, false, q, PlansConfig{}, + planDefaults, fixLogger(), dashboardConfig, kcBuilder, &OneForAllConvergedCloudRegionsProvider{}, fakeKcpK8sClient) + + // when + _, err = svc.Update(context.Background(), instanceID, domain.UpdateDetails{ + ServiceID: "", + PlanID: PreviewPlanID, + RawParameters: json.RawMessage(`{"additionalWorkerNodePools": [{"name": "name-1", 
"machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]}`), + PreviousValues: domain.PreviousValues{}, + RawContext: json.RawMessage("{\"globalaccount_id\":\"globalaccount_id_1\", \"active\":true}"), + MaintenanceInfo: nil, + }, true) + + // then + assert.NoError(t, err) + }) +} + func TestUpdateAdditionalWorkerNodePoolsForUnsupportedPlans(t *testing.T) { for tn, tc := range map[string]struct { planID string diff --git a/internal/broker/plans_schema.go b/internal/broker/plans_schema.go index 312c795d4c..3bcff5a781 100644 --- a/internal/broker/plans_schema.go +++ b/internal/broker/plans_schema.go @@ -164,9 +164,10 @@ type AdditionalWorkerNodePoolsItems struct { type AdditionalWorkerNodePoolsItemsProperties struct { Name Type `json:"name,omitempty"` + MachineType Type `json:"machineType,omitempty"` + HAZones Type `json:"haZones,omitempty"` AutoScalerMin Type `json:"autoScalerMin,omitempty"` AutoScalerMax Type `json:"autoScalerMax,omitempty"` - MachineType Type `json:"machineType,omitempty"` } func NewModulesSchema() *Modules { @@ -442,8 +443,8 @@ func NewAdditionalWorkerNodePoolsSchema(machineTypesDisplay map[string]string, m UniqueItems: true, Description: "Specifies the list of additional worker node pools."}, Items: AdditionalWorkerNodePoolsItems{ - ControlsOrder: []string{"name", "machineType", "autoScalerMin", "autoScalerMax"}, - Required: []string{"name", "machineType", "autoScalerMin", "autoScalerMax"}, + ControlsOrder: []string{"name", "machineType", "haZones", "autoScalerMin", "autoScalerMax"}, + Required: []string{"name", "machineType", "haZones", "autoScalerMin", "autoScalerMax"}, Type: Type{ Type: "object", }, @@ -462,15 +463,21 @@ func NewAdditionalWorkerNodePoolsSchema(machineTypesDisplay map[string]string, m EnumDisplayName: machineTypesDisplay, Description: "Specifies the type of the virtual machine.", }, + HAZones: Type{ + Type: "boolean", + Title: "HA zones", + Default: true, + Description: "Specifies whether high availability (HA) zones are supported. If HA is disabled, all resources are placed in a single, randomly selected zone. Disabling HA allows setting both autoScalerMin and autoScalerMax to 1, which helps reduce costs. It is not recommended for production environments. Once HA is enabled, it cannot be disabled. When enabled, resources are distributed across three zones to enhance fault tolerance. 
Enabling HA requires setting autoScalerMin to the minimal value 3.",
 			},
 			AutoScalerMin: Type{
 				Type:        "integer",
-				Minimum:     3,
+				Minimum:     1,
 				Default:     3,
 				Description: "Specifies the minimum number of virtual machines to create.",
 			},
 			AutoScalerMax: Type{
 				Type:        "integer",
-				Minimum:     3,
+				Minimum:     1,
 				Maximum:     300,
 				Default:     20,
 				Description: "Specifies the maximum number of virtual machines to create.",

From 53dc152c439451642f75c710f805150c66d04ce3 Mon Sep 17 00:00:00 2001
From: Kyma gopher bot <123084774+kyma-gopher-bot@users.noreply.github.com>
Date: Thu, 23 Jan 2025 13:17:57 +0100
Subject: [PATCH 3/4] Bump sec-scanners-config.yaml, KEB images and Chart to 1.11.15 (#1670)

---
 resources/keb/Chart.yaml                   |  4 ++--
 resources/keb/values.yaml                  | 20 ++++++++--------
 sec-scanners-config.yaml                   | 24 +++++++++----------
 .../kyma-environment-broker-archiver.yaml  |  2 +-
 .../kyma-environments-cleanup-job.yaml     |  2 +-
 5 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/resources/keb/Chart.yaml b/resources/keb/Chart.yaml
index d5df89ee0a..a0f5600239 100644
--- a/resources/keb/Chart.yaml
+++ b/resources/keb/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
-appVersion: "1.11.14"
+appVersion: "1.11.15"
 name: keb
 description: Kyma Environment Broker Helm chart for Kubernetes
-version: 1.11.14
+version: 1.11.15
 type: application
diff --git a/resources/keb/values.yaml b/resources/keb/values.yaml
index a4327ac7e8..8bbde6ab86 100644
--- a/resources/keb/values.yaml
+++ b/resources/keb/values.yaml
@@ -9,34 +9,34 @@ global:
     path: europe-docker.pkg.dev/kyma-project/prod
     kyma_environment_broker:
       dir:
-      version: "1.11.14"
+      version: "1.11.15"
     kyma_environment_broker_schema_migrator:
       dir:
-      version: 1.11.14
+      version: 1.11.15
     kyma_environments_subaccount_cleanup_job:
       dir:
-      version: "1.11.14"
+      version: "1.11.15"
     kyma_environment_trial_cleanup_job:
       dir:
-      version: "1.11.14"
+      version: "1.11.15"
     kyma_environment_expirator_job:
       dir:
-      version: "1.11.14"
+      version: "1.11.15"
     kyma_environment_deprovision_retrigger_job:
       dir:
-      version: "1.11.14"
+      version: "1.11.15"
     kyma_environment_runtime_reconciler:
       dir:
-      version: "1.11.14"
+      version: "1.11.15"
     kyma_environment_subaccount_sync:
       dir:
-      version: "1.11.14"
+      version: "1.11.15"
     kyma_environment_globalaccounts:
       dir:
-      version: "1.11.14"
+      version: "1.11.15"
     kyma_environment_service_binding_cleanup_job:
       dir:
-      version: 1.11.14
+      version: 1.11.15
 
 kyma_environment_broker:
   serviceAccountName: "kcp-kyma-environment-broker"
diff --git a/sec-scanners-config.yaml b/sec-scanners-config.yaml
index 30e5ed9edf..51423d3d29 100644
--- a/sec-scanners-config.yaml
+++ b/sec-scanners-config.yaml
@@ -1,17 +1,17 @@
 module-name: kyma-environment-broker
-rc-tag: 1.11.14
+rc-tag: 1.11.15
 protecode:
-  - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-broker:1.11.14
-  - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-deprovision-retrigger-job:1.11.14
-  - europe-docker.pkg.dev/kyma-project/prod/kyma-environments-cleanup-job:1.11.14
-  - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-runtime-reconciler:1.11.14
-  - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-archiver-job:1.11.14
-  - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-expirator-job:1.11.14
-  - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-subaccount-cleanup-job:1.11.14
-  - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-subaccount-sync:1.11.14
-  - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-broker-globalaccounts:1.11.14
-  - 
europe-docker.pkg.dev/kyma-project/prod/kyma-environment-broker-schema-migrator:1.11.14 - - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-service-binding-cleanup-job:1.11.14 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-broker:1.11.15 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-deprovision-retrigger-job:1.11.15 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environments-cleanup-job:1.11.15 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-runtime-reconciler:1.11.15 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-archiver-job:1.11.15 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-expirator-job:1.11.15 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-subaccount-cleanup-job:1.11.15 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-subaccount-sync:1.11.15 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-broker-globalaccounts:1.11.15 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-broker-schema-migrator:1.11.15 + - europe-docker.pkg.dev/kyma-project/prod/kyma-environment-service-binding-cleanup-job:1.11.15 whitesource: language: golang-mod subprojects: false diff --git a/utils/archiver/kyma-environment-broker-archiver.yaml b/utils/archiver/kyma-environment-broker-archiver.yaml index 7da6aa9301..b5d4eaadaa 100644 --- a/utils/archiver/kyma-environment-broker-archiver.yaml +++ b/utils/archiver/kyma-environment-broker-archiver.yaml @@ -74,4 +74,4 @@ spec: template: spec: containers: - - image: europe-docker.pkg.dev/kyma-project/prod/kyma-environment-archiver-job:1.11.14 + - image: europe-docker.pkg.dev/kyma-project/prod/kyma-environment-archiver-job:1.11.15 diff --git a/utils/kyma-environments-cleanup-job/kyma-environments-cleanup-job.yaml b/utils/kyma-environments-cleanup-job/kyma-environments-cleanup-job.yaml index 9a1257857c..e292ad1048 100644 --- a/utils/kyma-environments-cleanup-job/kyma-environments-cleanup-job.yaml +++ b/utils/kyma-environments-cleanup-job/kyma-environments-cleanup-job.yaml @@ -28,7 +28,7 @@ spec: containers: - name: kyma-environments-cleanup command: ["/bin/main"] - image: europe-docker.pkg.dev/kyma-project/prod/kyma-environments-cleanup-job:1.11.14 + image: europe-docker.pkg.dev/kyma-project/prod/kyma-environments-cleanup-job:1.11.15 imagePullPolicy: IfNotPresent env: - name: DATABASE_EMBEDDED From 87cc2267838fca7ee704d571b1d91c5c9fa25e97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 Jan 2025 14:33:44 +0000 Subject: [PATCH 4/4] gomod(deps): bump github.com/docker/docker from 27.5.0+incompatible to 27.5.1+incompatible (#1667) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gomod(deps): bump github.com/docker/docker Bumps [github.com/docker/docker](https://github.com/docker/docker) from 27.5.0+incompatible to 27.5.1+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v27.5.0...v27.5.1) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jarosław Pieszka --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4290d75fec..d6c59b36bc 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Azure/go-autorest/autorest/adal v0.9.24 github.com/Masterminds/sprig v2.22.0+incompatible github.com/dlmiddlecote/sqlstats v1.0.2 - github.com/docker/docker v27.5.0+incompatible + github.com/docker/docker v27.5.1+incompatible github.com/docker/go-connections v0.5.0 github.com/gardener/gardener v1.110.4 github.com/go-co-op/gocron v1.37.0 diff --git a/go.sum b/go.sum index ec11493586..f4f399612b 100644 --- a/go.sum +++ b/go.sum @@ -87,8 +87,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlmiddlecote/sqlstats v1.0.2 h1:gSU11YN23D/iY50A2zVYwgXgy072khatTsIW6UPjUtI= github.com/dlmiddlecote/sqlstats v1.0.2/go.mod h1:0CWaIh/Th+z2aI6Q9Jpfg/o21zmGxWhbByHgQSCUQvY= -github.com/docker/docker v27.5.0+incompatible h1:um++2NcQtGRTz5eEgO6aJimo6/JxrTXC941hd05JO6U= -github.com/docker/docker v27.5.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
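---
Note on PATCH 1/4: a minimal, self-contained sketch of the --customOIDC path. As in UpdateCommand.Run, the raw JSON string is unmarshalled into a generic map and wrapped into the {"oidc": ...} payload handed to brokerClient.UpdateInstance; the broker call itself is assumed here and replaced by a print so the snippet runs on its own.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	// Example value for --customOIDC, taken from the command's help text.
	customOIDC := `{"clientID":"foo-bar","issuerURL":"https://new.custom.ias.com"}`

	// Same parsing step as UpdateCommand.Run: reject malformed JSON before
	// anything is sent to the broker.
	var newOIDCConfig map[string]interface{}
	if err := json.Unmarshal([]byte(customOIDC), &newOIDCConfig); err != nil {
		log.Fatalf("failed to parse custom OIDC config: %v", err)
	}

	// The update payload the patch passes to brokerClient.UpdateInstance.
	payload := map[string]interface{}{"oidc": newOIDCConfig}

	out, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(out)) // stands in for the real broker update call
}

The other two custom flags produce payloads of the same shape: --customMachineType yields {"machineType": "..."} and --customAdministrators yields {"administrators": [...]}.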
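Note on PATCH 2/4: the HA-zones rules can be exercised in isolation. The sketch below mirrors the validation logic added to common/runtime/model.go and internal/broker/instance_create.go; a local AdditionalWorkerNodePool type stands in for the repo's pkg type so the file compiles on its own. The rules: pool names must be unique, a pool with HA zones enabled needs autoScalerMin of at least 3, and an update may never switch haZones from true to false for an existing pool.

package main

import "fmt"

// Local stand-in for pkg.AdditionalWorkerNodePool from common/runtime/model.go.
type AdditionalWorkerNodePool struct {
	Name          string
	MachineType   string
	HAZones       bool
	AutoScalerMin int
	AutoScalerMax int
}

func (a AdditionalWorkerNodePool) Validate() error {
	if a.AutoScalerMin > a.AutoScalerMax {
		return fmt.Errorf("AutoScalerMax %v should be larger than AutoScalerMin %v for %s additional worker node pool", a.AutoScalerMax, a.AutoScalerMin, a.Name)
	}
	if a.HAZones && a.AutoScalerMin < 3 {
		return fmt.Errorf("AutoScalerMin %v should be at least 3 when HA zones are enabled for %s additional worker node pool", a.AutoScalerMin, a.Name)
	}
	return nil
}

// HA zones are one-way: an update must not disable them for an existing pool.
func (a AdditionalWorkerNodePool) ValidateDisablingHAZones(current []AdditionalWorkerNodePool) error {
	for _, c := range current {
		if a.Name == c.Name && !a.HAZones && c.HAZones {
			return fmt.Errorf("HA zones cannot be disabled for %s additional worker node pool", a.Name)
		}
	}
	return nil
}

// Mirrors broker.AreNamesUnique: pool names must not repeat.
func AreNamesUnique(pools []AdditionalWorkerNodePool) bool {
	seen := make(map[string]struct{})
	for _, p := range pools {
		if _, ok := seen[p.Name]; ok {
			return false
		}
		seen[p.Name] = struct{}{}
	}
	return true
}

func main() {
	current := []AdditionalWorkerNodePool{
		{Name: "name-1", MachineType: "m6i.large", HAZones: true, AutoScalerMin: 3, AutoScalerMax: 20},
	}

	// Accepted: HA stays on and autoScalerMin is at least 3.
	ok := AdditionalWorkerNodePool{Name: "name-1", MachineType: "m6i.large", HAZones: true, AutoScalerMin: 3, AutoScalerMax: 20}
	fmt.Println(ok.Validate(), ok.ValidateDisablingHAZones(current)) // <nil> <nil>

	// Rejected: the update tries to turn HA zones off for an existing pool.
	bad := AdditionalWorkerNodePool{Name: "name-1", MachineType: "m6i.large", HAZones: false, AutoScalerMin: 1, AutoScalerMax: 20}
	fmt.Println(bad.ValidateDisablingHAZones(current)) // HA zones cannot be disabled for name-1 additional worker node pool

	fmt.Println(AreNamesUnique(current)) // true
}

The schema change in plans_schema.go follows from the same rules: the JSON-schema minimum for autoScalerMin and autoScalerMax drops from 3 to 1 because a non-HA pool may scale down to a single node, while the 3-node floor for HA pools is enforced by Validate above.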