Additional worker node pools feature flag #1695

Merged
7 changes: 4 additions & 3 deletions cmd/broker/provisioning_test.go
@@ -1882,8 +1882,9 @@ func TestProvisioningWithAdditionalWorkerNodePools(t *testing.T) {
//given
cfg := fixConfig()
cfg.Broker.KimConfig.Enabled = true
cfg.Broker.KimConfig.Plans = []string{"preview"}
cfg.Broker.KimConfig.KimOnlyPlans = []string{"preview"}
cfg.Broker.KimConfig.Plans = []string{"aws"}
cfg.Broker.KimConfig.KimOnlyPlans = []string{"aws"}
cfg.Broker.EnableAdditionalWorkerNodePools = true

suite := NewBrokerSuiteTestWithConfig(t, cfg)
defer suite.TearDown()
@@ -1893,7 +1894,7 @@ func TestProvisioningWithAdditionalWorkerNodePools(t *testing.T) {
resp := suite.CallAPI("PUT", fmt.Sprintf("oauth/cf-eu21/v2/service_instances/%s?accepts_incomplete=true", iid),
`{
"service_id": "47c9dcbf-ff30-448e-ab36-d3bad66ba281",
"plan_id": "5cb3d976-b85c-42ea-a636-79cadda109a9",
"plan_id": "361c511f-f939-4621-b228-d0fb79a1fe15",
"context": {
"globalaccount_id": "g-account-id",
"subaccount_id": "sub-id",
42 changes: 30 additions & 12 deletions cmd/broker/update_test.go
@@ -2530,14 +2530,20 @@ func TestUpdateErsContextAndParamsForExpiredInstance(t *testing.T) {
func TestUpdateAdditionalWorkerNodePools(t *testing.T) {
t.Run("should add additional worker node pools", func(t *testing.T) {
// given
suite := NewBrokerSuiteTest(t)
cfg := fixConfig()
cfg.Broker.KimConfig.Enabled = true
cfg.Broker.KimConfig.Plans = []string{"aws"}
cfg.Broker.KimConfig.KimOnlyPlans = []string{"aws"}
cfg.Broker.EnableAdditionalWorkerNodePools = true

suite := NewBrokerSuiteTestWithConfig(t, cfg)
defer suite.TearDown()
iid := uuid.New().String()

resp := suite.CallAPI("PUT", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/%s?accepts_incomplete=true&plan_id=7d55d31d-35ae-4438-bf13-6ffdfa107d9f&service_id=47c9dcbf-ff30-448e-ab36-d3bad66ba281", iid),
resp := suite.CallAPI("PUT", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/%s?accepts_incomplete=true&plan_id=361c511f-f939-4621-b228-d0fb79a1fe15&service_id=47c9dcbf-ff30-448e-ab36-d3bad66ba281", iid),
`{
"service_id": "47c9dcbf-ff30-448e-ab36-d3bad66ba281",
"plan_id": "5cb3d976-b85c-42ea-a636-79cadda109a9",
"plan_id": "361c511f-f939-4621-b228-d0fb79a1fe15",
"context": {
"globalaccount_id": "g-account-id",
"subaccount_id": "sub-id",
@@ -2557,7 +2563,7 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) {
resp = suite.CallAPI("PATCH", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/%s?accepts_incomplete=true", iid),
`{
"service_id": "47c9dcbf-ff30-448e-ab36-d3bad66ba281",
"plan_id": "5cb3d976-b85c-42ea-a636-79cadda109a9",
"plan_id": "361c511f-f939-4621-b228-d0fb79a1fe15",
"context": {
"globalaccount_id": "g-account-id",
"user_id": "[email protected]"
@@ -2594,14 +2600,20 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) {

t.Run("should replace additional worker node pools", func(t *testing.T) {
// given
suite := NewBrokerSuiteTest(t)
cfg := fixConfig()
cfg.Broker.KimConfig.Enabled = true
cfg.Broker.KimConfig.Plans = []string{"aws"}
cfg.Broker.KimConfig.KimOnlyPlans = []string{"aws"}
cfg.Broker.EnableAdditionalWorkerNodePools = true

suite := NewBrokerSuiteTestWithConfig(t, cfg)
defer suite.TearDown()
iid := uuid.New().String()

resp := suite.CallAPI("PUT", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/%s?accepts_incomplete=true&plan_id=7d55d31d-35ae-4438-bf13-6ffdfa107d9f&service_id=47c9dcbf-ff30-448e-ab36-d3bad66ba281", iid),
resp := suite.CallAPI("PUT", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/%s?accepts_incomplete=true&plan_id=361c511f-f939-4621-b228-d0fb79a1fe15&service_id=47c9dcbf-ff30-448e-ab36-d3bad66ba281", iid),
`{
"service_id": "47c9dcbf-ff30-448e-ab36-d3bad66ba281",
"plan_id": "5cb3d976-b85c-42ea-a636-79cadda109a9",
"plan_id": "361c511f-f939-4621-b228-d0fb79a1fe15",
"context": {
"globalaccount_id": "g-account-id",
"subaccount_id": "sub-id",
@@ -2630,7 +2642,7 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) {
resp = suite.CallAPI("PATCH", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/%s?accepts_incomplete=true", iid),
`{
"service_id": "47c9dcbf-ff30-448e-ab36-d3bad66ba281",
"plan_id": "5cb3d976-b85c-42ea-a636-79cadda109a9",
"plan_id": "361c511f-f939-4621-b228-d0fb79a1fe15",
"context": {
"globalaccount_id": "g-account-id",
"user_id": "[email protected]"
@@ -2659,14 +2671,20 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) {

t.Run("should remove additional worker node pools when list is empty", func(t *testing.T) {
// given
suite := NewBrokerSuiteTest(t)
cfg := fixConfig()
cfg.Broker.KimConfig.Enabled = true
cfg.Broker.KimConfig.Plans = []string{"aws"}
cfg.Broker.KimConfig.KimOnlyPlans = []string{"aws"}
cfg.Broker.EnableAdditionalWorkerNodePools = true

suite := NewBrokerSuiteTestWithConfig(t, cfg)
defer suite.TearDown()
iid := uuid.New().String()

resp := suite.CallAPI("PUT", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/%s?accepts_incomplete=true&plan_id=7d55d31d-35ae-4438-bf13-6ffdfa107d9f&service_id=47c9dcbf-ff30-448e-ab36-d3bad66ba281", iid),
resp := suite.CallAPI("PUT", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/%s?accepts_incomplete=true&plan_id=361c511f-f939-4621-b228-d0fb79a1fe15&service_id=47c9dcbf-ff30-448e-ab36-d3bad66ba281", iid),
`{
"service_id": "47c9dcbf-ff30-448e-ab36-d3bad66ba281",
"plan_id": "5cb3d976-b85c-42ea-a636-79cadda109a9",
"plan_id": "361c511f-f939-4621-b228-d0fb79a1fe15",
"context": {
"globalaccount_id": "g-account-id",
"subaccount_id": "sub-id",
@@ -2702,7 +2720,7 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) {
resp = suite.CallAPI("PATCH", fmt.Sprintf("oauth/cf-eu10/v2/service_instances/%s?accepts_incomplete=true", iid),
`{
"service_id": "47c9dcbf-ff30-448e-ab36-d3bad66ba281",
"plan_id": "5cb3d976-b85c-42ea-a636-79cadda109a9",
"plan_id": "361c511f-f939-4621-b228-d0fb79a1fe15",
"context": {
"globalaccount_id": "g-account-id",
"user_id": "[email protected]"
2 changes: 2 additions & 0 deletions internal/broker/broker.go
@@ -50,6 +50,8 @@ type Config struct {
TrialDocsURL string `envconfig:"default="`
EnableShootAndSeedSameRegion bool `envconfig:"default=false"`
AllowUpdateExpiredInstanceWithContext bool `envconfig:"default=false"`
EnableAdditionalWorkerNodePools bool `envconfig:"default=false"`
EnableLoadCurrentConfig bool `envconfig:"default=false"`

Binding BindingConfig
KimConfig KimConfig
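Both new Broker flags above default to false, so the feature stays off unless it is enabled explicitly. As a rough illustration only — the vrischmann/envconfig package and the "APP" prefix below are assumptions about how the broker wires its configuration, not something this diff shows — the flags would be driven by environment variables roughly like this:

package main

import (
	"fmt"
	"log"

	"github.com/vrischmann/envconfig"
)

// Trimmed-down copy of the Config fields added in this PR.
type Broker struct {
	EnableAdditionalWorkerNodePools bool `envconfig:"default=false"`
	EnableLoadCurrentConfig         bool `envconfig:"default=false"`
}

type Config struct {
	Broker Broker
}

func main() {
	var cfg Config
	// With an "APP" prefix, an environment variable along the lines of
	// APP_BROKER_ENABLE_ADDITIONAL_WORKER_NODE_POOLS=true would switch the feature on;
	// the exact variable name depends on the prefix the broker actually uses.
	if err := envconfig.InitWithPrefix(&cfg, "APP"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("additional worker node pools enabled:", cfg.Broker.EnableAdditionalWorkerNodePools)
}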
19 changes: 12 additions & 7 deletions internal/broker/instance_create.go
@@ -302,6 +302,10 @@ func (b *ProvisionEndpoint) validateAndExtract(details domain.ProvisionDetails,
}

if parameters.AdditionalWorkerNodePools != nil {
if !b.config.EnableAdditionalWorkerNodePools {
message := fmt.Sprintf("additional worker node pools are not supported")
return ersContext, parameters, apiresponses.NewFailureResponse(fmt.Errorf(message), http.StatusUnprocessableEntity, message)
}
if !supportsAdditionalWorkerNodePools(details.PlanID) {
message := fmt.Sprintf("additional worker node pools are not supported for plan ID: %s", details.PlanID)
return ersContext, parameters, apiresponses.NewFailureResponse(fmt.Errorf(message), http.StatusUnprocessableEntity, message)
@@ -406,15 +410,16 @@ func isEuRestrictedAccess(ctx context.Context) bool {
}

func supportsAdditionalWorkerNodePools(planID string) bool {
var supportedPlans = []string{
PreviewPlanID,
var unsupportedPlans = []string{
FreemiumPlanID,
TrialPlanID,
}
for _, supportedPlan := range supportedPlans {
if planID == supportedPlan {
return true
for _, unsupportedPlan := range unsupportedPlans {
if planID == unsupportedPlan {
return false
}
}
return false
return true
}

func AreNamesUnique(pools []pkg.AdditionalWorkerNodePool) bool {
@@ -507,7 +512,7 @@ func (b *ProvisionEndpoint) determineLicenceType(planId string) *string {

func (b *ProvisionEndpoint) validator(details *domain.ProvisionDetails, provider pkg.CloudProvider, ctx context.Context) (JSONSchemaValidator, error) {
platformRegion, _ := middleware.RegionFromContext(ctx)
plans := Plans(b.plansConfig, provider, b.config.IncludeAdditionalParamsInSchema, euaccess.IsEURestrictedAccess(platformRegion), b.config.UseSmallerMachineTypes, b.config.EnableShootAndSeedSameRegion, b.convergedCloudRegionsProvider.GetRegions(platformRegion), assuredworkloads.IsKSA(platformRegion))
plans := Plans(b.plansConfig, provider, b.config.IncludeAdditionalParamsInSchema, euaccess.IsEURestrictedAccess(platformRegion), b.config.UseSmallerMachineTypes, b.config.EnableShootAndSeedSameRegion, b.convergedCloudRegionsProvider.GetRegions(platformRegion), assuredworkloads.IsKSA(platformRegion), b.config.EnableAdditionalWorkerNodePools, b.config.EnableLoadCurrentConfig)
plan := plans[details.PlanID]
schema := string(Marshal(plan.Schemas.Instance.Create.Parameters))

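Note that supportsAdditionalWorkerNodePools is now a denylist rather than an allowlist: instead of accepting only the preview plan, it rejects only the Freemium and Trial plans and lets every other plan through. A minimal sketch of the new behaviour, written as an in-package test alongside the function (illustrative only, not part of this PR):

package broker

import "testing"

func TestSupportsAdditionalWorkerNodePoolsSketch(t *testing.T) {
	cases := map[string]bool{
		AWSPlanID:      true,  // off the denylist, so now supported
		AzurePlanID:    true,  // off the denylist, so now supported
		FreemiumPlanID: false, // denylisted
		TrialPlanID:    false, // denylisted
	}
	for planID, want := range cases {
		if got := supportsAdditionalWorkerNodePools(planID); got != want {
			t.Errorf("supportsAdditionalWorkerNodePools(%q) = %v, want %v", planID, got, want)
		}
	}
}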
37 changes: 12 additions & 25 deletions internal/broker/instance_create_test.go
@@ -1563,7 +1563,7 @@ func TestAdditionalWorkerNodePools(t *testing.T) {
queue.On("Add", mock.AnythingOfType("string"))

factoryBuilder := &automock.PlanValidator{}
factoryBuilder.On("IsPlanSupport", broker.PreviewPlanID).Return(true)
factoryBuilder.On("IsPlanSupport", broker.AWSPlanID).Return(true)

planDefaults := func(planID string, platformProvider pkg.CloudProvider, provider *pkg.CloudProvider) (*gqlschema.ClusterConfigInput, error) {
return &gqlschema.ClusterConfigInput{}, nil
@@ -1573,10 +1573,11 @@ func TestAdditionalWorkerNodePools(t *testing.T) {
// #create provisioner endpoint
provisionEndpoint := broker.NewProvision(
broker.Config{
EnablePlans: []string{"preview"},
URL: brokerURL,
OnlySingleTrialPerGA: true,
EnableKubeconfigURLLabel: true,
EnablePlans: []string{"aws"},
URL: brokerURL,
OnlySingleTrialPerGA: true,
EnableKubeconfigURLLabel: true,
EnableAdditionalWorkerNodePools: true,
},
gardener.Config{Project: "test", ShootDomain: "example.com", DNSProviders: fixDNSProviders()},
memoryStorage.Operations(),
@@ -1596,7 +1597,7 @@ func TestAdditionalWorkerNodePools(t *testing.T) {
// when
_, err := provisionEndpoint.Provision(fixRequestContext(t, "cf-eu10"), instanceID, domain.ProvisionDetails{
ServiceID: serviceID,
PlanID: broker.PreviewPlanID,
PlanID: broker.AWSPlanID,
RawParameters: json.RawMessage(fmt.Sprintf(`{"name": "%s", "region": "%s","additionalWorkerNodePools": %s }`, clusterName, "eu-central-1", tc.additionalWorkerNodePools)),
RawContext: json.RawMessage(fmt.Sprintf(`{"globalaccount_id": "%s", "subaccount_id": "%s", "user_id": "%s"}`, "any-global-account-id", subAccountID, "[email protected]")),
}, true)
@@ -1618,21 +1619,6 @@ func TestAdditionalWorkerNodePoolsForUnsupportedPlans(t *testing.T) {
"Free": {
planID: broker.FreemiumPlanID,
},
"Azure": {
planID: broker.AzurePlanID,
},
"Azure lite": {
planID: broker.AzureLitePlanID,
},
"AWS": {
planID: broker.AWSPlanID,
},
"GCP": {
planID: broker.GCPPlanID,
},
"SAP converged cloud": {
planID: broker.SapConvergedCloudPlanID,
},
} {
t.Run(tn, func(t *testing.T) {
// given
@@ -1656,10 +1642,11 @@ func TestAdditionalWorkerNodePoolsForUnsupportedPlans(t *testing.T) {
// #create provisioner endpoint
provisionEndpoint := broker.NewProvision(
broker.Config{
EnablePlans: []string{"trial", "free", "azure", "azure_lite", "aws", "gcp", "sap-converged-cloud"},
URL: brokerURL,
OnlySingleTrialPerGA: true,
EnableKubeconfigURLLabel: true,
EnablePlans: []string{"trial", "free"},
URL: brokerURL,
OnlySingleTrialPerGA: true,
EnableKubeconfigURLLabel: true,
EnableAdditionalWorkerNodePools: true,
},
gardener.Config{Project: "test", ShootDomain: "example.com", DNSProviders: fixDNSProviders()},
memoryStorage.Operations(),
6 changes: 5 additions & 1 deletion internal/broker/instance_update.go
@@ -265,6 +265,10 @@ func (b *UpdateEndpoint) processUpdateParameters(instance *internal.Instance, de
}

if params.AdditionalWorkerNodePools != nil {
if !b.config.EnableAdditionalWorkerNodePools {
message := fmt.Sprintf("additional worker node pools are not supported")
return domain.UpdateServiceSpec{}, apiresponses.NewFailureResponse(fmt.Errorf(message), http.StatusBadRequest, message)
}
if !supportsAdditionalWorkerNodePools(details.PlanID) {
message := fmt.Sprintf("additional worker node pools are not supported for plan ID: %s", details.PlanID)
return domain.UpdateServiceSpec{}, apiresponses.NewFailureResponse(fmt.Errorf(message), http.StatusBadRequest, message)
@@ -428,7 +432,7 @@ func (b *UpdateEndpoint) extractActiveValue(id string, provisioning internal.Pro
func (b *UpdateEndpoint) getJsonSchemaValidator(provider pkg.CloudProvider, planID string, platformRegion string) (JSONSchemaValidator, error) {
// shootAndSeedSameRegion is never enabled for update
b.log.Info(fmt.Sprintf("region is: %s", platformRegion))
plans := Plans(b.plansConfig, provider, b.config.IncludeAdditionalParamsInSchema, euaccess.IsEURestrictedAccess(platformRegion), b.config.UseSmallerMachineTypes, false, b.convergedCloudRegionsProvider.GetRegions(platformRegion), assuredworkloads.IsKSA(platformRegion))
plans := Plans(b.plansConfig, provider, b.config.IncludeAdditionalParamsInSchema, euaccess.IsEURestrictedAccess(platformRegion), b.config.UseSmallerMachineTypes, false, b.convergedCloudRegionsProvider.GetRegions(platformRegion), assuredworkloads.IsKSA(platformRegion), b.config.EnableAdditionalWorkerNodePools, b.config.EnableLoadCurrentConfig)
plan := plans[planID]
schema := string(Marshal(plan.Schemas.Instance.Update.Parameters))

33 changes: 11 additions & 22 deletions internal/broker/instance_update_test.go
@@ -654,7 +654,7 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) {
t.Run(tn, func(t *testing.T) {
// given
instance := fixture.FixInstance(instanceID)
instance.ServicePlanID = PreviewPlanID
instance.ServicePlanID = AWSPlanID
st := storage.NewMemoryStorage()
err := st.Instances().Insert(instance)
require.NoError(t, err)
@@ -670,11 +670,12 @@ func TestUpdateAdditionalWorkerNodePools(t *testing.T) {
kcBuilder := &kcMock.KcBuilder{}
svc := NewUpdate(Config{}, st.Instances(), st.RuntimeStates(), st.Operations(), handler, true, true, false, q, PlansConfig{},
planDefaults, fixLogger(), dashboardConfig, kcBuilder, &OneForAllConvergedCloudRegionsProvider{}, fakeKcpK8sClient)
svc.config.EnableAdditionalWorkerNodePools = true

// when
_, err = svc.Update(context.Background(), instanceID, domain.UpdateDetails{
ServiceID: "",
PlanID: PreviewPlanID,
PlanID: AWSPlanID,
RawParameters: json.RawMessage("{\"additionalWorkerNodePools\":" + tc.additionalWorkerNodePools + "}"),
PreviousValues: domain.PreviousValues{},
RawContext: json.RawMessage("{\"globalaccount_id\":\"globalaccount_id_1\", \"active\":true}"),
@@ -691,7 +692,7 @@ func TestHAZones(t *testing.T) {
t.Run("should fail when attempting to disable HA zones for existing additional worker node pool", func(t *testing.T) {
// given
instance := fixture.FixInstance(instanceID)
instance.ServicePlanID = PreviewPlanID
instance.ServicePlanID = AWSPlanID
instance.Parameters.Parameters.AdditionalWorkerNodePools = []pkg.AdditionalWorkerNodePool{
{
Name: "name-1",
@@ -716,11 +717,12 @@ func TestHAZones(t *testing.T) {
kcBuilder := &kcMock.KcBuilder{}
svc := NewUpdate(Config{}, st.Instances(), st.RuntimeStates(), st.Operations(), handler, true, true, false, q, PlansConfig{},
planDefaults, fixLogger(), dashboardConfig, kcBuilder, &OneForAllConvergedCloudRegionsProvider{}, fakeKcpK8sClient)
svc.config.EnableAdditionalWorkerNodePools = true

// when
_, err = svc.Update(context.Background(), instanceID, domain.UpdateDetails{
ServiceID: "",
PlanID: PreviewPlanID,
PlanID: AWSPlanID,
RawParameters: json.RawMessage(`{"additionalWorkerNodePools": [{"name": "name-1", "machineType": "m6i.large", "haZones": false, "autoScalerMin": 3, "autoScalerMax": 20}]}`),
PreviousValues: domain.PreviousValues{},
RawContext: json.RawMessage("{\"globalaccount_id\":\"globalaccount_id_1\", \"active\":true}"),
@@ -734,7 +736,7 @@ func TestHAZones(t *testing.T) {
t.Run("should fail when attempting to enable HA zones for existing additional worker node pool", func(t *testing.T) {
// given
instance := fixture.FixInstance(instanceID)
instance.ServicePlanID = PreviewPlanID
instance.ServicePlanID = AWSPlanID
instance.Parameters.Parameters.AdditionalWorkerNodePools = []pkg.AdditionalWorkerNodePool{
{
Name: "name-1",
@@ -759,11 +761,12 @@ func TestHAZones(t *testing.T) {
kcBuilder := &kcMock.KcBuilder{}
svc := NewUpdate(Config{}, st.Instances(), st.RuntimeStates(), st.Operations(), handler, true, true, false, q, PlansConfig{},
planDefaults, fixLogger(), dashboardConfig, kcBuilder, &OneForAllConvergedCloudRegionsProvider{}, fakeKcpK8sClient)
svc.config.EnableAdditionalWorkerNodePools = true

// when
_, err = svc.Update(context.Background(), instanceID, domain.UpdateDetails{
ServiceID: "",
PlanID: PreviewPlanID,
PlanID: AWSPlanID,
RawParameters: json.RawMessage(`{"additionalWorkerNodePools": [{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]}`),
PreviousValues: domain.PreviousValues{},
RawContext: json.RawMessage("{\"globalaccount_id\":\"globalaccount_id_1\", \"active\":true}"),
@@ -785,21 +788,6 @@ func TestUpdateAdditionalWorkerNodePoolsForUnsupportedPlans(t *testing.T) {
"Free": {
planID: FreemiumPlanID,
},
"Azure": {
planID: AzurePlanID,
},
"Azure lite": {
planID: AzureLitePlanID,
},
"AWS": {
planID: AWSPlanID,
},
"GCP": {
planID: GCPPlanID,
},
"SAP converged cloud": {
planID: SapConvergedCloudPlanID,
},
} {
t.Run(tn, func(t *testing.T) {
// given
@@ -820,8 +808,9 @@ func TestUpdateAdditionalWorkerNodePoolsForUnsupportedPlans(t *testing.T) {
kcBuilder := &kcMock.KcBuilder{}
svc := NewUpdate(Config{}, st.Instances(), st.RuntimeStates(), st.Operations(), handler, true, true, false, q, PlansConfig{},
planDefaults, fixLogger(), dashboardConfig, kcBuilder, &OneForAllConvergedCloudRegionsProvider{}, fakeKcpK8sClient)
svc.config.EnableAdditionalWorkerNodePools = true

additionalWorkerNodePools := `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`
additionalWorkerNodePools := `[{"name": "name-1", "machineType": "m6i.large", "haZones": true, "autoScalerMin": 3, "autoScalerMax": 20}]`

// when
_, err = svc.Update(context.Background(), instanceID, domain.UpdateDetails{