From df4c50699401c46ef036740999a812dec6d492f4 Mon Sep 17 00:00:00 2001 From: Jian Wang Date: Fri, 9 Aug 2024 09:24:33 +0200 Subject: [PATCH] Update minor typos and add dummy endpoint when necessary Signed-off-by: Jian Wang --- pkg/lb/servicelb/manager.go | 99 +++++++++++-- pkg/webhook/loadbalancer/mutator.go | 72 +++++++++- pkg/webhook/loadbalancer/mutator_test.go | 82 +++++++++++ pkg/webhook/loadbalancer/validator.go | 62 +++++++++ pkg/webhook/loadbalancer/validator_test.go | 155 ++++++++++++++++++++- 5 files changed, 453 insertions(+), 17 deletions(-) diff --git a/pkg/lb/servicelb/manager.go b/pkg/lb/servicelb/manager.go index 60e5cc8c..ce73034f 100644 --- a/pkg/lb/servicelb/manager.go +++ b/pkg/lb/servicelb/manager.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "reflect" + "slices" "strconv" "strings" "time" @@ -132,7 +133,7 @@ func (m *Manager) updateAllConditions(lb *lbv1.LoadBalancer, eps *discoveryv1.En epsCopy := eps.DeepCopy() updated := false for i := range epsCopy.Endpoints { - if needUpdateEndpointConditions(&epsCopy.Endpoints[i].Conditions, isHealthy) { + if !isDummyEndpoint(&epsCopy.Endpoints[i]) && needUpdateEndpointConditions(&epsCopy.Endpoints[i].Conditions, isHealthy) { updateEndpointConditions(&epsCopy.Endpoints[i].Conditions, isHealthy) updated = true } @@ -161,7 +162,7 @@ func (m *Manager) GetProbeReadyBackendServerCount(lb *lbv1.LoadBalancer) (int, e // if use `for _, ep := range eps.Endpoints` // get: G601: Implicit memory aliasing in for loop. (gosec) for i := range eps.Endpoints { - if isEndpointConditionsReady(&eps.Endpoints[i].Conditions) { + if !isDummyEndpoint(&eps.Endpoints[i]) && isEndpointConditionsReady(&eps.Endpoints[i].Conditions) { count++ } } @@ -209,7 +210,7 @@ func (m *Manager) getServiceBackendServers(lb *lbv1.LoadBalancer) ([]pkglb.Backe servers := make([]pkglb.BackendServer, 0, len(vmis)) - // skip being-deleting vmi, no-address vmi + // skip being-deleted vmi, no-address vmi for _, vmi := range vmis { if vmi.DeletionTimestamp != nil { continue @@ -244,22 +245,22 @@ func (m *Manager) EnsureBackendServers(lb *lbv1.LoadBalancer) ([]pkglb.BackendSe return nil, err } - epsCopy, err := m.constructEndpointSliceFromBackendServers(eps, lb, servers) + epsNew, err := m.constructEndpointSliceFromBackendServers(eps, lb, servers) if err != nil { return nil, err } - // reate a new one + // create a new one if eps == nil { - // do not check IsAlreadyExists - eps, err = m.endpointSliceClient.Create(epsCopy) + // it is ok to do not check IsAlreadyExists, reconciler will pass + eps, err = m.endpointSliceClient.Create(epsNew) if err != nil { return nil, fmt.Errorf("fail to create endpointslice, error: %w", err) } } else { - if !reflect.DeepEqual(eps, epsCopy) { + if !reflect.DeepEqual(eps, epsNew) { logrus.Debugf("update endpointslice %s/%s", lb.Namespace, lb.Name) - eps, err = m.endpointSliceClient.Update(epsCopy) + eps, err = m.endpointSliceClient.Update(epsNew) if err != nil { return nil, fmt.Errorf("fail to update endpointslice, error: %w", err) } @@ -268,7 +269,12 @@ func (m *Manager) EnsureBackendServers(lb *lbv1.LoadBalancer) ([]pkglb.BackendSe // always ensure probs if err := m.ensureProbes(lb, eps); err != nil { - return nil, fmt.Errorf("fail to enuse probs, error: %w", err) + return nil, fmt.Errorf("fail to ensure probs, error: %w", err) + } + + // always ensure dummy endpoint + if err := m.ensureDummyEndpoint(lb, eps); err != nil { + return nil, fmt.Errorf("fail to ensure dummy endpointslice, error: %w", err) } return servers, nil @@ -313,7 +319,7 @@ func (m 
*Manager) ensureProbes(lb *lbv1.LoadBalancer, eps *discoveryv1.EndpointS targetProbers := make(map[string]prober.HealthOption) // indexing to skip G601 in go v121 for i := range eps.Endpoints { - if len(eps.Endpoints[i].Addresses) == 0 { + if len(eps.Endpoints[i].Addresses) == 0 || isDummyEndpoint(&eps.Endpoints[i]) { continue } targetProbers[marshalPorberAddress(lb, &eps.Endpoints[i])] = m.generateOneProber(lb, &eps.Endpoints[i]) @@ -345,11 +351,11 @@ func (m *Manager) updateAllProbers(uid string, activeProbers, targetProbers map[ return err } } - // proessed above or equal; then delete it from both maps + // replaced or equal; then delete it from both maps delete(activeProbers, ap.Address) delete(targetProbers, tp.Address) } - // not found in targetProbers, processed in next lines + // for those not found in the targetProbers, will be processed in next lines } // remove all remainings of activeProbers @@ -362,7 +368,6 @@ func (m *Manager) updateAllProbers(uid string, activeProbers, targetProbers map[ // add all remainings of targetProbers for _, tp := range targetProbers { - // already checked, skip error logrus.Debugf("+probe %s %s", uid, tp.Address) if err := m.AddWorker(uid, tp.Address, tp); err != nil { return err @@ -372,6 +377,45 @@ func (m *Manager) updateAllProbers(uid string, activeProbers, targetProbers map[ return nil } +// without at least one Ready (dummy) endpoint, the service may route traffic to local host +func (m *Manager) ensureDummyEndpoint(lb *lbv1.LoadBalancer, eps *discoveryv1.EndpointSlice) error { + dummyCount := 0 + activeCount := 0 + // if use `for _, ep := range eps.Endpoints` + // get: G601: Implicit memory aliasing in for loop. (gosec) + for i := range eps.Endpoints { + if isDummyEndpoint(&eps.Endpoints[i]) { + dummyCount++ + } else if isEndpointConditionsReady(&eps.Endpoints[i].Conditions) { + activeCount++ + } + } + + // add the dummy endpoint + if activeCount == 0 && dummyCount == 0 { + epsCopy := eps.DeepCopy() + epsCopy.Endpoints = appendDummyEndpoint(epsCopy.Endpoints, lb) + if _, err := m.endpointSliceClient.Update(epsCopy); err != nil { + return fmt.Errorf("fail to append dummy endpoint to lb %v endpoint, error: %w", lb.Name, err) + } + return nil + } + + // remove the dummy endpoint + if activeCount > 0 && dummyCount > 0 { + epsCopy := eps.DeepCopy() + epsCopy.Endpoints = slices.DeleteFunc(epsCopy.Endpoints, func(ep discoveryv1.Endpoint) bool { + return ep.TargetRef.UID == dummyEndpointID + }) + if _, err := m.endpointSliceClient.Update(epsCopy); err != nil { + return fmt.Errorf("fail to remove dummy endpoint from lb %v endpoint, error: %w", lb.Name, err) + } + return nil + } + + return nil +} + func (m *Manager) removeLBProbers(lb *lbv1.LoadBalancer) (int, error) { return m.RemoveWorkersByUid(marshalUID(lb.Namespace, lb.Name)) } @@ -515,6 +559,29 @@ func constructService(cur *corev1.Service, lb *lbv1.LoadBalancer) *corev1.Servic return svc } +const dummyEndpointIPv4Address = "10.52.0.255" +const dummyEndpointID = "dummy347-546a-4642-9da6-5608endpoint" + +func appendDummyEndpoint(eps []discoveryv1.Endpoint, lb *lbv1.LoadBalancer) []discoveryv1.Endpoint { + endpoint := discoveryv1.Endpoint{ + Addresses: []string{dummyEndpointIPv4Address}, + TargetRef: &corev1.ObjectReference{ + Namespace: lb.Namespace, + Name: lb.Name, + UID: dummyEndpointID, + }, + Conditions: discoveryv1.EndpointConditions{ + Ready: pointer.Bool(true), + }, + } + eps = append(eps, endpoint) + return eps +} + +func isDummyEndpoint(ep *discoveryv1.Endpoint) bool { + return 
ep.TargetRef.UID == dummyEndpointID +} + func (m *Manager) constructEndpointSliceFromBackendServers(cur *discoveryv1.EndpointSlice, lb *lbv1.LoadBalancer, servers []pkglb.BackendServer) (*discoveryv1.EndpointSlice, error) { eps := &discoveryv1.EndpointSlice{} if cur != nil { @@ -586,6 +653,10 @@ func (m *Manager) constructEndpointSliceFromBackendServers(cur *discoveryv1.Endp endpoints = append(endpoints, endpoint) } } + // a dummy endpoint avoids the LB traffic is routed to other services/local host accidentally + if len(endpoints) == 0 { + endpoints = appendDummyEndpoint(endpoints, lb) + } eps.Endpoints = endpoints logrus.Debugln("constructEndpointSliceFromBackendServers: ", eps) diff --git a/pkg/webhook/loadbalancer/mutator.go b/pkg/webhook/loadbalancer/mutator.go index b8f4168b..ad95c035 100644 --- a/pkg/webhook/loadbalancer/mutator.go +++ b/pkg/webhook/loadbalancer/mutator.go @@ -41,7 +41,20 @@ func NewMutator(namespaceCache ctlcorev1.NamespaceCache, func (m *mutator) Create(_ *admission.Request, newObj runtime.Object) (admission.Patch, error) { lb := newObj.(*lbv1.LoadBalancer) - return m.getAnnotationsPatch(lb) + ap, err := m.getAnnotationsPatch(lb) + if err != nil { + return nil, err + } + + hcp, err := m.getHealthyCheckPatch(lb) + if err != nil { + return nil, err + } + + if len(ap) == 0 { + return hcp, nil + } + return append(ap, hcp...), nil } func (m *mutator) Update(_ *admission.Request, _, newObj runtime.Object) (admission.Patch, error) { @@ -51,7 +64,62 @@ func (m *mutator) Update(_ *admission.Request, _, newObj runtime.Object) (admiss return nil, nil } - return m.getAnnotationsPatch(lb) + ap, err := m.getAnnotationsPatch(lb) + if err != nil { + return nil, err + } + + hcp, err := m.getHealthyCheckPatch(lb) + if err != nil { + return nil, err + } + + if len(ap) == 0 { + return hcp, nil + } + return append(ap, hcp...), nil +} + +// those fields are not checked in the past, necessary to overwrite them to at least 1 +func (m *mutator) getHealthyCheckPatch(lb *lbv1.LoadBalancer) (admission.Patch, error) { + if lb.Spec.HealthCheck == nil || lb.Spec.HealthCheck.Port == 0 { + return nil, nil + } + + hc := *lb.Spec.HealthCheck + patched := false + + if hc.SuccessThreshold == 0 { + hc.SuccessThreshold = 2 + patched = true + } + + if hc.FailureThreshold == 0 { + hc.FailureThreshold = 2 + patched = true + } + + if hc.PeriodSeconds == 0 { + hc.PeriodSeconds = 1 + patched = true + } + + if hc.TimeoutSeconds == 0 { + hc.TimeoutSeconds = 1 + patched = true + } + + if patched { + return []admission.PatchOp{ + { + Op: admission.PatchOpReplace, + Path: "/spec/healthCheck", + Value: hc, + }, + }, nil + } + + return nil, nil } func (m *mutator) getAnnotationsPatch(lb *lbv1.LoadBalancer) (admission.Patch, error) { diff --git a/pkg/webhook/loadbalancer/mutator_test.go b/pkg/webhook/loadbalancer/mutator_test.go index 980d2299..d9754c6e 100644 --- a/pkg/webhook/loadbalancer/mutator_test.go +++ b/pkg/webhook/loadbalancer/mutator_test.go @@ -8,6 +8,10 @@ import ( harvesterfakeclients "github.com/harvester/harvester/pkg/util/fakeclients" corefake "k8s.io/client-go/kubernetes/fake" + lbv1 "github.com/harvester/harvester-load-balancer/pkg/apis/loadbalancer.harvesterhci.io/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/harvester/harvester-load-balancer/pkg/utils" ) @@ -39,6 +43,67 @@ func TestFindProject(t *testing.T) { }, } + testsHealthCheckMutatored := []struct { + name string + lb *lbv1.LoadBalancer + wantErr bool + opsLen int + }{ + { + name: 
"health check mutatored case", + lb: &lbv1.LoadBalancer{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 80, Protocol: corev1.ProtocolTCP}, + {Name: "b", BackendPort: 32, Protocol: corev1.ProtocolUDP}, + }, + HealthCheck: &lbv1.HealthCheck{Port: 80, SuccessThreshold: 0, FailureThreshold: 1, PeriodSeconds: 1, TimeoutSeconds: 1}, + }, + }, + wantErr: false, + opsLen: 2, + }, + { + name: "health check right case: valid parameters", + lb: &lbv1.LoadBalancer{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 80, Protocol: corev1.ProtocolTCP}, + {Name: "b", BackendPort: 32, Protocol: corev1.ProtocolUDP}, + }, + HealthCheck: &lbv1.HealthCheck{Port: 80, SuccessThreshold: 1, FailureThreshold: 1, PeriodSeconds: 1, TimeoutSeconds: 1}, + }, + }, + wantErr: false, + opsLen: 1, + }, + { + name: "health check right case: no health check", + lb: &lbv1.LoadBalancer{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 80, Protocol: corev1.ProtocolTCP}, + {Name: "b", BackendPort: 32, Protocol: corev1.ProtocolUDP}, + }, + }, + }, + wantErr: false, + opsLen: 1, + }, + } + for _, test := range tests { if project, err := m.findProject(test.namespace); err != nil { t.Error(err) @@ -46,6 +111,23 @@ func TestFindProject(t *testing.T) { t.Errorf("want project %s through namespace %s, got %s", test.wantProject, test.namespace, project) } } + + for _, test := range testsHealthCheckMutatored { + if pt, err := m.Create(nil, test.lb); (err != nil) != test.wantErr { + t.Error(err) + } else if len(pt) != test.opsLen { + // return 2 ops + // [{Op:replace Path:/metadata/annotations Value:map[loadbalancer.harvesterhci.io/namespace:default loadbalancer.harvesterhci.io/network: loadbalancer.harvesterhci.io/project:local/p-abcde]} + // {Op:replace Path:/spec/healthCheck Value:{Port:80 SuccessThreshold:2 FailureThreshold:1 PeriodSeconds:1 TimeoutSeconds:1}}] + t.Errorf("create test %v return patchOps len %v != %v, %+v", test.name, len(pt), test.opsLen, pt) + } + + if pt, err := m.Update(nil, nil, test.lb); (err != nil) != test.wantErr { + t.Error(err) + } else if len(pt) != test.opsLen { + t.Errorf("update test %v return patchOps len %v != %v, %+v", test.name, len(pt), test.opsLen, pt) + } + } } // TestFindNetwork tests the function findNetwork diff --git a/pkg/webhook/loadbalancer/validator.go b/pkg/webhook/loadbalancer/validator.go index a59f271f..cb115a50 100644 --- a/pkg/webhook/loadbalancer/validator.go +++ b/pkg/webhook/loadbalancer/validator.go @@ -5,6 +5,7 @@ import ( "github.com/harvester/webhook/pkg/server/admission" admissionregv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" lbv1 "github.com/harvester/harvester-load-balancer/pkg/apis/loadbalancer.harvesterhci.io/v1beta1" @@ -27,6 +28,10 @@ func (v *validator) Create(_ *admission.Request, newObj runtime.Object) error { return fmt.Errorf("create loadbalancer %s/%s failed: %w", lb.Namespace, lb.Name, err) } + if err := checkHealthyCheck(lb); err != nil { + return fmt.Errorf("create loadbalancer %s/%s failed with healthyCheck: %w", lb.Namespace, lb.Name, err) + } + return nil } @@ -41,6 +46,10 @@ func (v *validator) Update(_ *admission.Request, oldObj, newObj runtime.Object) 
return fmt.Errorf("update loadbalancer %s/%s failed: %w", lb.Namespace, lb.Name, err) } + if err := checkHealthyCheck(lb); err != nil { + return fmt.Errorf("update loadbalancer %s/%s failed with healthyCheck: %w", lb.Namespace, lb.Name, err) + } + return nil } @@ -58,8 +67,13 @@ func (v *validator) Resource() admission.Resource { } } +const maxPort = 65535 + func checkListeners(lb *lbv1.LoadBalancer) error { nameMap, portMap, backendMap := map[string]bool{}, map[int32]int{}, map[int32]int{} + if len(lb.Spec.Listeners) == 0 { + return fmt.Errorf("the loadbalancer needs to have at least one listener") + } for i, listener := range lb.Spec.Listeners { // check listener name if _, ok := nameMap[listener.Name]; ok { @@ -82,5 +96,53 @@ func checkListeners(lb *lbv1.LoadBalancer) error { backendMap[listener.BackendPort] = i } + for _, listener := range lb.Spec.Listeners { + // check listener name + if listener.Port > maxPort { + return fmt.Errorf("listener port %v must <= %v", listener.Port, maxPort) + } else if listener.Port < 1 { + return fmt.Errorf("listener port %v must >= 1", listener.Port) + } + if listener.BackendPort > maxPort { + return fmt.Errorf("listener backend port %v must <= %v", listener.Port, maxPort) + } else if listener.BackendPort < 1 { + return fmt.Errorf("listener backend port %v must >= 1", listener.Port) + } + } + + return nil +} + +func checkHealthyCheck(lb *lbv1.LoadBalancer) error { + if lb.Spec.HealthCheck != nil && lb.Spec.HealthCheck.Port != 0 { + wrongProtocol := false + for _, listener := range lb.Spec.Listeners { + // check listener port and protocol, only TCP is supported now + if uint(listener.BackendPort) == lb.Spec.HealthCheck.Port { + if listener.Protocol == corev1.ProtocolTCP { + if lb.Spec.HealthCheck.SuccessThreshold == 0 { + return fmt.Errorf("healthcheck SuccessThreshold should > 0") + } + if lb.Spec.HealthCheck.FailureThreshold == 0 { + return fmt.Errorf("healthcheck FailureThreshold should > 0") + } + if lb.Spec.HealthCheck.PeriodSeconds == 0 { + return fmt.Errorf("healthcheck PeriodSeconds should > 0") + } + if lb.Spec.HealthCheck.TimeoutSeconds == 0 { + return fmt.Errorf("healthcheck TimeoutSeconds should > 0") + } + return nil + } + // not the expected TCP + wrongProtocol = true + } + } + if wrongProtocol { + return fmt.Errorf("healthcheck port %v can only be a TCP backend port", lb.Spec.HealthCheck.Port) + } + return fmt.Errorf("healthcheck port %v is not in listener backend port list", lb.Spec.HealthCheck.Port) + } + return nil } diff --git a/pkg/webhook/loadbalancer/validator_test.go b/pkg/webhook/loadbalancer/validator_test.go index e487c4d0..aa4ac70b 100644 --- a/pkg/webhook/loadbalancer/validator_test.go +++ b/pkg/webhook/loadbalancer/validator_test.go @@ -4,6 +4,7 @@ import ( "testing" lbv1 "github.com/harvester/harvester-load-balancer/pkg/apis/loadbalancer.harvesterhci.io/v1beta1" + corev1 "k8s.io/api/core/v1" ) func TestCheckListeners(t *testing.T) { @@ -48,6 +49,54 @@ func TestCheckListeners(t *testing.T) { }, wantErr: true, }, + { + name: "port < 1", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", Port: -1}, + {Name: "b", Port: 80}, + }, + }, + }, + wantErr: true, + }, + { + name: "port > 65535", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", Port: 80}, + {Name: "b", Port: 8000}, + }, + }, + }, + wantErr: true, + }, + { + name: "backend port < 1", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + 
{Name: "a", BackendPort: 0}, + {Name: "b", BackendPort: 80}, + }, + }, + }, + wantErr: true, + }, + { + name: "backend port > 65535", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 65536}, + {Name: "b", BackendPort: 80}, + }, + }, + }, + wantErr: true, + }, { name: "right case", lb: &lbv1.LoadBalancer{ @@ -62,9 +111,113 @@ func TestCheckListeners(t *testing.T) { }, } + testsHealtyCheck := []struct { + name string + lb *lbv1.LoadBalancer + wantErr bool + }{ + { + name: "health check port is not in backend port list", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 80, Protocol: corev1.ProtocolTCP}, + {Name: "b", BackendPort: 32, Protocol: corev1.ProtocolUDP}, + }, + HealthCheck: &lbv1.HealthCheck{Port: 99}, + }, + }, + wantErr: true, + }, + { + name: "health check protocol is not expected tcp", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 80, Protocol: corev1.ProtocolTCP}, + {Name: "b", BackendPort: 32, Protocol: corev1.ProtocolUDP}, + }, + HealthCheck: &lbv1.HealthCheck{Port: 32}, + }, + }, + wantErr: true, + }, + { + name: "health check parameter SuccessThreshold is error", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 80, Protocol: corev1.ProtocolTCP}, + {Name: "b", BackendPort: 32, Protocol: corev1.ProtocolUDP}, + }, + HealthCheck: &lbv1.HealthCheck{Port: 80, SuccessThreshold: 0}, + }, + }, + wantErr: true, + }, + { + name: "health check parameter FailureThreshold is error", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 80, Protocol: corev1.ProtocolTCP}, + {Name: "b", BackendPort: 32, Protocol: corev1.ProtocolUDP}, + }, + HealthCheck: &lbv1.HealthCheck{Port: 80, FailureThreshold: 0}, + }, + }, + wantErr: true, + }, + { + name: "health check parameter PeriodSeconds is error", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 80, Protocol: corev1.ProtocolTCP}, + {Name: "b", BackendPort: 32, Protocol: corev1.ProtocolUDP}, + }, + HealthCheck: &lbv1.HealthCheck{Port: 80, PeriodSeconds: 0}, + }, + }, + wantErr: true, + }, + { + name: "health check parameter TimeoutSeconds is error", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 80, Protocol: corev1.ProtocolTCP}, + {Name: "b", BackendPort: 32, Protocol: corev1.ProtocolUDP}, + }, + HealthCheck: &lbv1.HealthCheck{Port: 80, TimeoutSeconds: 0}, + }, + }, + wantErr: true, + }, + { + name: "health check right case", + lb: &lbv1.LoadBalancer{ + Spec: lbv1.LoadBalancerSpec{ + Listeners: []lbv1.Listener{ + {Name: "a", BackendPort: 80, Protocol: corev1.ProtocolTCP}, + {Name: "b", BackendPort: 32, Protocol: corev1.ProtocolUDP}, + }, + HealthCheck: &lbv1.HealthCheck{Port: 80, SuccessThreshold: 1, FailureThreshold: 1, PeriodSeconds: 1, TimeoutSeconds: 1}, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { if err := checkListeners(tt.lb); (err != nil) != tt.wantErr { - t.Errorf("%q. checkPorts() error = %v, wantErr %v", tt.name, err, tt.wantErr) + t.Errorf("%q. checkListeners() error = %v, wantErr %v", tt.name, err, tt.wantErr) + } + } + + for _, tt := range testsHealtyCheck { + if err := checkHealthyCheck(tt.lb); (err != nil) != tt.wantErr { + t.Errorf("%q. 
checkHealthyCheck() error = %v, wantErr %v", tt.name, err, tt.wantErr) } } }
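
Note (illustrative, not part of the diff above): a minimal, self-contained sketch of the dummy-endpoint rule this patch adds to servicelb. The helper name reconcileDummyEndpoint and the standalone main are assumptions for the example only; in the patch the same logic lives in Manager.ensureDummyEndpoint and constructEndpointSliceFromBackendServers and goes through the EndpointSlice client.

package main

import (
	"fmt"
	"slices"

	corev1 "k8s.io/api/core/v1"
	discoveryv1 "k8s.io/api/discovery/v1"
	"k8s.io/utils/pointer"
)

const (
	dummyAddr = "10.52.0.255"                          // placeholder address, never dialed
	dummyUID  = "dummy347-546a-4642-9da6-5608endpoint" // marks the endpoint as the dummy one
)

// reconcileDummyEndpoint keeps exactly one dummy Ready endpoint while no real
// backend is Ready, and drops it as soon as a real backend becomes Ready again.
func reconcileDummyEndpoint(eps []discoveryv1.Endpoint) []discoveryv1.Endpoint {
	active, dummies := 0, 0
	for i := range eps {
		if eps[i].TargetRef != nil && string(eps[i].TargetRef.UID) == dummyUID {
			dummies++
		} else if eps[i].Conditions.Ready != nil && *eps[i].Conditions.Ready {
			active++
		}
	}
	switch {
	case active == 0 && dummies == 0:
		// no Ready backend at all: add the dummy so the Service never ends up
		// routing load-balancer traffic to the local host or another service
		return append(eps, discoveryv1.Endpoint{
			Addresses:  []string{dummyAddr},
			TargetRef:  &corev1.ObjectReference{UID: dummyUID},
			Conditions: discoveryv1.EndpointConditions{Ready: pointer.Bool(true)},
		})
	case active > 0 && dummies > 0:
		// a real backend is Ready again: remove the dummy
		return slices.DeleteFunc(eps, func(ep discoveryv1.Endpoint) bool {
			return ep.TargetRef != nil && string(ep.TargetRef.UID) == dummyUID
		})
	default:
		return eps
	}
}

func main() {
	eps := reconcileDummyEndpoint(nil) // empty slice: the dummy endpoint is added
	fmt.Println(len(eps))              // 1
}

The webhook changes follow the same defensive idea: when spec.healthCheck sets a port, the mutator patches any zero-valued SuccessThreshold, FailureThreshold, PeriodSeconds or TimeoutSeconds up to a small positive default, and the validator rejects health-check ports that are not a TCP backend port, so the prober never runs with a zero period or timeout.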