From c53cd52b4199e164e247afc52efcf8365852c702 Mon Sep 17 00:00:00 2001 From: Shriram Rajagopalan Date: Fri, 16 Aug 2019 13:16:01 -0400 Subject: [PATCH] Fix UDS and Sidecar ingress listeners issues (#16298) * Fix UDS and Sidecar ingress listeners issues Signed-off-by: Shriram Rajagopalan * lint Signed-off-by: Shriram Rajagopalan * fixes Signed-off-by: Shriram Rajagopalan * lint Signed-off-by: Shriram Rajagopalan * listener fixes * tests Signed-off-by: Shriram Rajagopalan --- pilot/pkg/networking/core/v1alpha3/cluster.go | 51 ++++----- .../pkg/networking/core/v1alpha3/listener.go | 74 +++++-------- .../networking/core/v1alpha3/listener_test.go | 21 +++- pilot/pkg/proxy/envoy/v2/lds_test.go | 23 ++-- pkg/config/validation/validation.go | 41 ++++--- pkg/config/validation/validation_test.go | 17 ++- .../pilot/sidecarscope/main_test.go | 9 ++ .../scope_serviceentry_static_test.go | 102 ++++++++++++++++++ 8 files changed, 234 insertions(+), 104 deletions(-) diff --git a/pilot/pkg/networking/core/v1alpha3/cluster.go b/pilot/pkg/networking/core/v1alpha3/cluster.go index 6953fa485204..e29a8bd3cbec 100644 --- a/pilot/pkg/networking/core/v1alpha3/cluster.go +++ b/pilot/pkg/networking/core/v1alpha3/cluster.go @@ -39,6 +39,7 @@ import ( "istio.io/istio/pilot/pkg/networking/util" authn_model "istio.io/istio/pilot/pkg/security/model" "istio.io/istio/pkg/config/constants" + "istio.io/istio/pkg/config/host" "istio.io/istio/pkg/config/labels" "istio.io/istio/pkg/config/protocol" ) @@ -498,13 +499,11 @@ func (configgen *ConfigGeneratorImpl) buildInboundClusters(env *model.Environmen clusters = append(clusters, mgmtCluster) } } else { - if len(instances) == 0 { - return clusters - } rule := sidecarScope.Config.Spec.(*networking.Sidecar) + sidecarScopeID := sidecarScope.Config.Name + "." + sidecarScope.Config.Namespace for _, ingressListener := range rule.Ingress { // LDS would have setup the inbound clusters - // as inbound|portNumber|portName|Hostname + // as inbound|portNumber|portName|Hostname[or]SidecarScopeID listenPort := &model.Port{ Port: int(ingressListener.Port.Number), Protocol: protocol.Parse(ingressListener.Port.Protocol), @@ -515,11 +514,13 @@ func (configgen *ConfigGeneratorImpl) buildInboundClusters(env *model.Environmen // by the user and parse it into host:port or a unix domain socket // The default endpoint can be 127.0.0.1:port or :port or unix domain socket endpointAddress := actualLocalHost + endpointFamily := model.AddressFamilyTCP port := 0 var err error if strings.HasPrefix(ingressListener.DefaultEndpoint, model.UnixAddressPrefix) { // this is a UDS endpoint. assign it as is endpointAddress = ingressListener.DefaultEndpoint + endpointFamily = model.AddressFamilyUnix } else { // parse the ip, port. Validation guarantees presence of : parts := strings.Split(ingressListener.DefaultEndpoint, ":") @@ -532,19 +533,27 @@ func (configgen *ConfigGeneratorImpl) buildInboundClusters(env *model.Environmen } // Find the service instance that corresponds to this ingress listener by looking - // for a service instance that either matches this ingress port or one that has - // a port with same name as this ingress port + // for a service instance that either matches this ingress port as this will allow us + // to generate the right cluster name that LDS expects inbound|portNumber|portName|Hostname instance := configgen.findServiceInstanceForIngressListener(instances, ingressListener) if instance == nil { - // We didn't find a matching instance - continue + // We didn't find a matching instance. 
Create a dummy one because we need the right + // params to generate the right cluster name. LDS would have setup the cluster as + // as inbound|portNumber|portName|SidecarScopeID + instance = &model.ServiceInstance{ + Endpoint: model.NetworkEndpoint{}, + Service: &model.Service{ + Hostname: host.Name(sidecarScopeID), + Attributes: model.ServiceAttributes{ + Name: sidecarScope.Config.Name, + Namespace: sidecarScope.Config.Namespace, + }, + }, + } } - // Update the values here so that the plugins use the right ports - // uds values - // TODO: all plugins need to be updated to account for the fact that - // the port may be 0 but bind may have a UDS value + instance.Endpoint.Family = endpointFamily instance.Endpoint.Address = endpointAddress instance.Endpoint.ServicePort = listenPort instance.Endpoint.Port = port @@ -577,23 +586,7 @@ func (configgen *ConfigGeneratorImpl) findServiceInstanceForIngressListener(inst Labels: realInstance.Labels, ServiceAccount: realInstance.ServiceAccount, } - return instance - } - } - - // If the port number does not match, the user might have specified a - // UDS socket with port number 0. So search by name - for _, realInstance := range instances { - for _, iport := range realInstance.Service.Ports { - if iport.Name == ingressListener.Port.Name { - instance = &model.ServiceInstance{ - Endpoint: realInstance.Endpoint, - Service: realInstance.Service, - Labels: realInstance.Labels, - ServiceAccount: realInstance.ServiceAccount, - } - return instance - } + break } } diff --git a/pilot/pkg/networking/core/v1alpha3/listener.go b/pilot/pkg/networking/core/v1alpha3/listener.go index e8aade1880cf..a5c36fb05577 100644 --- a/pilot/pkg/networking/core/v1alpha3/listener.go +++ b/pilot/pkg/networking/core/v1alpha3/listener.go @@ -387,8 +387,9 @@ func (configgen *ConfigGeneratorImpl) buildSidecarInboundListeners( } else { rule := sidecarScope.Config.Spec.(*networking.Sidecar) + sidecarScopeID := sidecarScope.Config.Name + "." + sidecarScope.Config.Namespace for _, ingressListener := range rule.Ingress { - // determine the bindToPort setting for listeners + // determine the bindToPort setting for listeners. Validation guarantees that these are all IP listeners. bindToPort := false if noneMode { // dont care what the listener's capture mode setting is. The proxy does not use iptables @@ -409,26 +410,29 @@ func (configgen *ConfigGeneratorImpl) buildSidecarInboundListeners( Name: ingressListener.Port.Name, } - // if app doesn't have a declared ServicePort, but a sidecar ingress is defined - we can't generate a listener - // for that port since we don't know what policies or configs apply to it ( many are based on service matching). - // Sidecar doesn't include all the info needed to configure a port. + bind := ingressListener.Bind + if len(bind) == 0 { + // User did not provide one. Pick the proxy's IP or wildcard inbound listener. + bind = getSidecarInboundBindIP(node) + } + instance := configgen.findServiceInstanceForIngressListener(node.ServiceInstances, ingressListener) if instance == nil { - // We didn't find a matching service instance. Skip this ingress listener - continue - } - - bind := ingressListener.Bind - // if bindToPort is true, we set the bind address if empty to instance unicast IP - this is an inbound port. 
- // if no global unicast IP is available, then default to wildcard IP - 0.0.0.0 or :: - if len(bind) == 0 && bindToPort { - bind = getSidecarInboundBindIP(node) - } else if len(bind) == 0 { - // auto infer the IP from the proxyInstances - // We assume all endpoints in the proxy instances have the same IP - // as they should all be pointing to the same network endpoint - bind = instance.Endpoint.Address + // We didn't find a matching instance. Create a dummy one because we need the right + // params to generate the right cluster name. CDS would have setup the cluster as + // as inbound|portNumber|portName|SidecarScopeID + instance = &model.ServiceInstance{ + Endpoint: model.NetworkEndpoint{}, + Service: &model.Service{ + Hostname: host.Name(sidecarScopeID), + Attributes: model.ServiceAttributes{ + Name: sidecarScope.Config.Name, + // This will ensure that the right AuthN policies are selected + Namespace: sidecarScope.Config.Namespace, + }, + }, + } } listenerOpts := buildListenerOpts{ @@ -441,15 +445,10 @@ func (configgen *ConfigGeneratorImpl) buildSidecarInboundListeners( bindToPort: bindToPort, } - // Update the values here so that the plugins use the right ports - // uds values - // TODO: all plugins need to be updated to account for the fact that - // the port may be 0 but bind may have a UDS value - // Inboundroute will be different for - instance.Endpoint.Address = bind + // we don't need to set other fields of the endpoint here as + // the consumers of this service instance (listener/filter chain constructors) + // are simply looking for the service port and the service associated with the instance. instance.Endpoint.ServicePort = listenPort - // TODO: this should be parsed from the defaultEndpoint field in the ingressListener - instance.Endpoint.Port = listenPort.Port // Validation ensures that the protocol specified in Sidecar.ingress // is always a valid known protocol @@ -542,7 +541,6 @@ func (configgen *ConfigGeneratorImpl) buildSidecarInboundListenerForPortOrUDS(no log.Debugf("Multiple plugins setup inbound filter chains for listener %s, FilterChainMatch may not work as intended!", listenerMapKey) } else { - log.Debugf("Use default filter chain for %v", pluginParams.ServiceInstance.Endpoint) // add one empty entry to the list so we generate a default listener below allChains = []plugin.FilterChain{{}} } @@ -857,12 +855,6 @@ func (configgen *ConfigGeneratorImpl) buildSidecarOutboundListeners(env *model.E } for _, service := range services { for _, servicePort := range service.Ports { - // check if this node is capable of starting a listener on this service port - // if bindToPort is true. Else Envoy will crash - if !validatePort(node, servicePort.Port, bindToPort) { - continue - } - listenerOpts := buildListenerOpts{ env: env, proxy: node, @@ -985,22 +977,6 @@ func (configgen *ConfigGeneratorImpl) buildHTTPProxy(env *model.Environment, nod return l } -// validatePort checks if the sidecar proxy is capable of listening on a -// given port in a particular bind mode for a given UID. 
Sidecars not running -// as root wont be able to listen on ports <1024 when using bindToPort = true -func validatePort(node *model.Proxy, i int, bindToPort bool) bool { - if !bindToPort { - return true // all good, iptables doesn't care - } - - if i > 1024 { - return true - } - - proxyProcessUID := node.Metadata[model.NodeMetadataSidecarUID] - return proxyProcessUID == "0" -} - func (configgen *ConfigGeneratorImpl) buildSidecarOutboundHTTPListenerOptsForPortOrUDS(node *model.Proxy, listenerMapKey *string, currentListenerEntry **outboundListenerEntry, listenerOpts *buildListenerOpts, pluginParams *plugin.InputParams, listenerMap map[string]*outboundListenerEntry, actualWildcard string) (bool, []*filterChainOpts) { diff --git a/pilot/pkg/networking/core/v1alpha3/listener_test.go b/pilot/pkg/networking/core/v1alpha3/listener_test.go index 61bb4341bd3c..7ced2631edcd 100644 --- a/pilot/pkg/networking/core/v1alpha3/listener_test.go +++ b/pilot/pkg/networking/core/v1alpha3/listener_test.go @@ -568,9 +568,20 @@ func testInboundListenerConfigWithSidecarWithoutServicesV13(t *testing.T, proxy }, } listeners := buildInboundListeners(p, proxy, sidecarConfig) - if expected := 0; len(listeners) != expected { + if expected := 1; len(listeners) != expected { t.Fatalf("expected %d listeners, found %d", expected, len(listeners)) } + + if len(listeners[0].FilterChains) != 4 || + !isHTTPFilterChain(listeners[0].FilterChains[0]) || + !isHTTPFilterChain(listeners[0].FilterChains[1]) || + !isTCPFilterChain(listeners[0].FilterChains[2]) || + !isTCPFilterChain(listeners[0].FilterChains[3]) { + t.Fatalf("expectd %d filter chains, %d http filter chains and %d tcp filter chain", 4, 2, 2) + } + + verifyHTTPFilterChainMatch(t, listeners[0].FilterChains[0]) + verifyHTTPFilterChainMatch(t, listeners[0].FilterChains[1]) } func testInboundListenerConfigWithoutServiceV13(t *testing.T, proxy *model.Proxy) { @@ -794,9 +805,15 @@ func testInboundListenerConfigWithSidecarWithoutServices(t *testing.T, proxy *mo }, } listeners := buildInboundListeners(p, proxy, sidecarConfig) - if expected := 0; len(listeners) != expected { + if expected := 1; len(listeners) != expected { t.Fatalf("expected %d listeners, found %d", expected, len(listeners)) } + if !isHTTPListener(listeners[0]) { + t.Fatal("expected HTTP listener, found TCP") + } + for _, l := range listeners { + verifyInboundHTTP10(t, isNodeHTTP10(proxy), l) + } } func testOutboundListenerConfigWithSidecar(t *testing.T, services ...*model.Service) { diff --git a/pilot/pkg/proxy/envoy/v2/lds_test.go b/pilot/pkg/proxy/envoy/v2/lds_test.go index 09403ca998ae..431eb53ae009 100644 --- a/pilot/pkg/proxy/envoy/v2/lds_test.go +++ b/pilot/pkg/proxy/envoy/v2/lds_test.go @@ -75,13 +75,13 @@ func TestLDSIsolated(t *testing.T) { t.Fatal(err) } - // 7071 (inbound), 2001 (service - also as http proxy), 15002 (http-proxy) + // 7071 (inbound), 2001 (service - also as http proxy), 15002 (http-proxy), 18010 (fortio) // We dont get mixer on 9091 or 15004 because there are no services defined in istio-system namespace // in the none.yaml setup - if len(ldsr.GetHTTPListeners()) != 3 { + if len(ldsr.GetHTTPListeners()) != 4 { // TODO: we are still debating if for HTTP services we have any use case to create a 127.0.0.1:port outbound // for the service (the http proxy is already covering this) - t.Error("HTTP listeners, expecting 5 got ", len(ldsr.GetHTTPListeners()), ldsr.GetHTTPListeners()) + t.Error("HTTP listeners, expecting 4 got ", len(ldsr.GetHTTPListeners()), ldsr.GetHTTPListeners()) } // 
s1tcp:2000 outbound, bind=true (to reach other instances of the service) @@ -416,9 +416,11 @@ func TestLDSWithSidecarForWorkloadWithoutService(t *testing.T) { return } - // Expect 1 HTTP listeners for 8081 - if len(adsResponse.GetHTTPListeners()) != 1 { - t.Fatalf("Expected 1 http listeners, got %d", len(adsResponse.GetHTTPListeners())) + // Expect 3 HTTP listeners for outbound 8081, inbound 9080 and one virtualInbound which has the same inbound 9080 + // as a filter chain. Since the adsclient code treats any listener with a HTTP connection manager filter in ANY + // filter chain, as a HTTP listener, we end up getting both 9080 and virtualInbound. + if len(adsResponse.GetHTTPListeners()) != 3 { + t.Fatalf("Expected 3 http listeners, got %d", len(adsResponse.GetHTTPListeners())) } // TODO: This is flimsy. The ADSC code treats any listener with http connection manager as a HTTP listener @@ -435,6 +437,15 @@ func TestLDSWithSidecarForWorkloadWithoutService(t *testing.T) { t.Fatal("Expected listener for 0.0.0.0_8081") } + // Also check that the other two listeners are 98.1.1.1_9080, and virtualInbound + if l := adsResponse.GetHTTPListeners()["98.1.1.1_9080"]; l == nil { + t.Fatal("Expected listener for 98.1.1.1_9080") + } + + if l := adsResponse.GetHTTPListeners()["virtualInbound"]; l == nil { + t.Fatal("Expected listener virtualInbound") + } + // Expect only one eds cluster for http1.ns1.svc.cluster.local if len(adsResponse.GetEdsClusters()) != 1 { t.Fatalf("Expected 1 eds cluster, got %d", len(adsResponse.GetEdsClusters())) diff --git a/pkg/config/validation/validation.go b/pkg/config/validation/validation.go index 710cff052492..c8a979af9998 100644 --- a/pkg/config/validation/validation.go +++ b/pkg/config/validation/validation.go @@ -637,7 +637,6 @@ func ValidateSidecar(_, _ string, msg proto.Message) (errs error) { } portMap := make(map[uint32]struct{}) - udsMap := make(map[string]struct{}) for _, i := range rule.Ingress { if i.Port == nil { errs = appendErrors(errs, fmt.Errorf("sidecar: port is required for ingress listeners")) @@ -645,20 +644,12 @@ func ValidateSidecar(_, _ string, msg proto.Message) (errs error) { } bind := i.GetBind() - captureMode := i.GetCaptureMode() - errs = appendErrors(errs, validateSidecarPortBindAndCaptureMode(i.Port, bind, captureMode)) + errs = appendErrors(errs, validateSidecarIngressPortAndBind(i.Port, bind)) - if i.Port.Number == 0 { - if _, found := udsMap[bind]; found { - errs = appendErrors(errs, fmt.Errorf("sidecar: unix domain socket values for listeners must be unique")) - } - udsMap[bind] = struct{}{} - } else { - if _, found := portMap[i.Port.Number]; found { - errs = appendErrors(errs, fmt.Errorf("sidecar: ports on IP bound listeners must be unique")) - } - portMap[i.Port.Number] = struct{}{} + if _, found := portMap[i.Port.Number]; found { + errs = appendErrors(errs, fmt.Errorf("sidecar: ports on IP bound listeners must be unique")) } + portMap[i.Port.Number] = struct{}{} if len(i.DefaultEndpoint) == 0 { errs = appendErrors(errs, fmt.Errorf("sidecar: default endpoint must be set for all ingress listeners")) @@ -687,7 +678,7 @@ func ValidateSidecar(_, _ string, msg proto.Message) (errs error) { } portMap = make(map[uint32]struct{}) - udsMap = make(map[string]struct{}) + udsMap := make(map[string]struct{}) catchAllEgressListenerFound := false for index, i := range rule.Egress { // there can be only one catch all egress listener with empty port, and it should be the last listener. 
@@ -705,7 +696,7 @@ func ValidateSidecar(_, _ string, msg proto.Message) (errs error) { } else { bind := i.GetBind() captureMode := i.GetCaptureMode() - errs = appendErrors(errs, validateSidecarPortBindAndCaptureMode(i.Port, bind, captureMode)) + errs = appendErrors(errs, validateSidecarEgressPortBindAndCaptureMode(i.Port, bind, captureMode)) if i.Port.Number == 0 { if _, found := udsMap[bind]; found { @@ -734,7 +725,7 @@ func ValidateSidecar(_, _ string, msg proto.Message) (errs error) { return } -func validateSidecarPortBindAndCaptureMode(port *networking.Port, bind string, +func validateSidecarEgressPortBindAndCaptureMode(port *networking.Port, bind string, captureMode networking.CaptureMode) (errs error) { // Port name is optional. Validate if exists. @@ -770,6 +761,24 @@ func validateSidecarPortBindAndCaptureMode(port *networking.Port, bind string, return } +func validateSidecarIngressPortAndBind(port *networking.Port, bind string) (errs error) { + + // Port name is optional. Validate if exists. + if len(port.Name) > 0 { + errs = appendErrors(errs, validatePortName(port.Name)) + } + + errs = appendErrors(errs, + validateProtocol(port.Protocol), + ValidatePort(int(port.Number))) + + if len(bind) != 0 { + errs = appendErrors(errs, ValidateIPv4Address(bind)) + } + + return +} + func validateTrafficPolicy(policy *networking.TrafficPolicy) error { if policy == nil { return nil diff --git a/pkg/config/validation/validation_test.go b/pkg/config/validation/validation_test.go index b0638f729200..3381ee2b4f17 100644 --- a/pkg/config/validation/validation_test.go +++ b/pkg/config/validation/validation_test.go @@ -4351,7 +4351,7 @@ func TestValidateSidecar(t *testing.T) { }, }, }, true}, - {"UDS bind", &networking.Sidecar{ + {"UDS bind in outbound", &networking.Sidecar{ Egress: []*networking.IstioEgressListener{ { Port: &networking.Port{ @@ -4366,7 +4366,20 @@ func TestValidateSidecar(t *testing.T) { }, }, }, true}, - {"UDS bind 2", &networking.Sidecar{ + {"UDS bind in inbound", &networking.Sidecar{ + Ingress: []*networking.IstioIngressListener{ + { + Port: &networking.Port{ + Protocol: "http", + Number: 0, + Name: "uds", + }, + Bind: "unix:///@foo/bar/com", + DefaultEndpoint: "127.0.0.1:9999", + }, + }, + }, false}, + {"UDS bind in outbound 2", &networking.Sidecar{ Egress: []*networking.IstioEgressListener{ { Port: &networking.Port{ diff --git a/tests/integration/pilot/sidecarscope/main_test.go b/tests/integration/pilot/sidecarscope/main_test.go index acb465f04c32..971507996d64 100644 --- a/tests/integration/pilot/sidecarscope/main_test.go +++ b/tests/integration/pilot/sidecarscope/main_test.go @@ -38,6 +38,14 @@ metadata: name: sidecar namespace: {{.AppNamespace}} spec: +{{- if .IngressListener }} + ingress: + - port: + number: 9080 + protocol: HTTP + name: custom-http + defaultEndpoint: unix:///var/run/someuds.sock +{{- end }} egress: - hosts: {{ range $i, $ns := .ImportedNamespaces }} @@ -188,6 +196,7 @@ type Config struct { ExcludedNamespace string AppNamespace string Resolution string + IngressListener bool } func setupTest(t *testing.T, ctx resource.Context, modifyConfig func(c Config) Config) (pilot.Instance, *model.Proxy) { diff --git a/tests/integration/pilot/sidecarscope/scope_serviceentry_static_test.go b/tests/integration/pilot/sidecarscope/scope_serviceentry_static_test.go index d702cd02e683..7a91e42636c2 100644 --- a/tests/integration/pilot/sidecarscope/scope_serviceentry_static_test.go +++ b/tests/integration/pilot/sidecarscope/scope_serviceentry_static_test.go @@ -17,6 +17,7 @@ 
package sidecarscope import ( "fmt" "reflect" + "strings" "testing" "time" @@ -70,6 +71,83 @@ func TestServiceEntryStatic(t *testing.T) { }) } +func TestSidecarScopeIngressListener(t *testing.T) { + framework.Run(t, func(ctx framework.TestContext) { + configFn := func(c Config) Config { + c.Resolution = "STATIC" + c.IngressListener = true + return c + } + p, nodeID := setupTest(t, ctx, configFn) + // Change the node's IP so that it does not match with any service entry + nodeID.IPAddresses = []string{"100.100.100.100"} + + req := &xdsapi.DiscoveryRequest{ + Node: &xdscore.Node{ + Id: nodeID.ServiceNode(), + }, + TypeUrl: v2.ClusterType, + } + + if err := p.StartDiscovery(req); err != nil { + t.Fatal(err) + } + if err := p.WatchDiscovery(time.Second*5, checkSidecarIngressCluster); err != nil { + t.Fatal(err) + } + + listenerReq := &xdsapi.DiscoveryRequest{ + Node: &xdscore.Node{ + Id: nodeID.ServiceNode(), + }, + TypeUrl: v2.ListenerType, + } + + if err := p.StartDiscovery(listenerReq); err != nil { + t.Fatal(err) + } + if err := p.WatchDiscovery(time.Second*500, checkSidecarIngressListener); err != nil { + t.Error(err) + } + }) +} + +func checkSidecarIngressCluster(resp *xdsapi.DiscoveryResponse) (success bool, e error) { + expectedClusterNamePrefix := "inbound|9080|custom-http|sidecar." + expectedEndpoints := map[string]int{ + "unix:///var/run/someuds.sock": 1, + } + if len(resp.Resources) == 0 { + return true, nil + } + + for _, res := range resp.Resources { + c := &xdsapi.Cluster{} + if err := proto.Unmarshal(res.Value, c); err != nil { + return false, err + } + if !strings.HasPrefix(c.Name, expectedClusterNamePrefix) { + continue + } + + got := map[string]int{} + for _, ep := range c.LoadAssignment.Endpoints { + for _, lb := range ep.LbEndpoints { + if lb.GetEndpoint().Address.GetSocketAddress() != nil { + got[lb.GetEndpoint().Address.GetSocketAddress().Address]++ + } else { + got[lb.GetEndpoint().Address.GetPipe().Path]++ + } + } + } + if !reflect.DeepEqual(expectedEndpoints, got) { + return false, fmt.Errorf("excepted load assignments %+v, got %+v", expectedEndpoints, got) + } + return true, nil + } + return false, fmt.Errorf("did not find expected cluster %s", expectedClusterNamePrefix) +} + func checkResultStatic(resp *xdsapi.DiscoveryResponse) (success bool, e error) { expected := map[string]int{ "1.1.1.1": 1, @@ -119,3 +197,27 @@ func checkResultStaticListener(resp *xdsapi.DiscoveryResponse) (success bool, e } return true, nil } + +func checkSidecarIngressListener(resp *xdsapi.DiscoveryResponse) (success bool, e error) { + expected := map[string]struct{}{ + "100.100.100.100_9080": {}, // corresponds to the proxy IP + "0.0.0.0_80": {}, + "5.5.5.5_443": {}, + "virtualInbound": {}, + "virtualOutbound": {}, + } + + got := map[string]struct{}{} + for _, res := range resp.Resources { + c := &xdsapi.Listener{} + if err := proto.Unmarshal(res.Value, c); err != nil { + return false, err + } + got[c.Name] = struct{}{} + } + if !reflect.DeepEqual(expected, got) { + return false, fmt.Errorf("excepted listeners %+v, got %+v", expected, got) + } + + return true, nil +}
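
The recurring convention in the cluster.go and listener.go hunks above is what ties the fix together: when no workload service instance matches a Sidecar ingress listener, both CDS and LDS now fall back to a placeholder hostname of the form <sidecarName>.<sidecarNamespace>, so the inbound cluster name each of them computes still agrees. The standalone sketch below only illustrates that naming convention; inboundClusterName is an illustrative helper, not Istio's actual cluster-name builder.

package main

import "fmt"

// inboundClusterName mirrors the naming scheme the patch relies on:
// inbound|portNumber|portName|hostname. When no workload service matches the
// Sidecar ingress listener, hostname falls back to "<name>.<namespace>" of the
// Sidecar resource so that LDS and CDS still agree on the cluster name.
func inboundClusterName(portNumber int, portName, hostname string) string {
	return fmt.Sprintf("inbound|%d|%s|%s", portNumber, portName, hostname)
}

func main() {
	// A matching service instance was found: use its service hostname.
	fmt.Println(inboundClusterName(9080, "custom-http", "httpbin.default.svc.cluster.local"))
	// No matching instance: fall back to the SidecarScope ID, as in the new tests.
	fmt.Println(inboundClusterName(9080, "custom-http", "sidecar.default"))
}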
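
The same buildInboundClusters hunk parses each ingress listener's defaultEndpoint into either a Unix domain socket (values starting with the unix address prefix, taken as-is and marked AddressFamilyUnix) or a local host:port whose address is rewritten to the loopback that the sidecar forwards to. A minimal sketch of that branching, assuming the prefix is "unix://"; the endpoint type and parseDefaultEndpoint function are stand-ins for illustration, not the code in the patch.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// unixAddressPrefix is assumed to match model.UnixAddressPrefix.
const unixAddressPrefix = "unix://"

// endpoint is a minimal stand-in for the fields the patch fills on
// model.NetworkEndpoint: address, port and address family.
type endpoint struct {
	Address string
	Port    int
	IsUnix  bool
}

// parseDefaultEndpoint sketches the branching in buildInboundClusters: a
// unix:// value is used verbatim as a Unix domain socket, anything else is
// split as host:port and the address defaults to the local loopback.
func parseDefaultEndpoint(defaultEndpoint, actualLocalHost string) (endpoint, error) {
	if strings.HasPrefix(defaultEndpoint, unixAddressPrefix) {
		return endpoint{Address: defaultEndpoint, IsUnix: true}, nil
	}
	parts := strings.Split(defaultEndpoint, ":")
	if len(parts) < 2 {
		return endpoint{}, fmt.Errorf("expected host:port or unix socket, got %q", defaultEndpoint)
	}
	port, err := strconv.Atoi(parts[1])
	if err != nil {
		return endpoint{}, err
	}
	return endpoint{Address: actualLocalHost, Port: port}, nil
}

func main() {
	fmt.Println(parseDefaultEndpoint("unix:///var/run/someuds.sock", "127.0.0.1"))
	fmt.Println(parseDefaultEndpoint(":9999", "127.0.0.1"))
}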
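
The validation.go change splits the old validateSidecarPortBindAndCaptureMode into separate egress and ingress checks: ingress listeners are always IP listeners, so their port must be a valid non-zero port and an explicit bind must be an IPv4 address, while Unix-domain-socket binds remain valid only on egress. That is why the new "UDS bind in inbound" test case expects validation to fail. A rough standalone sketch of the ingress rule using only the standard library; validateIngressPortAndBind is not the actual Istio function or signature.

package main

import (
	"fmt"
	"net"
	"strings"
)

// validateIngressPortAndBind sketches the tightened ingress rule from the
// patch: the port must be a real IP port and, if a bind address is given,
// it must parse as IPv4; unix:// binds are rejected for ingress.
func validateIngressPortAndBind(portNumber uint32, bind string) error {
	if portNumber == 0 || portNumber > 65535 {
		return fmt.Errorf("ingress port must be in 1-65535, got %d", portNumber)
	}
	if bind == "" {
		return nil // pilot picks the proxy IP or the wildcard inbound address
	}
	if strings.HasPrefix(bind, "unix://") {
		return fmt.Errorf("unix domain socket binds are only allowed on egress listeners")
	}
	if ip := net.ParseIP(bind); ip == nil || ip.To4() == nil {
		return fmt.Errorf("ingress bind %q is not a valid IPv4 address", bind)
	}
	return nil
}

func main() {
	fmt.Println(validateIngressPortAndBind(9080, ""))                  // valid
	fmt.Println(validateIngressPortAndBind(0, "unix:///@foo/bar/com")) // rejected, as in the new test case
}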
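
The updated listener tests assert that the listener synthesized for a Sidecar-only workload carries four filter chains, two HTTP and two TCP (most likely the plaintext and mTLS-permissive variants of each). The classification the test helpers rely on is whether a chain contains the HTTP connection manager network filter. Below is a small sketch of that check using a stand-in struct rather than the Envoy API types; the filter name "envoy.http_connection_manager" is assumed to be the well-known name in use at the time.

package main

import "fmt"

// filterChain is a minimal stand-in for an Envoy filter chain: just the
// network filter names it carries.
type filterChain struct {
	FilterNames []string
}

// isHTTPFilterChain mirrors what the listener tests assert: a chain counts as
// HTTP when it carries the HTTP connection manager filter, otherwise it is
// treated as a plain TCP chain.
func isHTTPFilterChain(fc filterChain) bool {
	for _, name := range fc.FilterNames {
		if name == "envoy.http_connection_manager" {
			return true
		}
	}
	return false
}

func main() {
	chains := []filterChain{
		{FilterNames: []string{"envoy.http_connection_manager"}}, // HTTP, e.g. plaintext
		{FilterNames: []string{"envoy.http_connection_manager"}}, // HTTP, e.g. TLS
		{FilterNames: []string{"envoy.tcp_proxy"}},               // TCP, e.g. plaintext
		{FilterNames: []string{"envoy.tcp_proxy"}},               // TCP, e.g. TLS
	}
	httpCount := 0
	for _, fc := range chains {
		if isHTTPFilterChain(fc) {
			httpCount++
		}
	}
	fmt.Printf("%d filter chains: %d HTTP, %d TCP\n", len(chains), httpCount, len(chains)-httpCount)
}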