diff --git a/cmd/tnf/generate/catalog/catalog_test.go b/cmd/tnf/generate/catalog/catalog_test.go index 78a24e9ed..e460f2f0f 100644 --- a/cmd/tnf/generate/catalog/catalog_test.go +++ b/cmd/tnf/generate/catalog/catalog_test.go @@ -23,7 +23,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/test-network-function/cnf-certification-test/pkg/arrayhelper" "github.com/test-network-function/test-network-function-claim/pkg/claim" ) @@ -59,33 +58,6 @@ func TestRunGenerateMarkdownCmd(t *testing.T) { assert.Nil(t, runGenerateMarkdownCmd(nil, nil)) } -func TestUnique(t *testing.T) { - testCases := []struct { - testSlice []string - expectedSlice []string - }{ - { - testSlice: []string{"one", "two", "three"}, - expectedSlice: []string{"one", "two", "three"}, - }, - { - testSlice: []string{"one", "two", "three", "three"}, - expectedSlice: []string{"one", "two", "three"}, - }, - { - testSlice: []string{}, - expectedSlice: []string{}, - }, - } - - for _, tc := range testCases { - sort.Strings(tc.expectedSlice) - results := arrayhelper.Unique(tc.testSlice) - sort.Strings(results) - assert.True(t, reflect.DeepEqual(tc.expectedSlice, results)) - } -} - func TestGetSuitesFromIdentifiers(t *testing.T) { testCases := []struct { testKeys []claim.Identifier diff --git a/cnf-certification-test/accesscontrol/rbac/automount.go b/cnf-certification-test/accesscontrol/rbac/automount.go index e0c85ec7a..84f60d65e 100644 --- a/cnf-certification-test/accesscontrol/rbac/automount.go +++ b/cnf-certification-test/accesscontrol/rbac/automount.go @@ -20,15 +20,14 @@ import ( "context" "fmt" - "github.com/test-network-function/cnf-certification-test/internal/clientsholder" "github.com/test-network-function/cnf-certification-test/internal/log" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1typed "k8s.io/client-go/kubernetes/typed/core/v1" ) -func AutomountServiceAccountSetOnSA(serviceAccountName, podNamespace string) (*bool, error) { - clientsHolder := clientsholder.GetClientsHolder() - sa, err := clientsHolder.K8sClient.CoreV1().ServiceAccounts(podNamespace).Get(context.TODO(), serviceAccountName, metav1.GetOptions{}) +func AutomountServiceAccountSetOnSA(client corev1typed.CoreV1Interface, serviceAccountName, podNamespace string) (*bool, error) { + sa, err := client.ServiceAccounts(podNamespace).Get(context.TODO(), serviceAccountName, metav1.GetOptions{}) if err != nil { log.Error("executing serviceaccount command failed with error: %v", err) return nil, err @@ -37,7 +36,7 @@ func AutomountServiceAccountSetOnSA(serviceAccountName, podNamespace string) (*b } //nolint:gocritic -func EvaluateAutomountTokens(put *corev1.Pod) (bool, string) { +func EvaluateAutomountTokens(client corev1typed.CoreV1Interface, put *corev1.Pod) (bool, string) { // The token can be specified in the pod directly // or it can be specified in the service account of the pod // if no service account is configured, then the pod will use the configuration @@ -50,7 +49,7 @@ func EvaluateAutomountTokens(put *corev1.Pod) (bool, string) { } // Collect information about the service account attached to the pod. 
- saAutomountServiceAccountToken, err := AutomountServiceAccountSetOnSA(put.Spec.ServiceAccountName, put.Namespace) + saAutomountServiceAccountToken, err := AutomountServiceAccountSetOnSA(client, put.Spec.ServiceAccountName, put.Namespace) if err != nil { return false, "" } diff --git a/cnf-certification-test/accesscontrol/rbac/automount_test.go b/cnf-certification-test/accesscontrol/rbac/automount_test.go index e84497a58..4c4ea3e25 100644 --- a/cnf-certification-test/accesscontrol/rbac/automount_test.go +++ b/cnf-certification-test/accesscontrol/rbac/automount_test.go @@ -80,8 +80,8 @@ func TestAutomountServiceAccountSetOnSA(t *testing.T) { var testRuntimeObjects []runtime.Object testRuntimeObjects = append(testRuntimeObjects, &testSA) - _ = clientsholder.GetTestClientsHolder(testRuntimeObjects) - isSet, err := AutomountServiceAccountSetOnSA("testSA", "podNS") + client := clientsholder.GetTestClientsHolder(testRuntimeObjects) + isSet, err := AutomountServiceAccountSetOnSA(client.K8sClient.CoreV1(), "testSA", "podNS") assert.Nil(t, err) assert.Equal(t, tc.automountServiceTokenSet, *isSet) } @@ -138,8 +138,8 @@ func TestEvaluateAutomountTokens(t *testing.T) { } for _, tc := range testCases { - _ = clientsholder.GetTestClientsHolder(buildServiceAccountTokenTestObjects()) - podPassed, msg := EvaluateAutomountTokens(tc.testPod) + client := clientsholder.GetTestClientsHolder(buildServiceAccountTokenTestObjects()) + podPassed, msg := EvaluateAutomountTokens(client.K8sClient.CoreV1(), tc.testPod) assert.Equal(t, tc.expectedMsg, msg) assert.Equal(t, tc.expectedResult, podPassed) } diff --git a/cnf-certification-test/accesscontrol/suite.go b/cnf-certification-test/accesscontrol/suite.go index a90b757d0..780c86861 100644 --- a/cnf-certification-test/accesscontrol/suite.go +++ b/cnf-certification-test/accesscontrol/suite.go @@ -704,7 +704,8 @@ func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironm } // Evaluate the pod's automount service tokens and any attached service accounts - podPassed, newMsg := rbac.EvaluateAutomountTokens(put.Pod) + client := clientsholder.GetClientsHolder() + podPassed, newMsg := rbac.EvaluateAutomountTokens(client.K8sClient.CoreV1(), put.Pod) if !podPassed { check.LogError(newMsg) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, newMsg, false)) diff --git a/cnf-certification-test/certification/suite.go b/cnf-certification-test/certification/suite.go index 7d86ac4c1..d5845f4e2 100644 --- a/cnf-certification-test/certification/suite.go +++ b/cnf-certification-test/certification/suite.go @@ -45,7 +45,6 @@ var ( validator certdb.CertificationStatusValidator beforeEachFn = func(check *checksdb.Check) error { - check.LogInfo("Check %s: getting test environment and certdb validator.", check.ID) env = provider.GetTestEnvironment() var err error @@ -75,7 +74,7 @@ var ( ) func LoadChecks() { - log.Debug("Loading %s checks", common.AffiliatedCertTestKey) + log.Debug("Loading %s suite checks", common.AffiliatedCertTestKey) checksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey). WithBeforeEachFn(beforeEachFn) @@ -120,8 +119,6 @@ func testContainerCertification(c provider.ContainerImageIdentifier, validator c func testAllOperatorCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) { operatorsUnderTest := env.Operators - check.LogInfo("Verify operator as certified. 
Number of operators to check: %d", len(operatorsUnderTest)) - var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -132,20 +129,19 @@ func testAllOperatorCertified(check *checksdb.Check, env *provider.TestEnvironme splitVersion := strings.SplitN(env.OpenshiftVersion, ".", majorMinorPatchCount) ocpMinorVersion = splitVersion[0] + "." + splitVersion[1] } - for i := range operatorsUnderTest { - name := operatorsUnderTest[i].Name - channel := operatorsUnderTest[i].Channel - isCertified := validator.IsOperatorCertified(name, ocpMinorVersion, channel) + for _, operator := range operatorsUnderTest { + check.LogInfo("Testing Operator %q", operator) + isCertified := validator.IsOperatorCertified(operator.Name, ocpMinorVersion, operator.Channel) if !isCertified { - check.LogInfo("Operator %s (channel %s) failed to be certified for OpenShift %s", name, channel, ocpMinorVersion) - nonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operatorsUnderTest[i].Namespace, operatorsUnderTest[i].Name, "Operator failed to be certified for OpenShift", false). + check.LogError("Operator %q (channel %q) failed to be certified for OpenShift %s", operator.Name, operator.Channel, ocpMinorVersion) + nonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, "Operator failed to be certified for OpenShift", false). AddField(testhelper.OCPVersion, ocpMinorVersion). - AddField(testhelper.OCPChannel, channel)) + AddField(testhelper.OCPChannel, operator.Channel)) } else { - log.Info("Operator %s (channel %s) certified OK.", name, channel) - compliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operatorsUnderTest[i].Namespace, operatorsUnderTest[i].Name, "Operator certified OK", true). + check.LogInfo("Operator %q (channel %q) is certified for OpenShift %s", operator.Name, operator.Channel, ocpMinorVersion) + compliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, "Operator certified OK", true). AddField(testhelper.OCPVersion, ocpMinorVersion). - AddField(testhelper.OCPChannel, channel)) + AddField(testhelper.OCPChannel, operator.Channel)) } } @@ -159,13 +155,14 @@ func testHelmCertified(check *checksdb.Check, env *provider.TestEnvironment, val var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, helm := range helmchartsReleases { + check.LogInfo("Testing Helm Chart Release %q", helm.Name) if !validator.IsHelmChartCertified(helm, env.K8sVersion) { + check.LogError("Helm Chart %q version %q is not certified.", helm.Name, helm.Chart.Metadata.Version) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, "helm chart is not certified", false). SetType(testhelper.HelmVersionType). AddField(testhelper.Version, helm.Chart.Metadata.Version)) - check.LogDebug("Helm Chart %s version %s is not certified.", helm.Name, helm.Chart.Metadata.Version) } else { - log.Info("Helm Chart %s version %s is certified.", helm.Name, helm.Chart.Metadata.Version) + check.LogInfo("Helm Chart %q version %q is certified.", helm.Name, helm.Chart.Metadata.Version) compliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, "helm chart is certified", true). SetType(testhelper.HelmVersionType). 
AddField(testhelper.Version, helm.Chart.Metadata.Version)) @@ -179,15 +176,16 @@ func testContainerCertificationStatusByDigest(check *checksdb.Check, env *provid var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, c := range env.Containers { + check.LogInfo("Testing Container %q", c) switch { case c.ContainerImageIdentifier.Digest == "": - check.LogDebug("%s is missing digest field, failing validation (repo=%s image=%s digest=%s)", c, c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository, c.ContainerImageIdentifier.Digest) + check.LogError("Container %q is missing digest field, failing validation (repo=%q image=%q)", c, c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, "Missing digest field", false). AddField(testhelper.Repository, c.ContainerImageIdentifier.Registry). AddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository). AddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest)) case !testContainerCertification(c.ContainerImageIdentifier, validator): - check.LogDebug("%s digest not found in database, failing validation (repo=%s image=%s tag=%s digest=%s)", c, + check.LogError("Container %q digest not found in database, failing validation (repo=%q image=%q tag=%q digest=%q)", c, c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository, c.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, "Digest not found in database", false). @@ -195,6 +193,9 @@ func testContainerCertificationStatusByDigest(check *checksdb.Check, env *provid AddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository). AddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest)) default: + check.LogInfo("Container %q digest found in database, image certified (repo=%q image=%q tag=%q digest=%q)", c, + c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository, + c.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest) compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, "Container is certified", true)) } } @@ -212,11 +213,12 @@ func testHelmVersion(check *checksdb.Check) error { LabelSelector: "app=helm,name=tiller", }) if err != nil { + check.LogError("Could not get Tiller pod, err=%v", err) return fmt.Errorf("failed getting Tiller pod: %v", err) } if len(podList.Items) == 0 { - check.LogDebug("Tiller pod not found in any namespaces. Helm version is v3.") + check.LogInfo("Tiller pod not found in any namespaces. Helm version is v3.") for _, helm := range env.HelmChartReleases { compliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, "helm chart was installed with helm v3", true)) } @@ -224,7 +226,7 @@ func testHelmVersion(check *checksdb.Check) error { return nil } - check.LogDebug("Tiller pod found, helm version is v2.") + check.LogError("Tiller pod found, Helm version is v2 but v3 required") for i := range podList.Items { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(podList.Items[i].Namespace, podList.Items[i].Name, "This pod is a Tiller pod. 
Helm Chart version is v2 but needs to be v3 due to the security risks associated with Tiller", false)) diff --git a/cnf-certification-test/networking/icmp/icmp.go b/cnf-certification-test/networking/icmp/icmp.go index d8ee00a6d..2b5f2a1da 100644 --- a/cnf-certification-test/networking/icmp/icmp.go +++ b/cnf-certification-test/networking/icmp/icmp.go @@ -50,19 +50,20 @@ func (results PingResults) String() string { func BuildNetTestContext(pods []*provider.Pod, aIPVersion netcommons.IPVersion, aType netcommons.IFType, logger *log.Logger) (netsUnderTest map[string]netcommons.NetTestContext) { netsUnderTest = make(map[string]netcommons.NetTestContext) for _, put := range pods { + logger.Info("Testing Pod %q", put) if put.SkipNetTests { - logger.Info("Skipping %s because it is excluded from all connectivity tests", put) + logger.Info("Skipping %q because it is excluded from all connectivity tests", put) continue } if aType == netcommons.MULTUS { if put.SkipMultusNetTests { - logger.Info("Skipping pod %s because it is excluded from %s connectivity tests only", put.Name, aType) + logger.Info("Skipping pod %q because it is excluded from %q connectivity tests only", put.Name, aType) continue } for netKey, multusIPAddress := range put.MultusIPs { // The first container is used to get the network namespace - ProcessContainerIpsPerNet(put.Containers[0], netKey, multusIPAddress, netsUnderTest, aIPVersion) + processContainerIpsPerNet(put.Containers[0], netKey, multusIPAddress, netsUnderTest, aIPVersion, logger) } continue } @@ -70,22 +71,23 @@ func BuildNetTestContext(pods []*provider.Pod, aIPVersion netcommons.IPVersion, const defaultNetKey = "default" defaultIPAddress := put.Status.PodIPs // The first container is used to get the network namespace - ProcessContainerIpsPerNet(put.Containers[0], defaultNetKey, netcommons.PodIPsToStringList(defaultIPAddress), netsUnderTest, aIPVersion) + processContainerIpsPerNet(put.Containers[0], defaultNetKey, netcommons.PodIPsToStringList(defaultIPAddress), netsUnderTest, aIPVersion, logger) } return netsUnderTest } // processContainerIpsPerNet takes a container ip addresses for a given network attachment's and uses it as a test target. // The first container in the loop is selected as the test initiator. 
the Oc context of the container is used to initiate the pings -func ProcessContainerIpsPerNet(containerID *provider.Container, +func processContainerIpsPerNet(containerID *provider.Container, netKey string, ipAddresses []string, netsUnderTest map[string]netcommons.NetTestContext, - aIPVersion netcommons.IPVersion) { + aIPVersion netcommons.IPVersion, + logger *log.Logger) { ipAddressesFiltered := netcommons.FilterIPListByIPVersion(ipAddresses, aIPVersion) if len(ipAddressesFiltered) == 0 { // if no multus addresses found, skip this container - log.Debug("Skipping %s, Network %s because no multus IPs are present", containerID, netKey) + logger.Debug("Skipping %q, Network %q because no multus IPs are present", containerID, netKey) return } // Create an entry at "key" if it is not present @@ -97,7 +99,7 @@ func ProcessContainerIpsPerNet(containerID *provider.Container, // Then modify the copy firstIPIndex := 0 if entry.TesterSource.ContainerIdentifier == nil { - log.Debug("%s selected to initiate ping tests", containerID) + logger.Debug("%q selected to initiate ping tests", containerID) entry.TesterSource.ContainerIdentifier = containerID // if multiple interfaces are present for this network on this container/pod, pick the first one as the tester source ip entry.TesterSource.IP = ipAddressesFiltered[firstIPIndex] @@ -126,7 +128,7 @@ func RunNetworkingTests( //nolint:funlen logger.Debug("%s", netcommons.PrintNetTestContextMap(netsUnderTest)) skip = false if len(netsUnderTest) == 0 { - logger.Debug("There are no %s networks to test, skipping test", aIPVersion) + logger.Debug("There are no %q networks to test, skipping test", aIPVersion) skip = true return report, skip } @@ -139,27 +141,32 @@ func RunNetworkingTests( //nolint:funlen compliantNets[netName] = 0 nonCompliantNets[netName] = 0 if len(netUnderTest.DestTargets) == 0 { - logger.Debug("There are no containers to ping for %s network %s. A minimum of 2 containers is needed to run a ping test (a source and a destination) Skipping test", aIPVersion, netName) + logger.Debug("There are no containers to ping for %q network %q. A minimum of 2 containers is needed to run a ping test (a source and a destination) Skipping test", aIPVersion, netName) continue } atLeastOneNetworkTested = true - logger.Debug("%s Ping tests on network %s. Number of target IPs: %d", aIPVersion, netName, len(netUnderTest.DestTargets)) + logger.Debug("%q Ping tests on network %q. 
Number of target IPs: %d", aIPVersion, netName, len(netUnderTest.DestTargets)) for _, aDestIP := range netUnderTest.DestTargets { - logger.Debug("%s ping test on network %s from ( %s srcip: %s ) to ( %s dstip: %s )", + logger.Debug("%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q )", aIPVersion, netName, netUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP, aDestIP.ContainerIdentifier, aDestIP.IP) result, err := TestPing(netUnderTest.TesterSource.ContainerIdentifier, aDestIP, count) - logger.Debug("Ping results: %s", result.String()) - logger.Info("%s ping test on network %s from ( %s srcip: %s ) to ( %s dstip: %s ) result: %s", + logger.Debug("Ping results: %q", result) + logger.Info("%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q ) result: %q", aIPVersion, netName, netUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP, - aDestIP.ContainerIdentifier, aDestIP.IP, result.String()) + aDestIP.ContainerIdentifier, aDestIP.IP, result) if err != nil { - logger.Debug("Ping failed with err:%s", err) + logger.Debug("Ping failed, err=%v", err) } if result.outcome != testhelper.SUCCESS { + logger.Error("Ping from %q (srcip: %q) to %q (dstip: %q) failed", + netUnderTest.TesterSource.ContainerIdentifier, + netUnderTest.TesterSource.IP, + aDestIP.ContainerIdentifier, + aDestIP.IP) nonCompliantNets[netName]++ nonCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace, netUnderTest.TesterSource.ContainerIdentifier.Podname, @@ -173,6 +180,11 @@ func RunNetworkingTests( //nolint:funlen AddField(testhelper.DestinationIP, aDestIP.IP) report.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, nonCompliantObject) } else { + logger.Info("Ping from %q (srcip: %q) to %q (dstip: %q) succeeded", + netUnderTest.TesterSource.ContainerIdentifier, + netUnderTest.TesterSource.IP, + aDestIP.ContainerIdentifier, + aDestIP.IP) compliantNets[netName]++ CompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace, netUnderTest.TesterSource.ContainerIdentifier.Podname, @@ -188,16 +200,18 @@ func RunNetworkingTests( //nolint:funlen } } if nonCompliantNets[netName] != 0 { + logger.Error("ICMP tests failed for %d IP source/destination in this network", nonCompliantNets[netName]) report.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf("ICMP tests failed for %d IP source/destination in this network", nonCompliantNets[netName]), testhelper.NetworkType, false). AddField(testhelper.NetworkName, netName)) } if compliantNets[netName] != 0 { - report.CompliantObjectsOut = append(report.CompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf("ICMP tests were successful for all %d IP source/destination in this network", compliantNets[netName]), testhelper.NetworkType, true). + logger.Info("ICMP tests were successful for all %d IP source/destination in this network", compliantNets[netName]) + report.CompliantObjectsOut = append(report.CompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf("ICMP tests were successful for all %d IP source/destination in this network", compliantNets[netName]), testhelper.NetworkType, true). 
AddField(testhelper.NetworkName, netName)) } } if !atLeastOneNetworkTested { - logger.Debug("There are no %s networks to test, skipping test", aIPVersion) + logger.Debug("There are no %q networks to test, skipping test", aIPVersion) skip = true } diff --git a/cnf-certification-test/networking/icmp/icmp_test.go b/cnf-certification-test/networking/icmp/icmp_test.go index e8945bbd3..2be16ca9f 100644 --- a/cnf-certification-test/networking/icmp/icmp_test.go +++ b/cnf-certification-test/networking/icmp/icmp_test.go @@ -298,14 +298,17 @@ func TestProcessContainerIpsPerNet(t *testing.T) { }, }, } + var logArchive strings.Builder + log.SetupLogger(&logArchive, "INFO") for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ProcessContainerIpsPerNet( + processContainerIpsPerNet( tt.args.containerID, tt.args.netKey, tt.args.ipAddresses, tt.args.netsUnderTest, tt.args.aIPVersion, + log.GetLogger(), ) if !reflect.DeepEqual(tt.args.netsUnderTest, tt.args.wantNetsUnderTest) { t.Errorf( @@ -328,7 +331,6 @@ func TestBuildNetTestContext(t *testing.T) { name string args args wantNetsUnderTest map[string]netcommons.NetTestContext - wantLogArchive []string }{ { name: "ipv4ok", @@ -394,10 +396,6 @@ func TestBuildNetTestContext(t *testing.T) { DestTargets: nil, }, }, - - wantLogArchive: []string{ - "Skipping pod: pod2 ns: ns1 because it is excluded from all connectivity tests", - }, }, { name: "ipv4ok multus", @@ -511,10 +509,6 @@ func TestBuildNetTestContext(t *testing.T) { DestTargets: nil, }, }, - - wantLogArchive: []string{ - "Skipping pod pod2 because it is excluded from Multus connectivity tests only", - }, }, } var logArchive strings.Builder @@ -548,34 +542,10 @@ func TestBuildNetTestContext(t *testing.T) { tt.wantNetsUnderTest, ) } - - logArchiveMsgs := getLogArchiveMsgs(logArchive) - if !reflect.DeepEqual(logArchiveMsgs, tt.wantLogArchive) { - t.Errorf( - "BuildNetTestContext() gotClaimsLog = %v, want %v", - logArchiveMsgs, - tt.wantLogArchive, - ) - } - logArchive.Reset() }) } } -func getLogArchiveMsgs(logArchive strings.Builder) []string { - var logArchiveMsgs []string - logLines := strings.Split(logArchive.String(), "\n") - - for _, logLine := range logLines { - logMsgs := strings.Split(logLine, "]") - if len(logMsgs) > 2 { - logArchiveMsgs = append(logArchiveMsgs, strings.TrimSpace(logMsgs[2])) - } - } - - return logArchiveMsgs -} - var ( pod1 = provider.Pod{ //nolint:dupl Pod: &corev1.Pod{ @@ -741,7 +711,6 @@ func TestRunNetworkingTests(t *testing.T) { name string args args wantReport testhelper.FailureReasonOut - wantLogArchive []string testPingSuccess bool }{ {name: "ok", @@ -810,16 +779,13 @@ func TestRunNetworkingTests(t *testing.T) { ObjectType: "Network", ObjectFieldsKeys: []string{testhelper.ReasonForCompliance, testhelper.NetworkName}, ObjectFieldsValues: []string{ - "ICMP tests were successful for all 1 IP source/destination in this network", + "ICMP tests were successful for all 1 IP source/destination in this network", "default", }, }, }, NonCompliantObjectsOut: []*testhelper.ReportObject{}, }, - wantLogArchive: []string{ - "IPv4 ping test on network default from ( container: test1 pod: test-0 ns: tnf srcip: 10.244.195.231 ) to ( container: test2 pod: test-1 ns: tnf dstip: 10.244.195.232 ) result: outcome: SUCCESS transmitted: 10 received: 10 errors: 0", - }, testPingSuccess: true, }, {name: "noNetToTest", @@ -965,10 +931,6 @@ func TestRunNetworkingTests(t *testing.T) { }, }, }, - wantLogArchive: []string{ - "IPv4 ping test on network default from ( container: test1 pod: test-0 
ns: tnf srcip: 10.244.195.231 ) to ( container: test2 pod: test-1 ns: tnf dstip: 10.244.195.232 ) result: outcome: FAILURE transmitted: 10 received: 5 errors: 5", //nolint:lll - "IPv4 ping test on network default from ( container: test1 pod: test-0 ns: tnf srcip: 10.244.195.231 ) to ( container: test3 pod: test-1 ns: tnf dstip: 10.244.195.233 ) result: outcome: FAILURE transmitted: 10 received: 5 errors: 5", - }, testPingSuccess: false, }, } @@ -990,20 +952,11 @@ func TestRunNetworkingTests(t *testing.T) { ) if !gotReport.Equal(tt.wantReport) { t.Errorf( - "RunNetworkingTests() gotReport = %s, want %s", + "RunNetworkingTests() gotReport = %q, want %q", testhelper.FailureReasonOutTestString(gotReport), testhelper.FailureReasonOutTestString(tt.wantReport), ) } - logArchiveMsgs := getLogArchiveMsgs(logArchive) - if !reflect.DeepEqual(logArchiveMsgs, tt.wantLogArchive) { - t.Errorf( - "RunNetworkingTests() gotReport = %+v, want %+v", - logArchiveMsgs, - tt.wantLogArchive, - ) - } - logArchive.Reset() }) } } diff --git a/cnf-certification-test/networking/netcommons/netcommons.go b/cnf-certification-test/networking/netcommons/netcommons.go index 99c837bd5..df096a379 100644 --- a/cnf-certification-test/networking/netcommons/netcommons.go +++ b/cnf-certification-test/networking/netcommons/netcommons.go @@ -149,11 +149,12 @@ func FilterIPListByIPVersion(ipList []string, aIPVersion IPVersion) []string { return filteredIPList } -func FindRogueContainersDeclaringPorts(containers []*provider.Container, portsToTest map[int32]bool, portsOrigin string) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { +func findRogueContainersDeclaringPorts(containers []*provider.Container, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { for _, cut := range containers { + logger.Info("Testing Container %q", cut) for _, port := range cut.Ports { if portsToTest[port.ContainerPort] { - log.Debug("%s has declared a port (%d) that has been reserved", cut, port.ContainerPort) + logger.Error("%q declares %s reserved port %d (%s)", cut, portsOrigin, port.ContainerPort, port.Protocol) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf("Container declares %s reserved port in %v", portsOrigin, portsToTest), false). @@ -161,6 +162,7 @@ func FindRogueContainersDeclaringPorts(containers []*provider.Container, portsTo AddField(testhelper.PortNumber, strconv.Itoa(int(port.ContainerPort))). AddField(testhelper.PortProtocol, string(port.Protocol))) } else { + logger.Info("%q does not declare any %s reserved port", cut, portsOrigin) compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf("Container does not declare %s reserved port in %v", portsOrigin, portsToTest), true). 
@@ -187,16 +189,17 @@ var ReservedIstioPorts = map[int32]bool{ 15000: true, // Envoy admin port (commands/diagnostics) } -func FindRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]bool, portsOrigin string) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { +func findRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { for _, put := range pods { - compliantObjectsEntries, nonCompliantObjectsEntries := FindRogueContainersDeclaringPorts(put.Containers, portsToTest, portsOrigin) + logger.Info("Testing Pod %q", put) + compliantObjectsEntries, nonCompliantObjectsEntries := findRogueContainersDeclaringPorts(put.Containers, portsToTest, portsOrigin, logger) nonCompliantPortFound := len(nonCompliantObjectsEntries) > 0 compliantObjects = append(compliantObjects, compliantObjectsEntries...) nonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...) cut := put.Containers[0] listeningPorts, err := netutil.GetListeningPorts(cut) if err != nil { - log.Debug("Failed to get the listening ports on %s, err: %v", cut, err) + logger.Error("Failed to get the listening ports on %q, err: %v", cut, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(cut.Namespace, put.Name, fmt.Sprintf("Failed to get the listening ports on pod, err: %v", err), false)) @@ -207,10 +210,10 @@ func FindRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]b // If pod contains an "istio-proxy" container, we need to make sure that the ports returned // overlap with the known istio ports if put.ContainsIstioProxy() && ReservedIstioPorts[int32(port.PortNumber)] { - log.Debug("%s was found to be listening to port %d due to istio-proxy being present. Ignoring.", put, port.PortNumber) + logger.Info("%q was found to be listening to port %d due to istio-proxy being present. Ignoring.", put, port.PortNumber) continue } - log.Debug("%s has one container (%s) listening on port %d that has been reserved", put, cut.Name, port.PortNumber) + logger.Error("%q has one container (%q) listening on port %d (%s) that has been reserved", put, cut.Name, port.PortNumber, port.Protocol) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(cut.Namespace, put.Name, fmt.Sprintf("Pod Listens to %s reserved port in %v", portsOrigin, portsToTest), false). @@ -219,6 +222,7 @@ func FindRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]b AddField(testhelper.PortProtocol, port.Protocol)) nonCompliantPortFound = true } else { + logger.Info("%q listens in %s unreserved port %d (%s)", put, portsOrigin, port.PortNumber, port.Protocol) compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(cut.Namespace, put.Name, fmt.Sprintf("Pod Listens to port not in %s reserved port %v", portsOrigin, portsToTest), true). 
@@ -240,8 +244,8 @@ func FindRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]b return compliantObjects, nonCompliantObjects } -func TestReservedPortsUsage(env *provider.TestEnvironment, reservedPorts map[int32]bool, portsOrigin string) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { - compliantObjectsEntries, nonCompliantObjectsEntries := FindRoguePodsListeningToPorts(env.Pods, reservedPorts, portsOrigin) +func TestReservedPortsUsage(env *provider.TestEnvironment, reservedPorts map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { + compliantObjectsEntries, nonCompliantObjectsEntries := findRoguePodsListeningToPorts(env.Pods, reservedPorts, portsOrigin, logger) compliantObjects = append(compliantObjects, compliantObjectsEntries...) nonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...) diff --git a/cnf-certification-test/networking/policies/policies.go b/cnf-certification-test/networking/policies/policies.go index 0d504583b..1c7232e87 100644 --- a/cnf-certification-test/networking/policies/policies.go +++ b/cnf-certification-test/networking/policies/policies.go @@ -17,34 +17,31 @@ package policies import ( - "github.com/test-network-function/cnf-certification-test/internal/log" networkingv1 "k8s.io/api/networking/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType networkingv1.PolicyType) bool { +//nolint:gocritic // unnamed results +func IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType networkingv1.PolicyType) (bool, string) { // As long as we have decided above that there is no pod selector, // we just have to make sure that the policy type is either Ingress or Egress (or both) we can return true. // For more information about deny-all policies, there are some good examples on: // https://kubernetes.io/docs/concepts/services-networking/network-policies/ if len(np.Spec.PolicyTypes) == 0 { - log.Debug("%s: policy types found empty", np.Name) - return false + return false, "empty policy types" } // Ingress and Egress rules should be "empty" if it is a default rule. 
if policyType == networkingv1.PolicyTypeEgress { if np.Spec.Egress != nil || len(np.Spec.Egress) > 0 { - log.Debug("%s: egress spec found not empty", np.Name) - return false + return false, "egress spec not empty for default egress rule" } } if policyType == networkingv1.PolicyTypeIngress { if np.Spec.Ingress != nil || len(np.Spec.Ingress) > 0 { - log.Debug("%s: ingress spec found not empty", np.Name) - return false + return false, "ingress spec not empty for default ingress rule" } } @@ -57,7 +54,7 @@ func IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType network } } - return policyTypeFound + return policyTypeFound, "" } func LabelsMatch(podSelectorLabels v1.LabelSelector, podLabels map[string]string) bool { diff --git a/cnf-certification-test/networking/policies/policies_test.go b/cnf-certification-test/networking/policies/policies_test.go index d018a158b..fb0b2c5f2 100644 --- a/cnf-certification-test/networking/policies/policies_test.go +++ b/cnf-certification-test/networking/policies/policies_test.go @@ -190,8 +190,11 @@ func TestIsNetworkPolicyCompliant(t *testing.T) { } for index, tc := range testCases { - assert.Equal(t, tc.expectedEgressOutput, IsNetworkPolicyCompliant(&testCases[index].testNP, networkingv1.PolicyTypeEgress)) - assert.Equal(t, tc.expectedIngressOutput, IsNetworkPolicyCompliant(&testCases[index].testNP, networkingv1.PolicyTypeIngress)) + var isCompliant bool + isCompliant, _ = IsNetworkPolicyCompliant(&testCases[index].testNP, networkingv1.PolicyTypeEgress) + assert.Equal(t, tc.expectedEgressOutput, isCompliant) + isCompliant, _ = IsNetworkPolicyCompliant(&testCases[index].testNP, networkingv1.PolicyTypeIngress) + assert.Equal(t, tc.expectedIngressOutput, isCompliant) } } diff --git a/cnf-certification-test/networking/suite.go b/cnf-certification-test/networking/suite.go index e4501898d..f30b8f555 100644 --- a/cnf-certification-test/networking/suite.go +++ b/cnf-certification-test/networking/suite.go @@ -49,7 +49,6 @@ var ( env provider.TestEnvironment beforeEachFn = func(check *checksdb.Check) error { - check.LogInfo("Check %s: getting test environment.", check.ID) env = provider.GetTestEnvironment() return nil } @@ -57,7 +56,7 @@ var ( //nolint:funlen func LoadChecks() { - log.Debug("Entering %s suite", common.NetworkingTestKey) + log.Debug("Loading %s suite checks", common.NetworkingTestKey) checksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey). 
WithBeforeEachFn(beforeEachFn) @@ -157,20 +156,22 @@ func LoadChecks() { } func testExecProbDenyAtCPUPinning(check *checksdb.Check, dpdkPods []*provider.Pod) { - check.LogInfo("Check if exec probe is happening") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cpuPinnedPod := range dpdkPods { execProbeFound := false for _, cut := range cpuPinnedPod.Containers { + check.LogInfo("Testing Container %q", cut) if cut.HasExecProbes() { + check.LogError("Container %q defines an exec probe", cut) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, "Exec prob is not allowed", false)) execProbeFound = true } } if !execProbeFound { + check.LogInfo("Pod %q does not define any exec probe", cpuPinnedPod) compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, "Exec prob is allowed", true)) } } @@ -186,6 +187,7 @@ func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.Test // First get the ports declared in the Pod's containers spec declaredPorts := make(map[netutil.PortInfo]bool) for _, cut := range put.Containers { + check.LogInfo("Testing Container %q", cut) for _, port := range cut.Ports { portInfo.PortNumber = int(port.ContainerPort) portInfo.Protocol = string(port.Protocol) @@ -197,13 +199,13 @@ func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.Test firstPodContainer := put.Containers[0] listeningPorts, err := netutil.GetListeningPorts(firstPodContainer) if err != nil { - check.LogDebug("Failed to get the container's listening ports, err: %v", err) + check.LogError("Failed to get container %q listening ports, err: %v", firstPodContainer, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf("Failed to get the container's listening ports, err: %v", err), false)) continue } if len(listeningPorts) == 0 { - check.LogDebug("None of the containers of %s have any listening port.", put) + check.LogInfo("None of the containers of %q have any listening port.", put) continue } @@ -211,13 +213,13 @@ func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.Test failedPod := false for listeningPort := range listeningPorts { if put.ContainsIstioProxy() && netcommons.ReservedIstioPorts[int32(listeningPort.PortNumber)] { - check.LogDebug("%s is listening on port %d protocol %s, but the pod also contains istio-proxy. Ignoring.", + check.LogInfo("%q is listening on port %d protocol %q, but the pod also contains istio-proxy. Ignoring.", put, listeningPort.PortNumber, listeningPort.Protocol) continue } if ok := declaredPorts[listeningPort]; !ok { - check.LogDebug("%s is listening on port %d protocol %s, but that port was not declared in any container spec.", + check.LogError("%q is listening on port %d protocol %q, but that port was not declared in any container spec.", put, listeningPort.PortNumber, listeningPort.Protocol) failedPod = true nonCompliantObjects = append(nonCompliantObjects, @@ -227,6 +229,7 @@ func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.Test AddField(testhelper.PortNumber, strconv.Itoa(listeningPort.PortNumber)). 
AddField(testhelper.PortProtocol, listeningPort.Protocol)) } else { + check.LogInfo("%q is listening on declared port %d protocol %q", put, listeningPort.PortNumber, listeningPort.Protocol) compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Listening port was declared in container spec", true). @@ -251,7 +254,7 @@ func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommon netsUnderTest := icmp.BuildNetTestContext(env.Pods, aIPVersion, aType, check.GetLoggger()) report, skip := icmp.RunNetworkingTests(netsUnderTest, defaultNumPings, aIPVersion, check.GetLoggger()) if skip { - check.LogInfo("There are no %s networks to test with at least 2 pods, skipping test", aIPVersion) + check.LogInfo("There are no %q networks to test with at least 2 pods, skipping test", aIPVersion) } check.SetResult(report.CompliantObjectsOut, report.NonCompliantObjectsOut) } @@ -261,7 +264,7 @@ func testOCPReservedPortsUsage(check *checksdb.Check, env *provider.TestEnvironm OCPReservedPorts := map[int32]bool{ 22623: true, 22624: true} - compliantObjects, nonCompliantObjects := netcommons.TestReservedPortsUsage(env, OCPReservedPorts, "OCP") + compliantObjects, nonCompliantObjects := netcommons.TestReservedPortsUsage(env, OCPReservedPorts, "OCP", check.GetLoggger()) check.SetResult(compliantObjects, nonCompliantObjects) } @@ -278,29 +281,31 @@ func testPartnerSpecificTCPPorts(check *checksdb.Check, env *provider.TestEnviro 15001: true, 15000: true, } - compliantObjects, nonCompliantObjects := netcommons.TestReservedPortsUsage(env, ReservedPorts, "Partner") + compliantObjects, nonCompliantObjects := netcommons.TestReservedPortsUsage(env, ReservedPorts, "Partner", check.GetLoggger()) check.SetResult(compliantObjects, nonCompliantObjects) } func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject - check.LogInfo("Testing services (should be either single stack ipv6 or dual-stack)") for _, s := range env.Services { + check.LogInfo("Testing Service %q", s.Name) serviceIPVersion, err := services.GetServiceIPVersion(s) if err != nil { - check.LogDebug("%s", err) + check.LogError("Could not get IP version from Service %q, err=%v", s.Name, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject("Could not get IP Version from service", testhelper.ServiceType, false). AddField(testhelper.Namespace, s.Namespace). AddField(testhelper.ServiceName, s.Name)) } if serviceIPVersion == netcommons.Undefined || serviceIPVersion == netcommons.IPv4 { + check.LogError("Service %q (ns: %q) only supports IPv4", s.Name, s.Namespace) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject("Service supports only IPv4", testhelper.ServiceType, false). AddField(testhelper.Namespace, s.Namespace). AddField(testhelper.ServiceName, s.Name). AddField(testhelper.ServiceIPVersion, serviceIPVersion.String())) } else { - compliantObjects = append(compliantObjects, testhelper.NewReportObject("Service support IPv6 or is dual stack", testhelper.ServiceType, false). + check.LogInfo("Service %q (ns: %q) supports IPv6 or is dual stack", s.Name, s.Namespace) + compliantObjects = append(compliantObjects, testhelper.NewReportObject("Service supports IPv6 or is dual stack", testhelper.ServiceType, true). AddField(testhelper.Namespace, s.Namespace). AddField(testhelper.ServiceName, s.Name). 
AddField(testhelper.ServiceIPVersion, serviceIPVersion.String())) @@ -310,8 +315,8 @@ func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) check.SetResult(compliantObjects, nonCompliantObjects) } +//nolint:funlen func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironment) { - check.LogInfo("Test for Deny All in network policies") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -319,25 +324,35 @@ func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironme // This ensures that each pod is accounted for that we are tasked with testing and excludes any pods that are not marked // for testing (via the labels). for _, put := range env.Pods { + check.LogInfo("Testing Pod %q", put) denyAllEgressFound := false denyAllIngressFound := false // Look through all of the network policies for a matching namespace. for index := range env.NetworkPolicies { - check.LogDebug("Testing network policy %s against pod %s", env.NetworkPolicies[index].Name, put.String()) + networkPolicy := env.NetworkPolicies[index] + check.LogInfo("Testing Network policy %q against pod %q", networkPolicy.Name, put) // Skip any network policies that don't match the namespace of the pod we are testing. - if env.NetworkPolicies[index].Namespace != put.Namespace { + if networkPolicy.Namespace != put.Namespace { + check.LogInfo("Skipping Network policy %q (namespace %q does not match Pod namespace %q)", networkPolicy.Name, networkPolicy.Namespace, put.Namespace) continue } // Match the pod namespace with the network policy namespace. - if policies.LabelsMatch(env.NetworkPolicies[index].Spec.PodSelector, put.Labels) { + if policies.LabelsMatch(networkPolicy.Spec.PodSelector, put.Labels) { + var reason string if !denyAllEgressFound { - denyAllEgressFound = policies.IsNetworkPolicyCompliant(&env.NetworkPolicies[index], networkingv1.PolicyTypeEgress) + denyAllEgressFound, reason = policies.IsNetworkPolicyCompliant(&networkPolicy, networkingv1.PolicyTypeEgress) + if reason != "" { + check.LogError("Network policy %q is not compliant, reason=%q", networkPolicy.Name, reason) + } } if !denyAllIngressFound { - denyAllIngressFound = policies.IsNetworkPolicyCompliant(&env.NetworkPolicies[index], networkingv1.PolicyTypeIngress) + denyAllIngressFound, reason = policies.IsNetworkPolicyCompliant(&networkPolicy, networkingv1.PolicyTypeIngress) + if reason != "" { + check.LogError("Network policy %q is not compliant, reason=%q", networkPolicy.Name, reason) + } } } } @@ -345,18 +360,19 @@ func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironme // Network policy has not been found that contains a deny-all rule for both ingress and egress. 
podIsCompliant := true if !denyAllIngressFound { - check.LogDebug("%s was found to not have a default ingress deny-all network policy.", put.Name) + check.LogError("Pod %q was found to not have a default ingress deny-all network policy.", put) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod was found to not have a default ingress deny-all network policy", false)) podIsCompliant = false } if !denyAllEgressFound { - check.LogDebug("%s was found to not have a default egress deny-all network policy.", put.Name) + check.LogError("Pod %q was found to not have a default egress deny-all network policy.", put) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod was found to not have a default egress deny-all network policy", false)) podIsCompliant = false } if podIsCompliant { + check.LogInfo("Pod %q has a default ingress/egress deny-all network policy", put) compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has a default ingress/egress deny-all network policy", true)) } } @@ -372,21 +388,22 @@ func testRestartOnRebootLabelOnPodsUsingSriov(check *checksdb.Check, sriovPods [ var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, pod := range sriovPods { - check.LogDebug("Pod %s uses SRIOV network/s. Checking label %s existence & value.", pod, restartOnRebootLabel) + check.LogInfo("Testing SRIOV Pod %q", pod) labelValue, exist := pod.GetLabels()[restartOnRebootLabel] if !exist { - check.LogDebug("Pod %s is using SRIOV but the label %s was not found.", pod, restartOnRebootLabel) + check.LogError("Pod %q uses SRIOV but the label %q was not found.", pod, restartOnRebootLabel) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf("Pod uses SRIOV but the label %s was not found", restartOnRebootLabel), false)) continue } if labelValue != "true" { - check.LogDebug("Pod %s is using SRIOV but the %s label value is not true.", pod, restartOnRebootLabel) + check.LogError("Pod %q uses SRIOV but the %q label value is not true.", pod, restartOnRebootLabel) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf("Pod uses SRIOV but the label %s is not set to true", restartOnRebootLabel), false)) continue } + check.LogInfo("Pod %q uses SRIOV and the %q label is set to true", pod, restartOnRebootLabel) compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf("Pod uses SRIOV and the label %s is set to true", restartOnRebootLabel), true)) } diff --git a/cnf-certification-test/platform/operatingsystem/files/rhcos_version_map b/cnf-certification-test/platform/operatingsystem/files/rhcos_version_map index 289f24224..a6eb63f4c 100644 --- a/cnf-certification-test/platform/operatingsystem/files/rhcos_version_map +++ b/cnf-certification-test/platform/operatingsystem/files/rhcos_version_map @@ -139,6 +139,7 @@ 4.11.53 / 411.86.202310261237-0 4.11.54 / 411.86.202311221858-0 4.11.55 / 411.86.202311302109-0 +4.11.56 / 411.86.202312160018-0 4.11.6 / 411.86.202209211811-0 4.11.7 / 411.86.202209211811-0 4.11.8 / 411.86.202210032349-0 @@ -231,6 +232,7 @@ 4.13.25 / 413.92.202311281619-0 4.13.26 / 413.92.202312042340-0 4.13.27 / 413.92.202312131705-0 +4.13.28 / 413.92.202312261421-0 4.13.3 / 413.92.202306070210-0 4.13.4 / 
413.92.202306141213-0 4.13.5 / 413.92.202307140015-0 @@ -259,6 +261,7 @@ 4.14.5 / 414.92.202311281318-0 4.14.6 / 414.92.202312011602-0 4.14.7 / 414.92.202312132152-0 +4.14.8 / 414.92.202312191502-0 4.4.0 / 44.81.202004260825-0 4.4.0-rc.0 / 44.81.202003110830-0 4.4.0-rc.1 / 44.81.202003130330-0 diff --git a/go.mod b/go.mod index 4372fe894..3698df39e 100644 --- a/go.mod +++ b/go.mod @@ -169,7 +169,7 @@ require ( golang.org/x/net v0.18.0 // indirect golang.org/x/oauth2 v0.10.0 // indirect golang.org/x/sync v0.5.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect @@ -218,7 +218,7 @@ require ( github.com/robert-nix/ansihtml v1.0.1 github.com/test-network-function/oct v0.0.4 github.com/test-network-function/privileged-daemonset v1.0.18 - golang.org/x/term v0.15.0 + golang.org/x/term v0.16.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/kubectl v0.28.4 ) diff --git a/go.sum b/go.sum index f61e21b69..aa8f2a89e 100644 --- a/go.sum +++ b/go.sum @@ -797,14 +797,14 @@ golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/internal/clientsholder/clientsholder.go b/internal/clientsholder/clientsholder.go index f67fcf5f0..c6d242f89 100644 --- a/internal/clientsholder/clientsholder.go +++ b/internal/clientsholder/clientsholder.go @@ -43,6 +43,7 @@ import ( corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" + storagev1 "k8s.io/api/storage/v1" apiextv1fake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sFakeClient "k8s.io/client-go/kubernetes/fake" @@ -122,6 +123,8 @@ func GetTestClientsHolder(k8sMockObjects []runtime.Object) *ClientsHolder { k8sClientObjects = append(k8sClientObjects, v) case *scalingv1.HorizontalPodAutoscaler: 
 		k8sClientObjects = append(k8sClientObjects, v)
+	case *storagev1.StorageClass:
+		k8sClientObjects = append(k8sClientObjects, v)
 	// K8s Extension Client Objects
 	case *apiextv1c.CustomResourceDefinition:
diff --git a/pkg/arrayhelper/arrayhelper_test.go b/pkg/arrayhelper/arrayhelper_test.go
index b56198261..36dc95442 100644
--- a/pkg/arrayhelper/arrayhelper_test.go
+++ b/pkg/arrayhelper/arrayhelper_test.go
@@ -18,6 +18,7 @@ package arrayhelper

 import (
 	"reflect"
+	"sort"
 	"strings"
 	"testing"

@@ -85,3 +86,30 @@ func TestArgListToMap(t *testing.T) {
 		assert.True(t, reflect.DeepEqual(tc.expectedMap, ArgListToMap(tc.argList)))
 	}
 }
+
+func TestUnique(t *testing.T) {
+	testCases := []struct {
+		testSlice     []string
+		expectedSlice []string
+	}{
+		{
+			testSlice:     []string{"one", "two", "three"},
+			expectedSlice: []string{"one", "two", "three"},
+		},
+		{
+			testSlice:     []string{"one", "two", "three", "three"},
+			expectedSlice: []string{"one", "two", "three"},
+		},
+		{
+			testSlice:     []string{},
+			expectedSlice: []string{},
+		},
+	}
+
+	for _, tc := range testCases {
+		sort.Strings(tc.expectedSlice)
+		results := Unique(tc.testSlice)
+		sort.Strings(results)
+		assert.True(t, reflect.DeepEqual(tc.expectedSlice, results))
+	}
+}
diff --git a/pkg/autodiscover/autodiscover.go b/pkg/autodiscover/autodiscover.go
index e9f2b324b..89a452567 100644
--- a/pkg/autodiscover/autodiscover.go
+++ b/pkg/autodiscover/autodiscover.go
@@ -129,7 +129,7 @@ func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData
 	oc := clientsholder.GetClientsHolder()

 	var err error
-	data.StorageClasses, err = getAllStorageClasses()
+	data.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())
 	if err != nil {
 		log.Error("Failed to retrieve storageClasses - err: %v", err)
 		os.Exit(1)
@@ -196,21 +196,21 @@ func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData
 	data.Deployments = findDeploymentByLabel(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)
 	data.StatefulSet = findStatefulSetByLabel(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)
 	// Find ClusterRoleBindings
-	clusterRoleBindings, err := getClusterRoleBindings()
+	clusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())
 	if err != nil {
 		log.Error("Cannot get cluster role bindings, error: %v", err)
 		os.Exit(1)
 	}
 	data.ClusterRoleBindings = clusterRoleBindings
 	// Find RoleBindings
-	roleBindings, err := getRoleBindings()
+	roleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())
 	if err != nil {
 		log.Error("Cannot get cluster role bindings, error: %v", err)
 		os.Exit(1)
 	}
 	data.RoleBindings = roleBindings
 	// find roles
-	roles, err := getRoles()
+	roles, err := getRoles(oc.K8sClient.RbacV1())
 	if err != nil {
 		log.Error("Cannot get roles, error: %v", err)
 		os.Exit(1)
diff --git a/pkg/autodiscover/autodiscover_events.go b/pkg/autodiscover/autodiscover_events.go
index 9f8b88db7..518e562f9 100644
--- a/pkg/autodiscover/autodiscover_events.go
+++ b/pkg/autodiscover/autodiscover_events.go
@@ -25,10 +25,6 @@ import (
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 )

-type Event struct {
-	*corev1.Event
-}
-
 func findAbnormalEvents(oc corev1client.CoreV1Interface, namespaces []string) (abnormalEvents []corev1.Event) {
 	abnormalEvents = []corev1.Event{}
 	for _, ns := range namespaces {
diff --git a/pkg/autodiscover/autodiscover_events_test.go b/pkg/autodiscover/autodiscover_events_test.go
index 417ae8d52..5a3744490 100644
--- a/pkg/autodiscover/autodiscover_events_test.go
+++ b/pkg/autodiscover/autodiscover_events_test.go
@@ -15,3 +15,53 @@
 //	51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.

 package autodiscover
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	k8sfake "k8s.io/client-go/kubernetes/fake"
+)
+
+func TestFindAbnormalEvents(t *testing.T) {
+	testCases := []struct {
+		expectedEvents []*corev1.Event
+	}{
+		{
+			expectedEvents: []*corev1.Event{
+				{
+					Reason: "FailedMount",
+					Type:   "Warning",
+					ObjectMeta: metav1.ObjectMeta{
+						Namespace: "test-namespace",
+						Name:      "test-event",
+					},
+				},
+			},
+		},
+	}
+
+	for _, testCase := range testCases {
+		var runtimeObjects []runtime.Object
+		for _, event := range testCase.expectedEvents {
+			runtimeObjects = append(runtimeObjects, event)
+		}
+
+		// Create fake client
+		client := k8sfake.NewSimpleClientset(runtimeObjects...)
+		abnormalEvents := findAbnormalEvents(client.CoreV1(), []string{"test-namespace"})
+		assert.Len(t, abnormalEvents, len(testCase.expectedEvents))
+
+		for _, event := range abnormalEvents {
+			for _, event2 := range testCase.expectedEvents {
+				if event.Name == event2.Name {
+					assert.Equal(t, event.Reason, event2.Reason)
+					assert.Equal(t, event.Type, event2.Type)
+				}
+			}
+		}
+	}
+}
diff --git a/pkg/autodiscover/autodiscover_networkpolicies_test.go b/pkg/autodiscover/autodiscover_networkpolicies_test.go
index 417ae8d52..9ef556224 100644
--- a/pkg/autodiscover/autodiscover_networkpolicies_test.go
+++ b/pkg/autodiscover/autodiscover_networkpolicies_test.go
@@ -15,3 +15,43 @@
 //	51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.

 package autodiscover
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	networkingv1 "k8s.io/api/networking/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	k8sfake "k8s.io/client-go/kubernetes/fake"
+)
+
+func TestGetNetworkPolicies(t *testing.T) {
+	testCases := []struct {
+		expectedNetworkPolicies []*networkingv1.NetworkPolicy
+	}{
+		{
+			expectedNetworkPolicies: []*networkingv1.NetworkPolicy{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-network-policy",
+						Namespace: "test-namespace",
+					},
+				},
+			},
+		},
+	}
+
+	for _, testCase := range testCases {
+		var runtimeObjects []runtime.Object
+		for _, networkPolicy := range testCase.expectedNetworkPolicies {
+			runtimeObjects = append(runtimeObjects, networkPolicy)
+		}
+
+		// Create fake client
+		client := k8sfake.NewSimpleClientset(runtimeObjects...)
+		networkPolicies, err := getNetworkPolicies(client.NetworkingV1())
+		assert.Nil(t, err)
+		assert.Len(t, networkPolicies, len(testCase.expectedNetworkPolicies))
+	}
+}
diff --git a/pkg/autodiscover/autodiscover_podset_test.go b/pkg/autodiscover/autodiscover_podset_test.go
index 21d43bcc4..40263d538 100644
--- a/pkg/autodiscover/autodiscover_podset_test.go
+++ b/pkg/autodiscover/autodiscover_podset_test.go
@@ -217,3 +217,79 @@ func TestFindHpaControllers(t *testing.T) {
 		assert.Equal(t, tc.expectedResults, hpas)
 	}
 }
+
+func TestFindDeploymentByNameByNamespace(t *testing.T) {
+	generateDeployment := func(name, namespace string) *appsv1.Deployment {
+		return &appsv1.Deployment{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      name,
+				Namespace: namespace,
+			},
+		}
+	}
+
+	testCases := []struct {
+		testDeploymentName      string
+		testDeploymentNamespace string
+		expectedResults         *appsv1.Deployment
+	}{
+		{
+			testDeploymentName:      "testName",
+			testDeploymentNamespace: "testNamespace",
+			expectedResults: &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "testName",
+					Namespace: "testNamespace",
+				},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		var testRuntimeObjects []runtime.Object
+		testRuntimeObjects = append(testRuntimeObjects, generateDeployment(tc.testDeploymentName, tc.testDeploymentNamespace))
+		oc := clientsholder.GetTestClientsHolder(testRuntimeObjects)
+
+		deployment, err := FindDeploymentByNameByNamespace(oc.K8sClient.AppsV1(), tc.testDeploymentNamespace, tc.testDeploymentName)
+		assert.Nil(t, err)
+		assert.Equal(t, tc.expectedResults, deployment)
+	}
+}
+
+func TestFindStatefulSetByNameByNamespace(t *testing.T) {
+	generateStatefulSet := func(name, namespace string) *appsv1.StatefulSet {
+		return &appsv1.StatefulSet{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      name,
+				Namespace: namespace,
+			},
+		}
+	}
+
+	testCases := []struct {
+		testStatefulSetName      string
+		testStatefulSetNamespace string
+		expectedResults          *appsv1.StatefulSet
+	}{
+		{
+			testStatefulSetName:      "testName",
+			testStatefulSetNamespace: "testNamespace",
+			expectedResults: &appsv1.StatefulSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "testName",
+					Namespace: "testNamespace",
+				},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		var testRuntimeObjects []runtime.Object
+		testRuntimeObjects = append(testRuntimeObjects, generateStatefulSet(tc.testStatefulSetName, tc.testStatefulSetNamespace))
+		oc := clientsholder.GetTestClientsHolder(testRuntimeObjects)
+
+		statefulSet, err := FindStatefulsetByNameByNamespace(oc.K8sClient.AppsV1(), tc.testStatefulSetNamespace, tc.testStatefulSetName)
+		assert.Nil(t, err)
+		assert.Equal(t, tc.expectedResults, statefulSet)
+	}
+}
diff --git a/pkg/autodiscover/autodiscover_pv.go b/pkg/autodiscover/autodiscover_pv.go
index b65c712cd..60ec8b299 100644
--- a/pkg/autodiscover/autodiscover_pv.go
+++ b/pkg/autodiscover/autodiscover_pv.go
@@ -19,12 +19,12 @@ package autodiscover
 import (
 	"context"

-	"github.com/test-network-function/cnf-certification-test/internal/clientsholder"
 	"github.com/test-network-function/cnf-certification-test/internal/log"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	storagev1typed "k8s.io/client-go/kubernetes/typed/storage/v1"
 )

 func getPersistentVolumes(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolume, error) {
@@ -43,9 +43,8 @@ func getPersistentVolumeClaims(oc corev1client.CoreV1Interface) ([]corev1.Persis
 	return pvcs.Items, nil
 }

-func getAllStorageClasses() ([]storagev1.StorageClass, error) {
-	o := clientsholder.GetClientsHolder()
-	storageclasslist, err := o.K8sClient.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{})
+func getAllStorageClasses(client storagev1typed.StorageV1Interface) ([]storagev1.StorageClass, error) {
+	storageclasslist, err := client.StorageClasses().List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		log.Error("Error when listing, err: %v", err)
 		return nil, err
diff --git a/pkg/autodiscover/autodiscover_pv_test.go b/pkg/autodiscover/autodiscover_pv_test.go
index ad48ec36a..4694ce4e7 100644
--- a/pkg/autodiscover/autodiscover_pv_test.go
+++ b/pkg/autodiscover/autodiscover_pv_test.go
@@ -22,6 +22,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/test-network-function/cnf-certification-test/internal/clientsholder"
 	corev1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )
@@ -97,3 +98,40 @@ func TestGetPersistentVolumeClaims(t *testing.T) {
 		assert.Equal(t, tc.expectedRQs[0].Name, PersistentVolumesClaims[0].Name)
 	}
 }
+
+func TestGetAllStorageClasses(t *testing.T) {
+	generateStorageClasses := func(name string) *storagev1.StorageClass {
+		return &storagev1.StorageClass{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: name,
+			},
+			Provisioner: name,
+		}
+	}
+
+	testCases := []struct {
+		scName      string
+		expectedRQs []storagev1.StorageClass
+	}{
+		{
+			scName: "test1",
+			expectedRQs: []storagev1.StorageClass{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "test1",
+					},
+					Provisioner: "test1",
+				},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		var testRuntimeObjects []runtime.Object
+		testRuntimeObjects = append(testRuntimeObjects, generateStorageClasses(tc.scName))
+		oc := clientsholder.GetTestClientsHolder(testRuntimeObjects)
+		StorageClasses, err := getAllStorageClasses(oc.K8sClient.StorageV1())
+		assert.Nil(t, err)
+		assert.Equal(t, tc.expectedRQs[0].Name, StorageClasses[0].Name)
+	}
+}
diff --git a/pkg/autodiscover/autodiscover_rbac.go b/pkg/autodiscover/autodiscover_rbac.go
index bbe1d0d01..51b8a4e4f 100644
--- a/pkg/autodiscover/autodiscover_rbac.go
+++ b/pkg/autodiscover/autodiscover_rbac.go
@@ -19,17 +19,16 @@ package autodiscover
 import (
 	"context"

-	"github.com/test-network-function/cnf-certification-test/internal/clientsholder"
 	"github.com/test-network-function/cnf-certification-test/internal/log"
 	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	rbacv1typed "k8s.io/client-go/kubernetes/typed/rbac/v1"
 )

 // getRoleBindings returns all of the rolebindings in the cluster
-func getRoleBindings() ([]rbacv1.RoleBinding, error) {
+func getRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.RoleBinding, error) {
 	// Get all of the rolebindings from all namespaces
-	clientsHolder := clientsholder.GetClientsHolder()
-	roleList, roleErr := clientsHolder.K8sClient.RbacV1().RoleBindings("").List(context.TODO(), metav1.ListOptions{})
+	roleList, roleErr := client.RoleBindings("").List(context.TODO(), metav1.ListOptions{})
 	if roleErr != nil {
 		log.Error("executing rolebinding command failed with error: %v", roleErr)
 		return nil, roleErr
@@ -38,11 +37,10 @@ func getRoleBindings() ([]rbacv1.RoleBinding, error) {
 }

 // getClusterRoleBindings returns all of the clusterrolebindings in the cluster
-func getClusterRoleBindings() ([]rbacv1.ClusterRoleBinding, error) {
+func getClusterRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.ClusterRoleBinding, error) {
 	// Get all of the clusterrolebindings from the cluster
 	// These are not namespaced so we want all of them
-	clientsHolder := clientsholder.GetClientsHolder()
-	crbList, crbErr := clientsHolder.K8sClient.RbacV1().ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{})
+	crbList, crbErr := client.ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{})
 	if crbErr != nil {
 		log.Error("executing clusterrolebinding command failed with error: %v", crbErr)
 		return nil, crbErr
@@ -51,10 +49,9 @@ func getClusterRoleBindings() ([]rbacv1.ClusterRoleBinding, error) {
 }

 // getRoles returns all of the roles in the cluster
-func getRoles() ([]rbacv1.Role, error) {
+func getRoles(client rbacv1typed.RbacV1Interface) ([]rbacv1.Role, error) {
 	// Get all of the roles from all namespaces
-	clientsHolder := clientsholder.GetClientsHolder()
-	roleList, roleErr := clientsHolder.K8sClient.RbacV1().Roles("").List(context.TODO(), metav1.ListOptions{})
+	roleList, roleErr := client.Roles("").List(context.TODO(), metav1.ListOptions{})
 	if roleErr != nil {
 		log.Error("executing roles command failed with error: %v", roleErr)
 		return nil, roleErr
diff --git a/pkg/autodiscover/autodiscover_rbac_test.go b/pkg/autodiscover/autodiscover_rbac_test.go
index cd30358b1..a7510373c 100644
--- a/pkg/autodiscover/autodiscover_rbac_test.go
+++ b/pkg/autodiscover/autodiscover_rbac_test.go
@@ -73,15 +73,22 @@ func buildTestObjects() []runtime.Object {
 }

 func TestGetClusterRoleBinding(t *testing.T) {
-	_ = clientsholder.GetTestClientsHolder(buildTestObjects())
-	gatheredCRBs, err := getClusterRoleBindings()
+	client := clientsholder.GetTestClientsHolder(buildTestObjects())
+	gatheredCRBs, err := getClusterRoleBindings(client.K8sClient.RbacV1())
 	assert.Nil(t, err)
 	assert.Equal(t, "testCRB", gatheredCRBs[0].Name)
 }

 func TestGetRoleBinding(t *testing.T) {
-	_ = clientsholder.GetTestClientsHolder(buildTestObjects())
-	gatheredRBs, err := getRoleBindings()
+	client := clientsholder.GetTestClientsHolder(buildTestObjects())
+	gatheredRBs, err := getRoleBindings(client.K8sClient.RbacV1())
 	assert.Nil(t, err)
 	assert.Equal(t, "testRB", gatheredRBs[0].Name)
 }
+
+func TestGetRoles(t *testing.T) {
+	client := clientsholder.GetTestClientsHolder(buildTestObjects())
+	gatheredRoles, err := getRoles(client.K8sClient.RbacV1())
+	assert.Nil(t, err)
+	assert.Equal(t, "testRole", gatheredRoles[0].Name)
+}
diff --git a/version.json b/version.json
index db0310338..3ecaae059 100644
--- a/version.json
+++ b/version.json
@@ -1,5 +1,5 @@
 {
   "partner_tag": "v4.5.7",
   "claimFormat": "v0.4.0",
-  "parserTag": "v0.4.0"
+  "parserTag": "v0.4.2"
 }
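
Reviewer note: the recurring pattern in this patch is dependency injection of typed client-go interfaces (RbacV1Interface, StorageV1Interface, CoreV1Interface) so that each helper can be unit-tested against a fake clientset instead of resolving the global clientsholder. A minimal sketch of that pattern follows, assuming nothing beyond client-go and testify; listConfigMaps and TestListConfigMaps are hypothetical names used only for illustration and are not part of this change.

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sfake "k8s.io/client-go/kubernetes/fake"
	corev1typed "k8s.io/client-go/kubernetes/typed/core/v1"
)

// listConfigMaps is a hypothetical helper written in the same style as
// getRoles/getAllStorageClasses above: it receives the typed interface it
// needs rather than fetching a global clients holder.
func listConfigMaps(client corev1typed.CoreV1Interface, namespace string) ([]corev1.ConfigMap, error) {
	cmList, err := client.ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	return cmList.Items, nil
}

// The unit test then seeds a fake clientset with the objects it expects,
// mirroring the new autodiscover tests in this patch.
func TestListConfigMaps(t *testing.T) {
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test-namespace"}}
	client := k8sfake.NewSimpleClientset(cm)

	cms, err := listConfigMaps(client.CoreV1(), "test-namespace")
	assert.Nil(t, err)
	assert.Len(t, cms, 1)
	assert.Equal(t, "test-cm", cms[0].Name)
}

In production code the caller passes the real typed client taken from the clients holder, while tests pass the CoreV1() view of k8sfake.NewSimpleClientset; the helper itself stays identical in both paths.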