From 3bb9620da6832dc1917bcc0d4d3dca0e48a15905 Mon Sep 17 00:00:00 2001
From: jmontesi
Date: Thu, 7 Dec 2023 11:20:17 +0100
Subject: [PATCH] log: remove logrus and migrate all logs to slog

---
 .gitignore | 4 +-
 cmd/tnf/claim/add/add.go | 26 ++--
 cmd/tnf/claim/compare/compare.go | 9 +-
 cmd/tnf/generate/catalog/catalog.go | 4 +-
 cmd/tnf/main.go | 7 +-
 .../accesscontrol/namespace/namespace.go | 8 +-
 .../accesscontrol/rbac/automount.go | 4 +-
 .../accesscontrol/resources/resources.go | 20 +--
 .../securitycontextcontainer.go | 72 +++++----
 cnf-certification-test/accesscontrol/suite.go | 134 ++++++++----
 cnf-certification-test/certification/suite.go | 33 ++---
 .../chaostesting/pod_delete/pod_delete.go | 40 +++---
 cnf-certification-test/chaostesting/suite.go | 10 +-
 .../ownerreference/ownerreference.go | 6 +-
 .../lifecycle/podrecreation/podrecreation.go | 20 +--
 .../lifecycle/podsets/podsets.go | 48 +++----
 .../lifecycle/scaling/crd_scaling.go | 42 +++---
 .../lifecycle/scaling/deployment_scaling.go | 42 +++---
 .../lifecycle/scaling/statefulset_scaling.go | 47 +++---
 cnf-certification-test/lifecycle/suite.go | 124 ++++++++--------
 cnf-certification-test/manageability/suite.go | 15 +-
 .../networking/icmp/icmp.go | 22 +--
 .../networking/netcommons/netcommons.go | 10 +-
 .../networking/policies/policies.go | 8 +-
 .../networking/services/services.go | 8 +-
 cnf-certification-test/networking/suite.go | 41 +++---
 cnf-certification-test/observability/suite.go | 30 ++--
 .../operator/phasecheck/phasecheck.go | 17 ++-
 cnf-certification-test/operator/suite.go | 13 +-
 cnf-certification-test/performance/suite.go | 64 +++++---
 .../platform/bootparams/bootparams.go | 6 +-
 .../platform/cnffsdiff/fsdiff.go | 16 +--
 .../platform/hugepages/hugepages.go | 28 ++--
 .../platform/isredhat/isredhat.go | 8 +-
 .../platform/nodetainted/nodetainted.go | 11 +-
 cnf-certification-test/platform/suite.go | 128 ++++++++---------
 cnf-certification-test/preflight/suite.go | 42 +++---
 cnf-certification-test/results/archiver.go | 6 +-
 cnf-certification-test/tnf_config.yml | 2 +-
 cnf-certification-test/webserver/webserver.go | 51 +++----
 docs/index.md | 2 +-
 docs/test-container.md | 2 +-
 docs/test-output.md | 2 +-
 go.mod | 2 +-
 internal/clientsholder/clientsholder.go | 16 ++-
 internal/clientsholder/command.go | 16 +--
 internal/crclient/crclient.go | 12 +-
 internal/log/log.go | 27 +++-
 main.go | 89 ++++++------
 pkg/autodiscover/autodiscover.go | 56 +++++---
 pkg/autodiscover/autodiscover_crds.go | 6 +-
 pkg/autodiscover/autodiscover_events.go | 4 +-
 pkg/autodiscover/autodiscover_operators.go | 42 +++---
 pkg/autodiscover/autodiscover_pods.go | 6 +-
 pkg/autodiscover/autodiscover_podset.go | 32 ++---
 pkg/autodiscover/autodiscover_pv.go | 4 +-
 pkg/autodiscover/autodiscover_rbac.go | 8 +-
 pkg/autodiscover/autodiscover_scales.go | 17 ++-
 pkg/certsuite/certsuite.go | 23 +--
 pkg/checksdb/check.go | 9 +-
 pkg/checksdb/checksdb.go | 23 +--
 pkg/checksdb/checksgroup.go | 44 +++--
 pkg/checksdb/labels.go | 4 +-
 pkg/claimhelper/claimhelper.go | 37 +++--
 pkg/compatibility/compatibility.go | 6 +-
 pkg/configuration/utils.go | 8 +-
 pkg/diagnostics/diagnostics.go | 28 ++--
 pkg/loghelper/loghelper.go | 23 +--
 pkg/loghelper/loghelper_test.go | 5 +-
 pkg/provider/containers.go | 24 ++--
 pkg/provider/filters.go | 4 +-
 pkg/provider/isolation.go | 22 +--
 pkg/provider/operators.go | 37 ++---
 pkg/provider/pods.go | 25 ++--
 pkg/provider/provider.go | 31 ++--
 pkg/provider/scale_object.go | 4 +-
 pkg/scheduling/scheduling.go | 15 +-
 pkg/testhelper/testhelper.go | 27
---- pkg/testhelper/testhelper_test.go | 46 ------ pkg/tnf/doc.go | 20 --- pkg/tnf/doc_test.go | 20 --- pkg/tnf/status.go | 19 --- pkg/tnf/status_test.go | 1 - run-cnf-suites.sh | 4 +- 84 files changed, 980 insertions(+), 1098 deletions(-) delete mode 100644 pkg/tnf/doc.go delete mode 100644 pkg/tnf/doc_test.go delete mode 100644 pkg/tnf/status.go delete mode 100644 pkg/tnf/status_test.go diff --git a/.gitignore b/.gitignore index 18990b958..4096145cb 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,7 @@ bin/ catalog.json claim.json -tnf-execution.log -cnf-certification-test/tnf-execution.log -cnf-certification-test/cnf-test-log +cnf-certification-test/cnf-certsuite.log .idea vendor *.test diff --git a/cmd/tnf/claim/add/add.go b/cmd/tnf/claim/add/add.go index c0f6c0fdc..7cd3509cb 100644 --- a/cmd/tnf/claim/add/add.go +++ b/cmd/tnf/claim/add/add.go @@ -7,8 +7,8 @@ import ( "encoding/json" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/junit" "github.com/test-network-function/test-network-function-claim/pkg/claim" ) @@ -34,24 +34,27 @@ func claimUpdate(_ *cobra.Command, _ []string) error { fileUpdated := false dat, err := os.ReadFile(*claimFileTextPtr) if err != nil { - log.Fatalf("Error reading claim file :%v", err) + log.Error("Error reading claim file :%v", err) + os.Exit(1) } claimRoot := readClaim(&dat) junitMap := claimRoot.Claim.RawResults items, err := os.ReadDir(*reportFilesTextPtr) if err != nil { - log.Fatalf("Error reading directory: %v", err) + log.Error("Error reading directory: %v", err) + os.Exit(1) } for _, item := range items { fileName := item.Name() extension := filepath.Ext(fileName) reportKeyName := fileName[0 : len(fileName)-len(extension)] if _, ok := junitMap[reportKeyName]; ok { - log.Printf("Skipping: %s already exists in supplied `%s` claim file", reportKeyName, *claimFileTextPtr) + log.Info("Skipping: %s already exists in supplied `%s` claim file", reportKeyName, *claimFileTextPtr) } else { junitMap[reportKeyName], err = junit.ExportJUnitAsMap(fmt.Sprintf("%s/%s", *reportFilesTextPtr, item.Name())) if err != nil { - log.Fatalf("Error reading JUnit XML file into JSON: %v", err) + log.Error("Error reading JUnit XML file into JSON: %v", err) + os.Exit(1) } fileUpdated = true } @@ -59,16 +62,18 @@ func claimUpdate(_ *cobra.Command, _ []string) error { claimRoot.Claim.RawResults = junitMap payload, err := json.MarshalIndent(claimRoot, "", " ") if err != nil { - log.Fatalf("Failed to generate the claim: %v", err) + log.Error("Failed to generate the claim: %v", err) + os.Exit(1) } err = os.WriteFile(*claimFileTextPtr, payload, claimFilePermissions) if err != nil { - log.Fatalf("Error writing claim data:\n%s", string(payload)) + log.Error("Error writing claim data:\n%s", string(payload)) + os.Exit(1) } if fileUpdated { - log.Printf("Claim file `%s` updated\n", *claimFileTextPtr) + log.Info("Claim file `%s` updated\n", *claimFileTextPtr) } else { - log.Printf("No changes were applied to `%s`\n", *claimFileTextPtr) + log.Info("No changes were applied to `%s`\n", *claimFileTextPtr) } return nil } @@ -77,7 +82,8 @@ func readClaim(contents *[]byte) *claim.Root { var claimRoot claim.Root err := json.Unmarshal(*contents, &claimRoot) if err != nil { - log.Fatalf("Error reading claim constents file into type: %v", err) + log.Error("Error reading claim constents file into type: %v", err) + os.Exit(1) } return &claimRoot } diff --git 
a/cmd/tnf/claim/compare/compare.go b/cmd/tnf/claim/compare/compare.go index 1c78f703a..d6e9fe40d 100644 --- a/cmd/tnf/claim/compare/compare.go +++ b/cmd/tnf/claim/compare/compare.go @@ -5,13 +5,13 @@ import ( "fmt" "os" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/test-network-function/cnf-certification-test/cmd/tnf/claim/compare/configurations" "github.com/test-network-function/cnf-certification-test/cmd/tnf/claim/compare/nodes" "github.com/test-network-function/cnf-certification-test/cmd/tnf/claim/compare/testcases" "github.com/test-network-function/cnf-certification-test/cmd/tnf/claim/compare/versions" "github.com/test-network-function/cnf-certification-test/cmd/tnf/pkg/claim" + "github.com/test-network-function/cnf-certification-test/internal/log" ) const longHelp = `Compares sections of both claim files and the differences are shown in a table per section. @@ -97,12 +97,12 @@ func NewCommand() *cobra.Command { ) err := claimCompareFiles.MarkFlagRequired("claim1") if err != nil { - log.Errorf("Failed to mark flag claim1 as required: %v", err) + log.Error("Failed to mark flag claim1 as required: %v", err) return nil } err = claimCompareFiles.MarkFlagRequired("claim2") if err != nil { - log.Errorf("Failed to mark flag claim2 as required: %v", err) + log.Error("Failed to mark flag claim2 as required: %v", err) return nil } @@ -112,7 +112,8 @@ func NewCommand() *cobra.Command { func claimCompare(_ *cobra.Command, _ []string) error { err := claimCompareFilesfunc(Claim1FilePathFlag, Claim2FilePathFlag) if err != nil { - log.Fatalf("Error comparing claim files: %v", err) + log.Error("Error comparing claim files: %v", err) + os.Exit(1) } return nil } diff --git a/cmd/tnf/generate/catalog/catalog.go b/cmd/tnf/generate/catalog/catalog.go index 7d51082c7..aaf40489f 100644 --- a/cmd/tnf/generate/catalog/catalog.go +++ b/cmd/tnf/generate/catalog/catalog.go @@ -23,8 +23,8 @@ import ( "sort" "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/arrayhelper" "github.com/test-network-function/test-network-function-claim/pkg/claim" @@ -254,7 +254,7 @@ func summaryToMD(aSummary catalogSummary) (out string) { func outputJS() { out, err := json.MarshalIndent(identifiers.Classification, "", " ") if err != nil { - logrus.Errorf("could not Marshall classification, err=%s", err) + log.Error("could not Marshall classification, err=%s", err) return } fmt.Printf("classification= %s ", out) diff --git a/cmd/tnf/main.go b/cmd/tnf/main.go index 805874b32..44364379a 100644 --- a/cmd/tnf/main.go +++ b/cmd/tnf/main.go @@ -1,8 +1,10 @@ package main import ( - log "github.com/sirupsen/logrus" + "os" + "github.com/spf13/cobra" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/cmd/tnf/check" "github.com/test-network-function/cnf-certification-test/cmd/tnf/claim" @@ -22,6 +24,7 @@ func main() { rootCmd.AddCommand(check.NewCommand()) if err := rootCmd.Execute(); err != nil { - log.Fatal(err) + log.Error("%v", err) + os.Exit(1) } } diff --git a/cnf-certification-test/accesscontrol/namespace/namespace.go b/cnf-certification-test/accesscontrol/namespace/namespace.go index ed7b78321..31d96dd68 100644 --- a/cnf-certification-test/accesscontrol/namespace/namespace.go +++ 
b/cnf-certification-test/accesscontrol/namespace/namespace.go @@ -20,8 +20,8 @@ import ( "context" "fmt" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/loghelper" "github.com/test-network-function/cnf-certification-test/pkg/stringhelper" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -41,7 +41,7 @@ func TestCrsNamespaces(crds []*apiextv1.CustomResourceDefinition, configNamespac } for namespace, crNames := range crNamespaces { if !stringhelper.StringInSlice(configNamespaces, namespace, false) { - logrus.Debugf("CRD: %s (kind:%s/ plural:%s) has CRs %v deployed in namespace (%s) not in configured namespaces %v", + log.Debug("CRD: %s (kind:%s/ plural:%s) has CRs %v deployed in namespace (%s) not in configured namespaces %v", crd.Name, crd.Spec.Names.Kind, crd.Spec.Names.Plural, crNames, namespace, configNamespaces) // Initialize this map dimension before use if invalidCrs[crd.Name] == nil { @@ -64,10 +64,10 @@ func getCrsPerNamespaces(aCrd *apiextv1.CustomResourceDefinition) (crdNamespaces Version: version.Name, Resource: aCrd.Spec.Names.Plural, } - logrus.Debugf("Looking for CRs from CRD: %s api version:%s group:%s plural:%s", aCrd.Name, version.Name, aCrd.Spec.Group, aCrd.Spec.Names.Plural) + log.Debug("Looking for CRs from CRD: %s api version:%s group:%s plural:%s", aCrd.Name, version.Name, aCrd.Spec.Group, aCrd.Spec.Names.Plural) crs, err := oc.DynamicClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorf("error getting %s: %v\n", aCrd.Name, err) + log.Error("error getting %s: %v\n", aCrd.Name, err) return crdNamespaces, err } crdNamespaces = make(map[string][]string) diff --git a/cnf-certification-test/accesscontrol/rbac/automount.go b/cnf-certification-test/accesscontrol/rbac/automount.go index d475cf6ff..e0c85ec7a 100644 --- a/cnf-certification-test/accesscontrol/rbac/automount.go +++ b/cnf-certification-test/accesscontrol/rbac/automount.go @@ -20,8 +20,8 @@ import ( "context" "fmt" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -30,7 +30,7 @@ func AutomountServiceAccountSetOnSA(serviceAccountName, podNamespace string) (*b clientsHolder := clientsholder.GetClientsHolder() sa, err := clientsHolder.K8sClient.CoreV1().ServiceAccounts(podNamespace).Get(context.TODO(), serviceAccountName, metav1.GetOptions{}) if err != nil { - logrus.Errorf("executing serviceaccount command failed with error: %v", err) + log.Error("executing serviceaccount command failed with error: %v", err) return nil, err } return sa.AutomountServiceAccountToken, nil diff --git a/cnf-certification-test/accesscontrol/resources/resources.go b/cnf-certification-test/accesscontrol/resources/resources.go index 2369aa709..5f0455aff 100644 --- a/cnf-certification-test/accesscontrol/resources/resources.go +++ b/cnf-certification-test/accesscontrol/resources/resources.go @@ -1,40 +1,40 @@ package resources import ( + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/provider" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" ) func HasRequestsAndLimitsSet(cut 
*provider.Container) bool { passed := true // Parse the limits. if len(cut.Resources.Limits) == 0 { - tnf.ClaimFilePrintf("Container has been found missing resource limits: %s", cut.String()) + log.Debug("Container has been found missing resource limits: %s", cut.String()) passed = false } else { if cut.Resources.Limits.Cpu().IsZero() { - tnf.ClaimFilePrintf("Container has been found missing CPU limits: %s", cut.String()) + log.Debug("Container has been found missing CPU limits: %s", cut.String()) passed = false } if cut.Resources.Limits.Memory().IsZero() { - tnf.ClaimFilePrintf("Container has been found missing memory limits: %s", cut.String()) + log.Debug("Container has been found missing memory limits: %s", cut.String()) passed = false } } // Parse the requests. if len(cut.Resources.Requests) == 0 { - tnf.ClaimFilePrintf("Container has been found missing resource requests: %s", cut.String()) + log.Debug("Container has been found missing resource requests: %s", cut.String()) passed = false } else { if cut.Resources.Requests.Cpu().IsZero() { - tnf.ClaimFilePrintf("Container has been found missing CPU requests: %s", cut.String()) + log.Debug("Container has been found missing CPU requests: %s", cut.String()) passed = false } if cut.Resources.Requests.Memory().IsZero() { - tnf.ClaimFilePrintf("Container has been found missing memory requests: %s", cut.String()) + log.Debug("Container has been found missing memory requests: %s", cut.String()) passed = false } } @@ -48,14 +48,14 @@ func HasExclusiveCPUsAssigned(cut *provider.Container) bool { // if no cpu or memory limits are specified the container will run in the shared cpu pool if cpuLimits.IsZero() || memLimits.IsZero() { - tnf.ClaimFilePrintf("Container has been found missing cpu/memory resource limits: %s", cut.String()) + log.Debug("Container has been found missing cpu/memory resource limits: %s", cut.String()) return false } // if the cpu limits quantity is not an integer the container will run in the shared cpu pool cpuLimitsVal, isInteger := cpuLimits.AsInt64() if !isInteger { - tnf.ClaimFilePrintf("Container's cpu resource limit is not an integer: %s", cut.String()) + log.Debug("Container's cpu resource limit is not an integer: %s", cut.String()) return false } @@ -68,6 +68,6 @@ func HasExclusiveCPUsAssigned(cut *provider.Container) bool { } // if the cpu limits and request are different, the container will run in the shared cpu pool - tnf.ClaimFilePrintf("Container's cpu/memory resources and limits are not equal to each other: %s", cut.String()) + log.Debug("Container's cpu/memory resources and limits are not equal to each other: %s", cut.String()) return false } diff --git a/cnf-certification-test/accesscontrol/securitycontextcontainer/securitycontextcontainer.go b/cnf-certification-test/accesscontrol/securitycontextcontainer/securitycontextcontainer.go index bca9e8483..3658e2756 100644 --- a/cnf-certification-test/accesscontrol/securitycontextcontainer/securitycontextcontainer.go +++ b/cnf-certification-test/accesscontrol/securitycontextcontainer/securitycontextcontainer.go @@ -8,9 +8,9 @@ import ( "reflect" "sort" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/stringhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" corev1 "k8s.io/api/core/v1" ) @@ -289,7 +289,7 @@ func checkContainerCategory(containers []corev1.Container, containerSCC Containe for 
j := 0; j < len(containers); j++ { cut := &provider.Container{Podname: podName, Namespace: nameSpace, Container: &containers[j]} percontainerSCC := GetContainerSCC(cut, containerSCC) - tnf.ClaimFilePrintf("containerSCC %s is %+v", cut, percontainerSCC) + log.Debug("containerSCC %s is %+v", cut, percontainerSCC) // after building the containerSCC need to check to which category it is categoryinfo = PodListCategory{ Containername: cut.Name, @@ -297,10 +297,10 @@ func checkContainerCategory(containers []corev1.Container, containerSCC Containe NameSpace: nameSpace, } if compareCategory(&Category1, &percontainerSCC, CategoryID1) { - tnf.ClaimFilePrintf("Testing if pod belongs to category1 ") + log.Debug("Testing if pod belongs to category1 ") categoryinfo.Category = CategoryID1 } else if compareCategory(&Category1NoUID0, &percontainerSCC, CategoryID1NoUID0) { - tnf.ClaimFilePrintf("Testing if pod belongs to category1NoUID0 ") + log.Debug("Testing if pod belongs to category1NoUID0 ") categoryinfo.Category = CategoryID1NoUID0 } else if compareCategory(&Category2, &percontainerSCC, CategoryID2) { categoryinfo.Category = CategoryID2 @@ -359,15 +359,15 @@ func CheckPod(pod *provider.Pod) []PodListCategory { //nolint:funlen func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) bool { result := true - tnf.ClaimFilePrintf("Testing if pod belongs to category %s", &id) + log.Debug("Testing if pod belongs to category %s", &id) // AllVolumeAllowed reports whether the volumes in the container are compliant to the SCC (same volume list for all SCCs). // True means that all volumes declared in the pod are allowed in the SCC. // False means that at least one volume is disallowed if refCategory.AllVolumeAllowed == containerSCC.AllVolumeAllowed { - tnf.ClaimFilePrintf("AllVolumeAllowed = %s - OK", containerSCC.AllVolumeAllowed) + log.Debug("AllVolumeAllowed = %s - OK", containerSCC.AllVolumeAllowed) } else { result = false - tnf.ClaimFilePrintf("AllVolumeAllowed = %s but expected >=<=%s - NOK", containerSCC.AllVolumeAllowed, refCategory.AllVolumeAllowed) + log.Debug("AllVolumeAllowed = %s but expected >=<=%s - NOK", containerSCC.AllVolumeAllowed, refCategory.AllVolumeAllowed) } // RunAsUserPresent reports whether the RunAsUser Field is set to something other than nil as requested by All SCC categories. // True means that the RunAsUser Field is set. @@ -379,17 +379,17 @@ func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) boo // uidRangeMin: 1000 // uidRangeMax: 2000 if refCategory.RunAsUserPresent == containerSCC.RunAsUserPresent { - tnf.ClaimFilePrintf("RunAsUserPresent = %s - OK", containerSCC.RunAsUserPresent) + log.Debug("RunAsUserPresent = %s - OK", containerSCC.RunAsUserPresent) } else { - tnf.ClaimFilePrintf("RunAsUserPresent = %s but expected %s - NOK", containerSCC.RunAsUserPresent, refCategory.RunAsUserPresent) + log.Debug("RunAsUserPresent = %s but expected %s - NOK", containerSCC.RunAsUserPresent, refCategory.RunAsUserPresent) result = false } // RunAsNonRoot is true if the RunAsNonRoot field is set to true, false otherwise. // if setting a range including the roor UID 0 ( for instance 0-2000), then this option can disallow it. 
if refCategory.RunAsNonRoot >= containerSCC.RunAsNonRoot { - tnf.ClaimFilePrintf("RunAsNonRoot = %s - OK", containerSCC.RunAsNonRoot) + log.Debug("RunAsNonRoot = %s - OK", containerSCC.RunAsNonRoot) } else { - tnf.ClaimFilePrintf("RunAsNonRoot = %s but expected %s - NOK", containerSCC.RunAsNonRoot, refCategory.RunAsNonRoot) + log.Debug("RunAsNonRoot = %s but expected %s - NOK", containerSCC.RunAsNonRoot, refCategory.RunAsNonRoot) result = false } // FsGroupPresent reports whether the FsGroup Field is set to something other than nil as requested by All SCC categories. @@ -403,92 +403,92 @@ func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) boo // - min: 1000900000 // max: 1000900010 if refCategory.FsGroupPresent == containerSCC.FsGroupPresent { - tnf.ClaimFilePrintf("FsGroupPresent = %s - OK", containerSCC.FsGroupPresent) + log.Debug("FsGroupPresent = %s - OK", containerSCC.FsGroupPresent) } else { - tnf.ClaimFilePrintf("FsGroupPresent = %s but expected %s - NOK", containerSCC.FsGroupPresent, refCategory.FsGroupPresent) + log.Debug("FsGroupPresent = %s but expected %s - NOK", containerSCC.FsGroupPresent, refCategory.FsGroupPresent) result = false } // RequiredDropCapabilitiesPresent is true if the drop DropCapabilities field has at least the set of required drop capabilities ( same required set for all categories ). // False means that some required DropCapabilities are missing. if refCategory.RequiredDropCapabilitiesPresent == containerSCC.RequiredDropCapabilitiesPresent { - tnf.ClaimFilePrintf("DropCapabilities list - OK") + log.Debug("DropCapabilities list - OK") } else { - tnf.ClaimFilePrintf("RequiredDropCapabilitiesPresent = %s but expected %s - NOK", containerSCC.RequiredDropCapabilitiesPresent, refCategory.RequiredDropCapabilitiesPresent) - tnf.ClaimFilePrintf("its didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value ") + log.Debug("RequiredDropCapabilitiesPresent = %s but expected %s - NOK", containerSCC.RequiredDropCapabilitiesPresent, refCategory.RequiredDropCapabilitiesPresent) + log.Debug("its didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value ") result = false } // HostDirVolumePluginPresent is true if a hostpath volume is configured, false otherwise. // It is a deprecated field and is derived from the volume list currently configured in the container. // see https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html#use-the-hostpath-volume-plugin if refCategory.HostDirVolumePluginPresent == containerSCC.HostDirVolumePluginPresent { - tnf.ClaimFilePrintf("HostDirVolumePluginPresent = %s - OK", containerSCC.HostDirVolumePluginPresent) + log.Debug("HostDirVolumePluginPresent = %s - OK", containerSCC.HostDirVolumePluginPresent) } else { - tnf.ClaimFilePrintf("HostDirVolumePluginPresent = %s but expected %s - NOK", containerSCC.HostDirVolumePluginPresent, refCategory.HostDirVolumePluginPresent) + log.Debug("HostDirVolumePluginPresent = %s but expected %s - NOK", containerSCC.HostDirVolumePluginPresent, refCategory.HostDirVolumePluginPresent) result = false } // HostIPC is true if the HostIPC field is set to true, false otherwise. 
if refCategory.HostIPC >= containerSCC.HostIPC { - tnf.ClaimFilePrintf("HostIPC = %s - OK", containerSCC.HostIPC) + log.Debug("HostIPC = %s - OK", containerSCC.HostIPC) } else { result = false - tnf.ClaimFilePrintf("HostIPC = %s but expected <= %s - NOK", containerSCC.HostIPC, refCategory.HostIPC) + log.Debug("HostIPC = %s but expected <= %s - NOK", containerSCC.HostIPC, refCategory.HostIPC) } // HostNetwork is true if the HostNetwork field is set to true, false otherwise. if refCategory.HostNetwork >= containerSCC.HostNetwork { - tnf.ClaimFilePrintf("HostNetwork = %s - OK", containerSCC.HostNetwork) + log.Debug("HostNetwork = %s - OK", containerSCC.HostNetwork) } else { result = false - tnf.ClaimFilePrintf("HostNetwork = %s but expected <= %s - NOK", containerSCC.HostNetwork, refCategory.HostNetwork) + log.Debug("HostNetwork = %s but expected <= %s - NOK", containerSCC.HostNetwork, refCategory.HostNetwork) } // HostPID is true if the HostPID field is set to true, false otherwise. if refCategory.HostPID >= containerSCC.HostPID { - tnf.ClaimFilePrintf("HostPID = %s - OK", containerSCC.HostPID) + log.Debug("HostPID = %s - OK", containerSCC.HostPID) } else { result = false - tnf.ClaimFilePrintf("HostPID = %s but expected <= %s - NOK", containerSCC.HostPID, refCategory.HostPID) + log.Debug("HostPID = %s but expected <= %s - NOK", containerSCC.HostPID, refCategory.HostPID) } // HostPorts is true if the HostPorts field is set to true, false otherwise. if refCategory.HostPorts >= containerSCC.HostPorts { - tnf.ClaimFilePrintf("HostPorts = %s - OK", containerSCC.HostPorts) + log.Debug("HostPorts = %s - OK", containerSCC.HostPorts) } else { result = false - tnf.ClaimFilePrintf("HostPorts = %s but expected <= %s - NOK", containerSCC.HostPorts, refCategory.HostPorts) + log.Debug("HostPorts = %s but expected <= %s - NOK", containerSCC.HostPorts, refCategory.HostPorts) } // PrivilegeEscalation is true if the PrivilegeEscalation field is set to true, false otherwise. if refCategory.PrivilegeEscalation >= containerSCC.PrivilegeEscalation { - tnf.ClaimFilePrintf("HostNetwork = %s - OK", containerSCC.HostNetwork) + log.Debug("HostNetwork = %s - OK", containerSCC.HostNetwork) } else { result = false - tnf.ClaimFilePrintf("PrivilegeEscalation = %s but expected <= %s - NOK", containerSCC.PrivilegeEscalation, refCategory.PrivilegeEscalation) + log.Debug("PrivilegeEscalation = %s but expected <= %s - NOK", containerSCC.PrivilegeEscalation, refCategory.PrivilegeEscalation) } // PrivilegedContainer is true if the PrivilegedContainer field is set to true, false otherwise. if refCategory.PrivilegedContainer >= containerSCC.PrivilegedContainer { - tnf.ClaimFilePrintf("PrivilegedContainer = %s - OK", containerSCC.PrivilegedContainer) + log.Debug("PrivilegedContainer = %s - OK", containerSCC.PrivilegedContainer) } else { result = false - tnf.ClaimFilePrintf("PrivilegedContainer = %s but expected <= %s - NOK", containerSCC.PrivilegedContainer, refCategory.PrivilegedContainer) + log.Debug("PrivilegedContainer = %s but expected <= %s - NOK", containerSCC.PrivilegedContainer, refCategory.PrivilegedContainer) } // ReadOnlyRootFilesystem is true if the ReadOnlyRootFilesystem field is set to true, false otherwise. 
if refCategory.ReadOnlyRootFilesystem >= containerSCC.ReadOnlyRootFilesystem { - tnf.ClaimFilePrintf("ReadOnlyRootFilesystem = %s - OK", containerSCC.ReadOnlyRootFilesystem) + log.Debug("ReadOnlyRootFilesystem = %s - OK", containerSCC.ReadOnlyRootFilesystem) } else { result = false - tnf.ClaimFilePrintf("ReadOnlyRootFilesystem = %s but expected <= %s - NOK", containerSCC.ReadOnlyRootFilesystem, refCategory.ReadOnlyRootFilesystem) + log.Debug("ReadOnlyRootFilesystem = %s but expected <= %s - NOK", containerSCC.ReadOnlyRootFilesystem, refCategory.ReadOnlyRootFilesystem) } // SeLinuxContextPresent is true if the SeLinuxContext field is present and set to a value (e.g. not nil), false otherwise. // An SELinuxContext strategy of MustRunAs with no level set. Admission looks for the openshift.io/sa.scc.mcs annotation to populate the level. if refCategory.SeLinuxContextPresent == containerSCC.SeLinuxContextPresent { - tnf.ClaimFilePrintf("SeLinuxContextPresent is not nil - OK") + log.Debug("SeLinuxContextPresent is not nil - OK") } else { result = false - tnf.ClaimFilePrintf("SeLinuxContextPresent = %s but expected %s expected to be non nil - NOK", containerSCC.SeLinuxContextPresent, refCategory.SeLinuxContextPresent) + log.Debug("SeLinuxContextPresent = %s but expected %s expected to be non nil - NOK", containerSCC.SeLinuxContextPresent, refCategory.SeLinuxContextPresent) } // CapabilitiesCategory indicates the lowest SCC category to which the list of capabilities.add in the container can be mapped to. if refCategory.CapabilitiesCategory != containerSCC.CapabilitiesCategory { result = false - tnf.ClaimFilePrintf("CapabilitiesCategory = %s but expected %s - NOK", containerSCC.CapabilitiesCategory, refCategory.CapabilitiesCategory) + log.Debug("CapabilitiesCategory = %s but expected %s - NOK", containerSCC.CapabilitiesCategory, refCategory.CapabilitiesCategory) } else { - tnf.ClaimFilePrintf("CapabilitiesCategory list is as expected %s - OK", containerSCC.CapabilitiesCategory) + log.Debug("CapabilitiesCategory list is as expected %s - OK", containerSCC.CapabilitiesCategory) } return result } diff --git a/cnf-certification-test/accesscontrol/suite.go b/cnf-certification-test/accesscontrol/suite.go index b56c75cb8..4c203438f 100644 --- a/cnf-certification-test/accesscontrol/suite.go +++ b/cnf-certification-test/accesscontrol/suite.go @@ -17,11 +17,9 @@ package accesscontrol import ( - "fmt" "strconv" "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/accesscontrol/namespace" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/accesscontrol/rbac" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/accesscontrol/resources" @@ -32,11 +30,11 @@ import ( "github.com/test-network-function/cnf-certification-test/cnf-certification-test/networking/services" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" "github.com/test-network-function/cnf-certification-test/internal/crclient" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/stringhelper" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" rbacv1 "k8s.io/api/rbac/v1" ) @@ -58,7 +56,7 @@ var 
( env provider.TestEnvironment beforeEachFn = func(check *checksdb.Check) error { - logrus.Infof("Check %s: getting test environment.", check.ID) + check.LogInfo("Check %s: getting test environment.", check.ID) env = provider.GetTestEnvironment() return nil } @@ -66,7 +64,7 @@ var ( //nolint:funlen func LoadChecks() { - logrus.Debugf("Entering %s suite", common.AccessControlTestKey) + log.Debug("Loading %s checks", common.AccessControlTestKey) checksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey). WithBeforeEachFn(beforeEachFn) @@ -288,7 +286,7 @@ func LoadChecks() { })) } -func checkForbiddenCapability(containers []*provider.Container, capability string) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { +func checkForbiddenCapability(check *checksdb.Check, containers []*provider.Container, capability string) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { for _, cut := range containers { compliant := true @@ -302,7 +300,7 @@ func checkForbiddenCapability(containers []*provider.Container, capability strin if compliant { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "No forbidden capability "+capability+" detected in container", true)) } else { - tnf.ClaimFilePrintf("Non compliant %s capability detected in container %s. All container caps: %s", capability, cut.String(), cut.SecurityContext.Capabilities.String()) + check.LogDebug("Non compliant %s capability detected in container %s. All container caps: %s", capability, cut.String(), cut.SecurityContext.Capabilities.String()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Non compliant capability "+capability+" in container", false).AddField(testhelper.SCCCapability, capability)) } } @@ -310,27 +308,27 @@ func checkForbiddenCapability(containers []*provider.Container, capability strin } func testSysAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) { - compliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, "SYS_ADMIN") + compliantObjects, nonCompliantObjects := checkForbiddenCapability(check, env.Containers, "SYS_ADMIN") check.SetResult(compliantObjects, nonCompliantObjects) } func testNetAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) { - compliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, "NET_ADMIN") + compliantObjects, nonCompliantObjects := checkForbiddenCapability(check, env.Containers, "NET_ADMIN") check.SetResult(compliantObjects, nonCompliantObjects) } func testNetRawCapability(check *checksdb.Check, env *provider.TestEnvironment) { - compliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, "NET_RAW") + compliantObjects, nonCompliantObjects := checkForbiddenCapability(check, env.Containers, "NET_RAW") check.SetResult(compliantObjects, nonCompliantObjects) } func testIpcLockCapability(check *checksdb.Check, env *provider.TestEnvironment) { - compliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, "IPC_LOCK") + compliantObjects, nonCompliantObjects := checkForbiddenCapability(check, env.Containers, "IPC_LOCK") check.SetResult(compliantObjects, nonCompliantObjects) } func testBpfCapability(check *checksdb.Check, env *provider.TestEnvironment) { - compliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, "BPF") + compliantObjects, nonCompliantObjects := 
checkForbiddenCapability(check, env.Containers, "BPF") check.SetResult(compliantObjects, nonCompliantObjects) } @@ -340,7 +338,7 @@ func testSecConRootUser(check *checksdb.Check, env *provider.TestEnvironment) { var nonCompliantObjects []*testhelper.ReportObject for _, put := range env.Pods { if put.IsRunAsUserID(0) { - tnf.ClaimFilePrintf("Non compliant run as Root User detected (RunAsUser uid=0) in pod %s", put.Namespace+"."+put.Name) + check.LogDebug("Non compliant run as Root User detected (RunAsUser uid=0) in pod %s", put.Namespace+"."+put.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Root User detected (RunAsUser uid=0)", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Root User not detected (RunAsUser uid=0)", true)) @@ -369,7 +367,7 @@ func testSecConPrivilegeEscalation(check *checksdb.Check, env *provider.TestEnvi for _, cut := range env.Containers { if cut.SecurityContext != nil && cut.SecurityContext.AllowPrivilegeEscalation != nil { if *(cut.SecurityContext.AllowPrivilegeEscalation) { - tnf.ClaimFilePrintf("AllowPrivilegeEscalation is set to true in container %s.", cut.Podname+"."+cut.Name) + check.LogDebug("AllowPrivilegeEscalation is set to true in container %s.", cut.Podname+"."+cut.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "AllowPrivilegeEscalation is set to true", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "AllowPrivilegeEscalation is set to false", true)) @@ -387,7 +385,7 @@ func testContainerHostPort(check *checksdb.Check, env *provider.TestEnvironment) for _, cut := range env.Containers { for _, aPort := range cut.Ports { if aPort.HostPort != 0 { - tnf.ClaimFilePrintf("Host port %d is configured in container %s.", aPort.HostPort, cut.String()) + check.LogDebug("Host port %d is configured in container %s.", aPort.HostPort, cut.String()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Host port is configured", false). SetType(testhelper.HostPortType). 
AddField(testhelper.PortNumber, strconv.Itoa(int(aPort.HostPort)))) @@ -406,7 +404,7 @@ func testPodHostNetwork(check *checksdb.Check, env *provider.TestEnvironment) { var nonCompliantObjects []*testhelper.ReportObject for _, put := range env.Pods { if put.Spec.HostNetwork { - tnf.ClaimFilePrintf("Host network is set to true in pod %s.", put.Namespace+"."+put.Name) + check.LogDebug("Host network is set to true in pod %s.", put.Namespace+"."+put.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Host network is set to true", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Host network is not set to true", true)) @@ -425,7 +423,7 @@ func testPodHostPath(check *checksdb.Check, env *provider.TestEnvironment) { for idx := range put.Spec.Volumes { vol := &put.Spec.Volumes[idx] if vol.HostPath != nil && vol.HostPath.Path != "" { - tnf.ClaimFilePrintf("Hostpath path: %s is set in pod %s.", vol.HostPath.Path, put.Namespace+"."+put.Name) + check.LogDebug("Hostpath path: %s is set in pod %s.", vol.HostPath.Path, put.Namespace+"."+put.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Hostpath path is set", false). SetType(testhelper.HostPathType). AddField(testhelper.Path, vol.HostPath.Path)) @@ -446,7 +444,7 @@ func testPodHostIPC(check *checksdb.Check, env *provider.TestEnvironment) { var nonCompliantObjects []*testhelper.ReportObject for _, put := range env.Pods { if put.Spec.HostIPC { - tnf.ClaimFilePrintf("HostIpc is set in pod %s.", put.Namespace+"."+put.Name) + check.LogDebug("HostIpc is set in pod %s.", put.Namespace+"."+put.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "HostIpc is set to true", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "HostIpc is not set to true", true)) @@ -462,7 +460,7 @@ func testPodHostPID(check *checksdb.Check, env *provider.TestEnvironment) { var nonCompliantObjects []*testhelper.ReportObject for _, put := range env.Pods { if put.Spec.HostPID { - tnf.ClaimFilePrintf("HostPid is set in pod %s.", put.Namespace+"."+put.Name) + check.LogDebug("HostPid is set in pod %s.", put.Namespace+"."+put.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "HostPid is set to true", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "HostPid is not set to true", true)) @@ -474,15 +472,15 @@ func testPodHostPID(check *checksdb.Check, env *provider.TestEnvironment) { // Tests namespaces for invalid prefixed and CRs are not defined in namespaces not under test with CRDs under test func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("CNF resources' Namespaces should not have any of the following prefixes: %v", invalidNamespacePrefixes)) + check.LogInfo("CNF resources' Namespaces should not have any of the following prefixes: %v", invalidNamespacePrefixes) var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, namespace := range env.Namespaces { namespaceCompliant := true - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("Checking namespace %s", namespace)) + log.Info("Checking namespace %s", namespace) for _, invalidPrefix := range 
invalidNamespacePrefixes { if strings.HasPrefix(namespace, invalidPrefix) { - tnf.ClaimFilePrintf("Namespace %s has invalid prefix %s", namespace, invalidPrefix) + check.LogDebug("Namespace %s has invalid prefix %s", namespace, invalidPrefix) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject("Namespace has invalid prefix", testhelper.Namespace, false, namespace)) namespaceCompliant = false break // Break out of the loop if we find an invalid prefix @@ -495,17 +493,17 @@ func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) { if failedNamespacesNum := len(nonCompliantObjects); failedNamespacesNum > 0 { check.SetResult(compliantObjects, nonCompliantObjects) } - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("CNF pods should belong to any of the configured Namespaces: %v", env.Namespaces)) - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("CRs from autodiscovered CRDs should belong only to the configured Namespaces: %v", env.Namespaces)) + check.LogInfo("CNF pods should belong to any of the configured Namespaces: %v", env.Namespaces) + check.LogInfo("CRs from autodiscovered CRDs should belong only to the configured Namespaces: %v", env.Namespaces) invalidCrs, err := namespace.TestCrsNamespaces(env.Crds, env.Namespaces) if err != nil { - tnf.Logf(logrus.ErrorLevel, fmt.Sprintf("Error while testing CRs namespaces: %v", err)) + check.LogError("Error while testing CRs namespaces: %v", err) return } invalidCrsNum, claimsLog := namespace.GetInvalidCRsNum(invalidCrs) if invalidCrsNum > 0 && len(claimsLog.GetLogLines()) > 0 { - tnf.ClaimFilePrintf("%s", claimsLog.GetLogLines()) + check.LogDebug("%s", claimsLog.GetLogLines()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject("CRs are not in the configured namespaces", testhelper.Namespace, false)) } else { compliantObjects = append(compliantObjects, testhelper.NewReportObject("CRs are in the configured namespaces", testhelper.Namespace, true)) @@ -515,13 +513,13 @@ func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) { // testPodServiceAccount verifies that the pod utilizes a valid service account func testPodServiceAccount(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, "Tests that each pod utilizes a valid service account") + check.LogInfo("Tests that each pod utilizes a valid service account") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, put := range env.Pods { - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("Testing service account for pod %s (ns: %s)", put.Name, put.Namespace)) + check.LogInfo("Testing service account for pod %s (ns: %s)", put.Name, put.Namespace) if put.Spec.ServiceAccountName == defaultServiceAccount { - tnf.ClaimFilePrintf("Pod %s (ns: %s) does not have a valid service account name.", put.Name, put.Namespace) + check.LogDebug("Pod %s (ns: %s) does not have a valid service account name.", put.Name, put.Namespace) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod does not have a valid service account name", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has a service account name", true)) @@ -535,22 +533,22 @@ func testPodServiceAccount(check *checksdb.Check, env *provider.TestEnvironment) // //nolint:funlen func testPodRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, "Should 
not have RoleBinding in other namespaces") + check.LogInfo("Should not have RoleBinding in other namespaces") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, put := range env.Pods { podIsCompliant := true - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("Testing role binding for pod: %s namespace: %s", put.Name, put.Namespace)) + check.LogInfo("Testing role binding for pod: %s namespace: %s", put.Name, put.Namespace) if put.Pod.Spec.ServiceAccountName == defaultServiceAccount { - logrus.Infof("%s has an empty or default serviceAccountName, skipping.", put.String()) + log.Info("%s has an empty or default serviceAccountName, skipping.", put.String()) // Add the pod to the non-compliant list nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "The serviceAccountName is either empty or default", false)) podIsCompliant = false } else { - logrus.Infof("%s has a serviceAccountName: %s, checking role bindings.", put.String(), put.Spec.ServiceAccountName) + log.Info("%s has a serviceAccountName: %s, checking role bindings.", put.String(), put.Spec.ServiceAccountName) // Loop through the rolebindings and check if they are from another namespace for rbIndex := range env.RoleBindings { // Short circuit if the role binding and the pod are in the same namespace. @@ -573,7 +571,7 @@ func testPodRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) { if subject.Kind == rbacv1.ServiceAccountKind && subject.Namespace == put.Namespace && subject.Name == put.Spec.ServiceAccountName { - tnf.Logf(logrus.WarnLevel, "Pod: %s has the following role bindings that do not live in one of the CNF namespaces: %s", put, env.RoleBindings[rbIndex].Name) + check.LogWarn("Pod: %s has the following role bindings that do not live in one of the CNF namespaces: %s", put, env.RoleBindings[rbIndex].Name) // Add the pod to the non-compliant list nonCompliantObjects = append(nonCompliantObjects, @@ -607,25 +605,25 @@ func testPodRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) { // //nolint:dupl func testPodClusterRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, "Pods should not have ClusterRoleBindings") + check.LogInfo("Pods should not have ClusterRoleBindings") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject - logrus.Infof("There were %d cluster role bindings found in the cluster.", len(env.ClusterRoleBindings)) + log.Info("There were %d cluster role bindings found in the cluster.", len(env.ClusterRoleBindings)) for _, put := range env.Pods { podIsCompliant := true - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("Testing cluster role binding for pod: %s namespace: %s", put.Name, put.Namespace)) + check.LogInfo("Testing cluster role binding for pod: %s namespace: %s", put.Name, put.Namespace) result, roleRefName, err := put.IsUsingClusterRoleBinding(env.ClusterRoleBindings) if err != nil { - logrus.Errorf("failed to determine if pod %s/%s is using a cluster role binding: %v", put.Namespace, put.Name, err) + log.Error("failed to determine if pod %s/%s is using a cluster role binding: %v", put.Namespace, put.Name, err) podIsCompliant = false } // Pod was found to be using a cluster role binding. This is not allowed. // Flagging this pod as a failed pod. 
if result { - tnf.Logf(logrus.WarnLevel, "%s is using a cluster role binding", put.String()) + check.LogWarn("%s is using a cluster role binding", put.String()) podIsCompliant = false } @@ -640,15 +638,15 @@ func testPodClusterRoleBindings(check *checksdb.Check, env *provider.TestEnviron } func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, "Should have automountServiceAccountToken set to false") + check.LogInfo("Should have automountServiceAccountToken set to false") msg := []string{} var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, put := range env.Pods { - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("check the existence of pod service account %s (ns= %s )", put.Namespace, put.Name)) + check.LogInfo("check the existence of pod service account %s (ns= %s )", put.Namespace, put.Name) if put.Spec.ServiceAccountName == defaultServiceAccount { - tnf.ClaimFilePrintf("Pod %s has been found with default service account name.", put.Name) + check.LogDebug("Pod %s has been found with default service account name.", put.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has been found with default service account name", false)) break } @@ -664,7 +662,7 @@ func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironm } if len(msg) > 0 { - tnf.ClaimFilePrintf(strings.Join(msg, "")) + check.LogDebug(strings.Join(msg, "")) } check.SetResult(compliantObjects, nonCompliantObjects) @@ -681,25 +679,25 @@ func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnviron } debugPod := env.DebugPods[cut.NodeName] if debugPod == nil { - tnf.Logf(logrus.ErrorLevel, "Debug pod not found for node %s", cut.NodeName) + check.LogError("Debug pod not found for node %s", cut.NodeName) return } ocpContext := clientsholder.NewContext(debugPod.Namespace, debugPod.Name, debugPod.Spec.Containers[0].Name) pid, err := crclient.GetPidFromContainer(cut, ocpContext) if err != nil { - tnf.ClaimFilePrintf("Could not get PID for: %s, error: %v", cut, err) + check.LogDebug("Could not get PID for: %s, error: %v", cut, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false)) continue } nbProcesses, err := getNbOfProcessesInPidNamespace(ocpContext, pid, clientsholder.GetClientsHolder()) if err != nil { - tnf.ClaimFilePrintf("Could not get number of processes for: %s, error: %v", cut, err) + check.LogDebug("Could not get number of processes for: %s, error: %v", cut, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false)) continue } if nbProcesses > 1 { - tnf.ClaimFilePrintf("%s has more than one process running", cut.String()) + check.LogDebug("%s has more than one process running", cut.String()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container has more than one process running", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container has only one process running", true)) @@ -718,7 +716,7 @@ func testSYSNiceRealtimeCapability(check *checksdb.Check, env *provider.TestEnvi for _, cut := range env.Containers { n := env.Nodes[cut.NodeName] if n.IsRTKernel() && 
!strings.Contains(cut.SecurityContext.Capabilities.String(), "SYS_NICE") { - tnf.ClaimFilePrintf("%s has been found running on a realtime kernel enabled node without SYS_NICE capability.", cut.String()) + check.LogDebug("%s has been found running on a realtime kernel enabled node without SYS_NICE capability.", cut.String()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container is running on a realtime kernel enabled node without SYS_NICE capability", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container is not running on a realtime kernel enabled node", true)) @@ -745,7 +743,7 @@ func testSysPtraceCapability(check *checksdb.Check, env *provider.TestEnvironmen } } if !sysPtraceEnabled { - tnf.ClaimFilePrintf("Pod %s has process namespace sharing enabled but no container allowing the SYS_PTRACE capability.", put.String()) + check.LogDebug("Pod %s has process namespace sharing enabled but no container allowing the SYS_PTRACE capability.", put.String()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has process namespace sharing enabled but no container allowing the SYS_PTRACE capability", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has process namespace sharing enabled and at least one container allowing the SYS_PTRACE capability", true)) @@ -757,7 +755,7 @@ func testSysPtraceCapability(check *checksdb.Check, env *provider.TestEnvironmen func testNamespaceResourceQuota(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject - tnf.Logf(logrus.InfoLevel, "Testing namespace resource quotas") + check.LogInfo("Testing namespace resource quotas") for _, put := range env.Pods { // Look through all of the pods and compare their namespace to any potential @@ -774,7 +772,7 @@ func testNamespaceResourceQuota(check *checksdb.Check, env *provider.TestEnviron } if !foundPodNamespaceRQ { - tnf.ClaimFilePrintf("Pod %s is running in a namespace that does not have a ResourceQuota applied.", put.String()) + check.LogDebug("Pod %s is running in a namespace that does not have a ResourceQuota applied.", put.String()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is running in a namespace that does not have a ResourceQuota applied", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is running in a namespace that has a ResourceQuota applied", true)) @@ -788,15 +786,6 @@ const ( sshServicePortProtocol = "TCP" ) -func findSSHPort(cut *provider.Container) (port string, isError bool) { - port, err := netutil.GetSSHDaemonPort(cut) - if err != nil { - isError = true - logrus.Errorf("error occurred while finding ssh port on %s, err %v", cut, err) - } - return port, isError -} - func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -805,8 +794,9 @@ func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironmen cut := put.Containers[0] // 1. 
Find SSH port - port, foundError := findSSHPort(cut) - if foundError { + port, err := netutil.GetSSHDaemonPort(cut) + if err != nil { + check.LogError("could not get ssh daemon port on %s, err: %v", cut, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Failed to get the ssh port for pod", false)) continue } @@ -818,7 +808,7 @@ func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironmen sshServicePortNumber, err := strconv.Atoi(port) if err != nil { - logrus.Errorf("error occurred while converting port %s from string to integer on %s", port, cut) + log.Error("error occurred while converting port %s from string to integer on %s", port, cut) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Failed to get the listening ports for pod", false)) continue } @@ -827,13 +817,13 @@ func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironmen sshPortInfo := netutil.PortInfo{PortNumber: sshServicePortNumber, Protocol: sshServicePortProtocol} listeningPorts, err := netutil.GetListeningPorts(cut) if err != nil { - tnf.ClaimFilePrintf("Failed to get the listening ports on %s, err: %v", cut, err) + check.LogDebug("Failed to get the listening ports on %s, err: %v", cut, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Failed to get the listening ports for pod", false)) continue } if _, ok := listeningPorts[sshPortInfo]; ok { - tnf.ClaimFilePrintf("Pod %s is running an SSH daemon", put) + check.LogDebug("Pod %s is running an SSH daemon", put) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is running an SSH daemon", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is not running an SSH daemon", true)) @@ -846,7 +836,7 @@ func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironmen func testPodRequestsAndLimits(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject - tnf.Logf(logrus.InfoLevel, "Testing container resource requests and limits") + check.LogInfo("Testing container resource requests and limits") // Loop through the containers, looking for containers that are missing requests or limits. // These need to be defined in order to pass. 
@@ -866,11 +856,11 @@ func test1337UIDs(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject const leetNum = 1337 - tnf.Logf(logrus.InfoLevel, "Testing pods to ensure none are using UID 1337") + check.LogInfo("Testing pods to ensure none are using UID 1337") for _, put := range env.Pods { - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("checking if pod %s has a securityContext RunAsUser 1337 (ns= %s)", put.Name, put.Namespace)) + check.LogInfo("checking if pod %s has a securityContext RunAsUser 1337 (ns= %s)", put.Name, put.Namespace) if put.IsRunAsUserID(leetNum) { - tnf.ClaimFilePrintf("Pod: %s/%s is found to use securityContext RunAsUser 1337", put.Namespace, put.Name) + check.LogDebug("Pod: %s/%s is found to use securityContext RunAsUser 1337", put.Namespace, put.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is using securityContext RunAsUser 1337", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is not using securityContext RunAsUser 1337", true)) @@ -915,10 +905,10 @@ func testNodePort(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, s := range env.Services { - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("Testing %s", services.ToString(s))) + check.LogInfo("Testing %s", services.ToString(s)) if s.Spec.Type == nodePort { - tnf.ClaimFilePrintf("FAILURE: Service %s (ns %s) type is nodePort", s.Name, s.Namespace) + check.LogDebug("FAILURE: Service %s (ns %s) type is nodePort", s.Name, s.Namespace) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject("Service is type NodePort", testhelper.ServiceType, false). AddField(testhelper.Namespace, s.Namespace). AddField(testhelper.ServiceName, s.Name). 
@@ -972,7 +962,7 @@ func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment) { } } if len(nonCompliantObjects) == 0 && len(compliantObjects) == 0 { - tnf.Logf(logrus.InfoLevel, "No role contains rules that apply to at least one CRD under test") + check.LogInfo("No role contains rules that apply to at least one CRD under test") return } check.SetResult(compliantObjects, nonCompliantObjects) diff --git a/cnf-certification-test/certification/suite.go b/cnf-certification-test/certification/suite.go index 87c368773..53c1540b3 100644 --- a/cnf-certification-test/certification/suite.go +++ b/cnf-certification-test/certification/suite.go @@ -23,15 +23,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/common" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" "github.com/test-network-function/oct/pkg/certdb" ) @@ -46,7 +45,7 @@ var ( validator certdb.CertificationStatusValidator beforeEachFn = func(check *checksdb.Check) error { - logrus.Infof("Check %s: getting test environment and certdb validator.", check.ID) + check.LogInfo("Check %s: getting test environment and certdb validator.", check.ID) env = provider.GetTestEnvironment() var err error @@ -76,7 +75,7 @@ var ( ) func LoadChecks() { - logrus.Debugf("Entering %s suite", common.AffiliatedCertTestKey) + log.Debug("Loading %s checks", common.AffiliatedCertTestKey) checksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey). WithBeforeEachFn(beforeEachFn) @@ -128,16 +127,12 @@ func getContainersToQuery(env *provider.TestEnvironment) map[provider.ContainerI } func testContainerCertification(c provider.ContainerImageIdentifier, validator certdb.CertificationStatusValidator) bool { - ans := validator.IsContainerCertified(c.Registry, c.Repository, c.Tag, c.Digest) - if !ans { - tnf.ClaimFilePrintf("%s/%s:%s is not listed in certified containers", c.Registry, c.Repository, c.Tag) - } - return ans + return validator.IsContainerCertified(c.Registry, c.Repository, c.Tag, c.Digest) } func testAllOperatorCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) { operatorsUnderTest := env.Operators - tnf.Logf(logrus.InfoLevel, "Verify operator as certified. Number of operators to check: %d", len(operatorsUnderTest)) + check.LogInfo("Verify operator as certified. 
Number of operators to check: %d", len(operatorsUnderTest)) var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -154,12 +149,12 @@ func testAllOperatorCertified(check *checksdb.Check, env *provider.TestEnvironme channel := operatorsUnderTest[i].Channel isCertified := validator.IsOperatorCertified(name, ocpMinorVersion, channel) if !isCertified { - tnf.Logf(logrus.InfoLevel, "Operator %s (channel %s) failed to be certified for OpenShift %s", name, channel, ocpMinorVersion) + check.LogInfo("Operator %s (channel %s) failed to be certified for OpenShift %s", name, channel, ocpMinorVersion) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operatorsUnderTest[i].Namespace, operatorsUnderTest[i].Name, "Operator failed to be certified for OpenShift", false). AddField(testhelper.OCPVersion, ocpMinorVersion). AddField(testhelper.OCPChannel, channel)) } else { - logrus.Infof("Operator %s (channel %s) certified OK.", name, channel) + log.Info("Operator %s (channel %s) certified OK.", name, channel) compliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operatorsUnderTest[i].Namespace, operatorsUnderTest[i].Name, "Operator certified OK", true). AddField(testhelper.OCPVersion, ocpMinorVersion). AddField(testhelper.OCPChannel, channel)) @@ -180,9 +175,9 @@ func testHelmCertified(check *checksdb.Check, env *provider.TestEnvironment, val nonCompliantObjects = append(nonCompliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, "helm chart is not certified", false). SetType(testhelper.HelmVersionType). AddField(testhelper.Version, helm.Chart.Metadata.Version)) - tnf.ClaimFilePrintf("Helm Chart %s version %s is not certified.", helm.Name, helm.Chart.Metadata.Version) + check.LogDebug("Helm Chart %s version %s is not certified.", helm.Name, helm.Chart.Metadata.Version) } else { - logrus.Infof("Helm Chart %s version %s is certified.", helm.Name, helm.Chart.Metadata.Version) + log.Info("Helm Chart %s version %s is certified.", helm.Name, helm.Chart.Metadata.Version) compliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, "helm chart is certified", true). SetType(testhelper.HelmVersionType). AddField(testhelper.Version, helm.Chart.Metadata.Version)) @@ -198,13 +193,15 @@ func testContainerCertificationStatusByDigest(check *checksdb.Check, env *provid for _, c := range env.Containers { switch { case c.ContainerImageIdentifier.Digest == "": - tnf.ClaimFilePrintf("%s is missing digest field, failing validation (repo=%s image=%s digest=%s)", c, c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository, c.ContainerImageIdentifier.Digest) + check.LogDebug("%s is missing digest field, failing validation (repo=%s image=%s digest=%s)", c, c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository, c.ContainerImageIdentifier.Digest) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, "Missing digest field", false). AddField(testhelper.Repository, c.ContainerImageIdentifier.Registry). AddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository). 
AddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest)) case !testContainerCertification(c.ContainerImageIdentifier, validator): - tnf.ClaimFilePrintf("%s digest not found in database, failing validation (repo=%s image=%s digest=%s)", c, c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository, c.ContainerImageIdentifier.Digest) + check.LogDebug("%s digest not found in database, failing validation (repo=%s image=%s tag=%s digest=%s)", c, + c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository, + c.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, "Digest not found in database", false). AddField(testhelper.Repository, c.ContainerImageIdentifier.Registry). AddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository). @@ -231,7 +228,7 @@ func testHelmVersion(check *checksdb.Check) error { } if len(podList.Items) == 0 { - tnf.ClaimFilePrintf("Tiller pod not found in any namespaces. Helm version is v3.") + check.LogDebug("Tiller pod not found in any namespaces. Helm version is v3.") for _, helm := range env.HelmChartReleases { compliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, "helm chart was installed with helm v3", true)) } @@ -239,7 +236,7 @@ func testHelmVersion(check *checksdb.Check) error { return nil } - tnf.ClaimFilePrintf("Tiller pod found, helm version is v2.") + check.LogDebug("Tiller pod found, helm version is v2.") for i := range podList.Items { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(podList.Items[i].Namespace, podList.Items[i].Name, "This pod is a Tiller pod. 
Helm Chart version is v2 but needs to be v3 due to the security risks associated with Tiller", false)) diff --git a/cnf-certification-test/chaostesting/pod_delete/pod_delete.go b/cnf-certification-test/chaostesting/pod_delete/pod_delete.go index afc34893c..2cd87ac33 100644 --- a/cnf-certification-test/chaostesting/pod_delete/pod_delete.go +++ b/cnf-certification-test/chaostesting/pod_delete/pod_delete.go @@ -30,8 +30,8 @@ import ( "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/tnf" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -76,17 +76,17 @@ func GetLabelDeploymentValue(env *provider.TestEnvironment, labelsMap map[string func ApplyAndCreatePodDeleteResources(appLabel, appKind, namespace string) error { // create the chaos experiment resource if err := applyAndCreateFile(appLabel, appKind, namespace, experimentFile); err != nil { - logrus.Errorf("cant create the experiment of the test: %s", err) + log.Error("cant create the experiment of the test: %s", err) return err } // create the chaos serviceAccount resource if err := applyAndCreateFile(appLabel, appKind, namespace, serviceAccountFile); err != nil { - logrus.Errorf("cant create the serviceAccount of the test: %s", err) + log.Error("cant create the serviceAccount of the test: %s", err) return err } // create the chaos chaosEngine resource if err := applyAndCreateFile(appLabel, appKind, namespace, chaosEngineFile); err != nil { - logrus.Errorf("cant create the chaosEngine of the test: %s", err) + log.Error("cant create the chaosEngine of the test: %s", err) return err } return nil @@ -95,11 +95,11 @@ func ApplyAndCreatePodDeleteResources(appLabel, appKind, namespace string) error func applyAndCreateFile(appLabel, appKind, namespace, filename string) error { fileDecoder, err := applyTemplate(appLabel, appKind, namespace, filename) if err != nil { - logrus.Errorf("cant create the decoderfile of the test: %s", err) + log.Error("cant create the decoderfile of the test: %s", err) return err } if err = createResource(fileDecoder); err != nil { - logrus.Errorf("%s error create the resources for the test.", err) + log.Error("%s error create the resources for the test.", err) return err } return nil @@ -114,25 +114,25 @@ func DeleteAllResources(namespace string) { } gvr := schema.GroupVersionResource{Group: "litmuschaos.io", Version: "v1alpha1", Resource: "chaosengines"} if err := oc.DynamicClient.Resource(gvr).Namespace(namespace).Delete(context.TODO(), "engine-test", deleteOptions); err != nil { - logrus.Errorf("error while removing the chaos engine resources %s", err) + log.Error("error while removing the chaos engine resources %s", err) } err := oc.K8sClient.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), "test-sa", deleteOptions) if err != nil { - logrus.Errorf("error while removing the ServiceAccounts resources %s", err) + log.Error("error while removing the ServiceAccounts resources %s", err) } if err = oc.K8sClient.RbacV1().Roles(namespace).Delete(context.TODO(), "test-sa", deleteOptions); err != nil { - logrus.Errorf("error while removing the chaos engine resources %s", err) + log.Error("error while removing the chaos engine resources %s", err) } if err = oc.K8sClient.RbacV1().RoleBindings(namespace).Delete(context.TODO(), 
"test-sa", deleteOptions); err != nil { - logrus.Errorf("error while removing the chaos engine resources %s", err) + log.Error("error while removing the chaos engine resources %s", err) } gvr = schema.GroupVersionResource{Group: "litmuschaos.io", Version: "v1alpha1", Resource: "chaosexperiments"} if err := oc.DynamicClient.Resource(gvr).Namespace(namespace).Delete(context.TODO(), chaosTestName, deleteOptions); err != nil { - logrus.Errorf("error while removing the chaos engine resources %s", err) + log.Error("error while removing the chaos engine resources %s", err) } gvr = schema.GroupVersionResource{Group: "litmuschaos.io", Version: "v1alpha1", Resource: "chaosresults"} if err := oc.DynamicClient.Resource(gvr).Namespace(namespace).Delete(context.TODO(), chaosresultName, deleteOptions); err != nil { - logrus.Errorf("error while removing the chaos results resources %s", err) + log.Error("error while removing the chaos results resources %s", err) } } @@ -144,7 +144,7 @@ func applyTemplate(appLabel, appKind, namespace, filename string) (*yamlutil.YAM vars["APP_KIND"] = appKind output, err := fillTemplate(filename, vars) if err != nil { - logrus.Errorf("error while executing the template to the yaml file %s", err) + log.Error("error while executing the template to the yaml file %s", err) return nil, err } const oneh = 100 @@ -156,13 +156,13 @@ func fillTemplate(file string, values map[string]interface{}) ([]byte, error) { // parse the template tmpl, err := template.ParseFiles(file) if err != nil { - logrus.Errorf("error while parsing the yaml file: %s error: %v", file, err) + log.Error("error while parsing the yaml file: %s error: %v", file, err) return nil, err } var buffer bytes.Buffer writer := bufio.NewWriter(&buffer) if err := tmpl.Execute(writer, values); err != nil { - logrus.Errorf("error while executing the template to the yaml file: %s error: %v", file, err) + log.Error("error while executing the template to the yaml file: %s error: %v", file, err) return nil, err } writer.Flush() // write to the buffer @@ -192,11 +192,11 @@ func IsChaosResultVerdictPass() bool { gvr := schema.GroupVersionResource{Group: "litmuschaos.io", Version: "v1alpha1", Resource: "chaosresults"} crs, err := oc.DynamicClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorf("error getting : %v\n", err) + log.Error("error getting : %v\n", err) return false } if len(crs.Items) > 1 { - logrus.Errorf("There are currently %d chaosresults resources. That is incorrect behavior.\n", len(crs.Items)) + log.Error("There are currently %d chaosresults resources. 
That is incorrect behavior.\n", len(crs.Items)) return false } cr := crs.Items[0] @@ -207,7 +207,7 @@ func IsChaosResultVerdictPass() bool { if verdictValue == pass { return true } - tnf.Logf(logrus.WarnLevel, "test completed but it failed with reason %s", failResult.(string)) + log.Warn("test completed but it failed with reason %s", failResult.(string)) return false } return false @@ -218,7 +218,7 @@ func waitForResult() bool { gvr := schema.GroupVersionResource{Group: "litmuschaos.io", Version: "v1alpha1", Resource: "chaosengines"} crs, err := oc.DynamicClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorf("error getting : %v\n", err) + log.Error("error getting : %v\n", err) return false } @@ -227,7 +227,7 @@ func waitForResult() bool { func parseLitmusResult(crs *unstructured.UnstructuredList) bool { if len(crs.Items) > 1 { - logrus.Errorf("There are currently %d chaosengine resources. That is incorrect behavior.\n", len(crs.Items)) + log.Error("There are currently %d chaosengine resources. That is incorrect behavior.\n", len(crs.Items)) return false } cr := crs.Items[0] diff --git a/cnf-certification-test/chaostesting/suite.go b/cnf-certification-test/chaostesting/suite.go index 8f5b8a2dc..8a3b42d40 100644 --- a/cnf-certification-test/chaostesting/suite.go +++ b/cnf-certification-test/chaostesting/suite.go @@ -8,10 +8,10 @@ import ( "github.com/test-network-function/cnf-certification-test/pkg/provider" - "github.com/sirupsen/logrus" poddelete "github.com/test-network-function/cnf-certification-test/cnf-certification-test/chaostesting/pod_delete" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/onsi/ginkgo/v2" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/common" @@ -25,7 +25,7 @@ const ( ) var _ = ginkgo.Describe(common.ChaosTesting, func() { - logrus.Debugf("Entering %s suite", common.ChaosTesting) + log.Debug("Entering %s suite", common.ChaosTesting) var env provider.TestEnvironment ginkgo.BeforeEach(func() { @@ -53,7 +53,7 @@ func testPodDelete(env *provider.TestEnvironment) { var label string var err error if label, err = poddelete.GetLabelDeploymentValue(env, dep.Spec.Template.Labels); err != nil { - logrus.Errorf("did not find a match label for the deployment %s ", dep.ToString()) + log.Error("did not find a match label for the deployment %s ", dep.ToString()) ginkgo.Fail(fmt.Sprintf("There is no label for the deployment %s ", dep.ToString())) } if err := poddelete.ApplyAndCreatePodDeleteResources(label, deployment, dep.Namespace); err != nil { @@ -61,13 +61,13 @@ func testPodDelete(env *provider.TestEnvironment) { } if completed := poddelete.WaitForTestFinish(testCaseTimeout); !completed { poddelete.DeleteAllResources(dep.Namespace) - logrus.Errorf("deployment %s timed-out the litmus test", dep.ToString()) + log.Error("deployment %s timed-out the litmus test", dep.ToString()) ginkgo.Fail(fmt.Sprintf("deployment %s timed-out the litmus test", dep.ToString())) } if result := poddelete.IsChaosResultVerdictPass(); !result { // delete the chaos engin crd poddelete.DeleteAllResources(dep.Namespace) - logrus.Errorf("deployment %s failed the litmus test", dep.ToString()) + log.Error("deployment %s failed the litmus test", dep.ToString()) ginkgo.Fail(fmt.Sprintf("deployment %s failed the litmus test", dep.ToString())) } poddelete.DeleteAllResources(dep.Namespace) diff --git 
a/cnf-certification-test/lifecycle/ownerreference/ownerreference.go b/cnf-certification-test/lifecycle/ownerreference/ownerreference.go index 542369f08..5969c3270 100644 --- a/cnf-certification-test/lifecycle/ownerreference/ownerreference.go +++ b/cnf-certification-test/lifecycle/ownerreference/ownerreference.go @@ -17,7 +17,7 @@ package ownerreference import ( - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" corev1 "k8s.io/api/core/v1" ) @@ -46,11 +46,11 @@ func NewOwnerReference(put *corev1.Pod) *OwnerReference { // o.result func (o *OwnerReference) RunTest() { for _, k := range o.put.OwnerReferences { - logrus.Traceln("kind is ", k.Kind) + log.Debug("kind is %s", k.Kind) if k.Kind == statefulSet || k.Kind == replicaSet { o.result = testhelper.SUCCESS } else { - logrus.Error("Pod ", o.put.Name, " has owner of type ", k.Kind) + log.Error("Pod %s has owner of type %s", o.put.Name, k.Kind) o.result = testhelper.FAILURE return } diff --git a/cnf-certification-test/lifecycle/podrecreation/podrecreation.go b/cnf-certification-test/lifecycle/podrecreation/podrecreation.go index 7bb18b291..9d38c7820 100644 --- a/cnf-certification-test/lifecycle/podrecreation/podrecreation.go +++ b/cnf-certification-test/lifecycle/podrecreation/podrecreation.go @@ -22,8 +22,8 @@ import ( "sync" "time" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/provider" corev1 "k8s.io/api/core/v1" @@ -48,7 +48,7 @@ const ( func CordonHelper(name, operation string) error { clients := clientsholder.GetClientsHolder() - logrus.Infof("Performing %s operation on node %s", operation, name) + log.Info("Performing %s operation on node %s", operation, name) retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Fetch node object node, err := clients.K8sClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) @@ -68,7 +68,7 @@ func CordonHelper(name, operation string) error { return err }) if retryErr != nil { - logrus.Error("can not ", operation, " node: ", name, " error=", retryErr) + log.Error("can not %s node: %s, err=%v", operation, name, retryErr) } return retryErr } @@ -90,7 +90,7 @@ func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int } err := deletePod(put.Pod, mode, &wg) if err != nil { - logrus.Errorf("error deleting %s", put) + log.Error("error deleting %s", put) } } } @@ -110,7 +110,7 @@ func skipDaemonPod(pod *corev1.Pod) bool { func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error { clients := clientsholder.GetClientsHolder() - logrus.Debugf("deleting ns=%s pod=%s with %s mode", pod.Namespace, pod.Name, mode) + log.Debug("deleting ns=%s pod=%s with %s mode", pod.Namespace, pod.Name, mode) gracePeriodSeconds := *pod.Spec.TerminationGracePeriodSeconds // Create watcher before deleting pod watcher, err := clients.K8sClient.CoreV1().Pods(pod.Namespace).Watch(context.TODO(), metav1.ListOptions{ @@ -124,7 +124,7 @@ func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error { GracePeriodSeconds: &gracePeriodSeconds, }) if err != nil { - logrus.Errorf("error deleting %s err: %v", pod.String(), err) + log.Error("error deleting %s err: %v", pod.String(), 
err) return err } if mode == DeleteBackground { @@ -143,24 +143,24 @@ func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error { func CordonCleanup(node string, check *checksdb.Check) { err := CordonHelper(node, Uncordon) if err != nil { - logrus.Errorf("cleanup: error uncordoning the node: %s, err=%s", node, err) + log.Error("cleanup: error uncordoning the node: %s, err=%s", node, err) check.Abort() } } func waitPodDeleted(ns, podName string, timeout int64, watcher watch.Interface) { - logrus.Tracef("Entering waitPodDeleted ns=%s pod=%s", ns, podName) + log.Debug("Entering waitPodDeleted ns=%s pod=%s", ns, podName) defer watcher.Stop() for { select { case event := <-watcher.ResultChan(): if event.Type == watch.Deleted || event.Type == "" { - logrus.Debugf("ns=%s pod=%s deleted", ns, podName) + log.Debug("ns=%s pod=%s deleted", ns, podName) return } case <-time.After(time.Duration(timeout) * time.Second): - logrus.Infof("watch for pod deletion timedout after %d seconds", timeout) + log.Info("watch for pod deletion timedout after %d seconds", timeout) return } } diff --git a/cnf-certification-test/lifecycle/podsets/podsets.go b/cnf-certification-test/lifecycle/podsets/podsets.go index 7169bd35a..c2cffbc5a 100644 --- a/cnf-certification-test/lifecycle/podsets/podsets.go +++ b/cnf-certification-test/lifecycle/podsets/podsets.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/loghelper" "github.com/test-network-function/cnf-certification-test/pkg/provider" "k8s.io/apimachinery/pkg/runtime/schema" @@ -33,62 +33,62 @@ const ( ) var WaitForDeploymentSetReady = func(ns, name string, timeout time.Duration) bool { - logrus.Trace("check if deployment ", ns, ":", name, " is ready ") + log.Debug("check if deployment %s:%s is ready", ns, name) clients := clientsholder.GetClientsHolder() start := time.Now() for time.Since(start) < timeout { dp, err := provider.GetUpdatedDeployment(clients.K8sClient.AppsV1(), ns, name) if err != nil { - logrus.Errorf("Error while getting deployment %s (ns: %s), err: %v", name, ns, err) + log.Error("Error while getting deployment %s (ns: %s), err: %v", name, ns, err) } else if !dp.IsDeploymentReady() { - logrus.Infof("%s is not ready yet", dp.ToString()) + log.Info("%s is not ready yet", dp.ToString()) } else { - logrus.Tracef("%s is ready!", dp.ToString()) + log.Debug("%s is ready!", dp.ToString()) return true } time.Sleep(time.Second) } - logrus.Error("deployment ", ns, ":", name, " is not ready ") + log.Error("deployment %s:%s is not ready", ns, name) return false } var WaitForScalingToComplete = func(ns, name string, timeout time.Duration, groupResourceSchema schema.GroupResource) bool { - logrus.Trace("check if scale object for crs ", ns, ":", name, " is ready ") + log.Debug("check if scale object for crs %s:%s is ready", ns, name) clients := clientsholder.GetClientsHolder() start := time.Now() for time.Since(start) < timeout { crScale, err := provider.GetUpdatedCrObject(clients.ScalingClient, ns, name, groupResourceSchema) if err != nil { - logrus.Errorf("error while getting the scaling fields %v", err) + log.Error("error while getting the scaling fields %v", err) } else if !crScale.IsScaleObjectReady() { - logrus.Errorf("%s is not ready yet", crScale.ToString()) + log.Error("%s is not ready yet", crScale.ToString()) } 
else { - logrus.Tracef("%s is ready!", crScale.ToString()) + log.Debug("%s is ready!", crScale.ToString()) return true } time.Sleep(time.Second) } - logrus.Error("timeout waiting for cr ", ns, ":", name, " scaling to be complete") + log.Error("timeout waiting for cr %s:%s scaling to be complete", ns, name) return false } func WaitForStatefulSetReady(ns, name string, timeout time.Duration) bool { - logrus.Trace("check if statefulset ", ns, ":", name, " is ready") + log.Debug("check if statefulset %s:%s is ready", ns, name) clients := clientsholder.GetClientsHolder() start := time.Now() for time.Since(start) < timeout { ss, err := provider.GetUpdatedStatefulset(clients.K8sClient.AppsV1(), ns, name) if err != nil { - logrus.Errorf("error while getting the %s, err: %v", ss.ToString(), err) + log.Error("error while getting the %s, err: %v", ss.ToString(), err) } else if ss.IsStatefulSetReady() { - logrus.Tracef("%s is ready", ss.ToString()) + log.Debug("%s is ready", ss.ToString()) return true } time.Sleep(time.Second) } - logrus.Error("statefulset ", ns, ":", name, " is not ready") + log.Error("statefulset %s:%s is not ready", ns, name) return false } @@ -142,14 +142,14 @@ func getNotReadyDeployments(deployments []*provider.Deployment) []*provider.Depl for _, dep := range deployments { ready, err := isDeploymentReady(dep.Name, dep.Namespace) if err != nil { - logrus.Errorf("Failed to get %s: %v", dep.ToString(), err) + log.Error("Failed to get %s: %v", dep.ToString(), err) // We'll mark it as not ready, anyways. notReadyDeployments = append(notReadyDeployments, dep) continue } if ready { - logrus.Debugf("%s is ready.", dep.ToString()) + log.Debug("%s is ready.", dep.ToString()) } else { notReadyDeployments = append(notReadyDeployments, dep) } @@ -165,14 +165,14 @@ func getNotReadyStatefulSets(statefulSets []*provider.StatefulSet) []*provider.S for _, sts := range statefulSets { ready, err := isStatefulSetReady(sts.Name, sts.Namespace) if err != nil { - logrus.Errorf("Failed to get %s: %v", sts.ToString(), err) + log.Error("Failed to get %s: %v", sts.ToString(), err) // We'll mark it as not ready, anyways. 
notReadyStatefulSets = append(notReadyStatefulSets, sts) continue } if ready { - logrus.Debugf("%s is ready.", sts.ToString()) + log.Debug("%s is ready.", sts.ToString()) } else { notReadyStatefulSets = append(notReadyStatefulSets, sts) } @@ -190,16 +190,16 @@ func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration deploymentsToCheck := env.Deployments statefulSetsToCheck := env.StatefulSets - logrus.Infof("Waiting %s for %d podsets to be ready.", timeout, len(deploymentsToCheck)+len(statefulSetsToCheck)) + log.Info("Waiting %s for %d podsets to be ready.", timeout, len(deploymentsToCheck)+len(statefulSetsToCheck)) for startTime := time.Now(); time.Since(startTime) < timeout; { - logrus.Infof("Checking Deployments readiness of Deployments %v", getDeploymentsInfo(deploymentsToCheck)) + log.Info("Checking Deployments readiness of Deployments %v", getDeploymentsInfo(deploymentsToCheck)) notReadyDeployments = getNotReadyDeployments(deploymentsToCheck) - logrus.Infof("Checking StatefulSets readiness of StatefulSets %v", getStatefulSetsInfo(statefulSetsToCheck)) + log.Info("Checking StatefulSets readiness of StatefulSets %v", getStatefulSetsInfo(statefulSetsToCheck)) notReadyStatefulSets = getNotReadyStatefulSets(statefulSetsToCheck) - logrus.Infof("Not ready Deployments: %v", getDeploymentsInfo(notReadyDeployments)) - logrus.Infof("Not ready StatefulSets: %v", getStatefulSetsInfo(notReadyStatefulSets)) + log.Info("Not ready Deployments: %v", getDeploymentsInfo(notReadyDeployments)) + log.Info("Not ready StatefulSets: %v", getStatefulSetsInfo(notReadyStatefulSets)) deploymentsToCheck = notReadyDeployments statefulSetsToCheck = notReadyStatefulSets diff --git a/cnf-certification-test/lifecycle/scaling/crd_scaling.go b/cnf-certification-test/lifecycle/scaling/crd_scaling.go index e2944a63a..01f6ba763 100644 --- a/cnf-certification-test/lifecycle/scaling/crd_scaling.go +++ b/cnf-certification-test/lifecycle/scaling/crd_scaling.go @@ -23,9 +23,9 @@ import ( "errors" "time" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/lifecycle/podsets" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/provider" scalingv1 "k8s.io/api/autoscaling/v1" scale "k8s.io/client-go/scale" @@ -38,7 +38,7 @@ import ( func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupResource, timeout time.Duration) bool { if crScale == nil { - logrus.Errorf("cc object is nill") + log.Error("cc object is nill") return false } clients := clientsholder.GetClientsHolder() @@ -50,25 +50,25 @@ func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupRes // scale up replicas++ if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout) { - logrus.Errorf("Can not scale cr %s in namespace %s", name, namespace) + log.Error("Can not scale cr %s in namespace %s", name, namespace) return false } // scale down replicas-- if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout) { - logrus.Errorf("Can not scale cr %s in namespace %s", name, namespace) + log.Error("Can not scale cr %s in namespace %s", name, namespace) return false } } else { // scale down replicas-- if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout) { - logrus.Errorf("Can not 
scale cr %s in namespace %s", name, namespace) + log.Error("Can not scale cr %s in namespace %s", name, namespace) return false } // scale up replicas++ if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout) { - logrus.Errorf("Can not scale cr %s in namespace %s", name, namespace) + log.Error("Can not scale cr %s in namespace %s", name, namespace) return false } } @@ -78,9 +78,9 @@ func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupRes func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, autoscalerpram *provider.CrScale, replicas int32, up bool, timeout time.Duration) bool { if up { - logrus.Trace("scale UP CRS to ", replicas, " replicas ") + log.Debug("scale UP CRS to %d replicas", replicas) } else { - logrus.Trace("scale DOWN CRS to ", replicas, " replicas ") + log.Debug("scale DOWN CRS to %d replicas", replicas) } retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -94,17 +94,17 @@ func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, aut scalingObject.Spec.Replicas = replicas _, err = scalesGetter.Scales(namespace).Update(context.TODO(), rc, scalingObject, metav1.UpdateOptions{}) if err != nil { - logrus.Error("Can not update DynamicClient ") + log.Error("Can not update DynamicClient ") return err } if !podsets.WaitForScalingToComplete(namespace, name, timeout, rc) { - logrus.Error("can not update cr ", namespace, ":", name) + log.Error("can not update cr %s:%s", namespace, name) return errors.New("can not update cr") } return nil }) if retryErr != nil { - logrus.Error("Can not scale DynamicClient ", " error=", retryErr) + log.Error("Can not scale DynamicClient, err=%v", retryErr) return false } return true @@ -112,7 +112,7 @@ func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, aut func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscaler, groupResourceSchema schema.GroupResource, timeout time.Duration) bool { if cr == nil { - logrus.Errorf("cc object is nill") + log.Error("cc object is nill") return false } clients := clientsholder.GetClientsHolder() @@ -131,14 +131,14 @@ func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscale if replicas <= 1 { // scale up replicas++ - logrus.Trace("scale UP HPA ", namespace, ":", hpa.Name, "To min=", replicas, " max=", replicas) + log.Debug("scale UP HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas) pass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema) if !pass { return false } // scale down replicas-- - logrus.Trace("scale DOWN HPA ", namespace, ":", hpa.Name, "To min=", replicas, " max=", replicas) + log.Debug("scale DOWN HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas) pass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, max, timeout, groupResourceSchema) if !pass { return false @@ -146,21 +146,21 @@ func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscale } else { // scale down replicas-- - logrus.Trace("scale DOWN HPA ", namespace, ":", hpa.Name, "To min=", replicas, " max=", replicas) + log.Debug("scale DOWN HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas) pass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema) if !pass { return false } // scale up replicas++ - logrus.Trace("scale UP HPA ", namespace, ":", hpa.Name, "To min=", 
replicas, " max=", replicas) + log.Debug("scale UP HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas) pass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema) if !pass { return false } } // back the min and the max value of the hpa - logrus.Trace("back HPA ", namespace, ":", hpa.Name, "To min=", min, " max=", max) + log.Debug("back HPA %s:%s to min=%d max=%d", namespace, hpa.Name, min, max) return scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, max, timeout, groupResourceSchema) } @@ -168,24 +168,24 @@ func scaleHpaCRDHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, c retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { hpa, err := hpscaler.Get(context.TODO(), hpaName, metav1.GetOptions{}) if err != nil { - logrus.Error("Can not Update autoscaler to scale ", namespace, ":", crName, " error=", err) + log.Error("Can not Update autoscaler to scale %s:%s, err=%v", namespace, crName, err) return err } hpa.Spec.MinReplicas = &min hpa.Spec.MaxReplicas = max _, err = hpscaler.Update(context.TODO(), hpa, metav1.UpdateOptions{}) if err != nil { - logrus.Error("Can not Update autoscaler to scale ", namespace, ":", crName, " error=", err) + log.Error("Can not Update autoscaler to scale %s:%s, err=%v", namespace, crName, err) return err } if !podsets.WaitForScalingToComplete(namespace, crName, timeout, groupResourceSchema) { - logrus.Error("Can not update cr ", namespace, ":", crName) + log.Error("Can not update cr %s:%s", namespace, crName) return errors.New("can not update cr") } return nil }) if retryErr != nil { - logrus.Error("Can not scale hpa ", namespace, ":", hpaName, " error=", retryErr) + log.Error("Can not scale hpa %s:%s, err=%v", namespace, hpaName, retryErr) return false } return true diff --git a/cnf-certification-test/lifecycle/scaling/deployment_scaling.go b/cnf-certification-test/lifecycle/scaling/deployment_scaling.go index 9758dc53c..4b0f76d85 100644 --- a/cnf-certification-test/lifecycle/scaling/deployment_scaling.go +++ b/cnf-certification-test/lifecycle/scaling/deployment_scaling.go @@ -22,9 +22,9 @@ import ( "errors" "time" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/lifecycle/podsets" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/provider" v1autoscaling "k8s.io/api/autoscaling/v1" @@ -39,7 +39,7 @@ import ( func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration) bool { clients := clientsholder.GetClientsHolder() - logrus.Trace("scale deployment not using HPA ", deployment.Namespace, ":", deployment.Name) + log.Debug("scale deployment not using HPA %s:%s", deployment.Namespace, deployment.Name) var replicas int32 if deployment.Spec.Replicas != nil { replicas = *deployment.Spec.Replicas @@ -51,25 +51,25 @@ func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration) b // scale up replicas++ if !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, true) { - logrus.Error("can not scale deployment =", deployment.Namespace, ":", deployment.Name) + log.Error("can not scale deployment %s:%s", deployment.Namespace, deployment.Name) return false } // scale down replicas-- if !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, false) { - 
logrus.Error("can not scale deployment =", deployment.Namespace, ":", deployment.Name) + log.Error("can not scale deployment %s:%s", deployment.Namespace, deployment.Name) return false } } else { // scale down replicas-- if !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, false) { - logrus.Error("can not scale deployment =", deployment.Namespace, ":", deployment.Name) + log.Error("can not scale deployment %s:%s", deployment.Namespace, deployment.Name) return false } // scale up replicas++ if !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, true) { - logrus.Error("can not scale deployment =", deployment.Namespace, ":", deployment.Name) + log.Error("can not scale deployment %s:%s", deployment.Namespace, deployment.Name) return false } } @@ -78,9 +78,9 @@ func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration) b func scaleDeploymentHelper(client typedappsv1.AppsV1Interface, deployment *appsv1.Deployment, replicas int32, timeout time.Duration, up bool) bool { if up { - logrus.Trace("scale UP deployment to ", replicas, " replicas ") + log.Debug("scale UP deployment to %d replicas", replicas) } else { - logrus.Trace("scale DOWN deployment to ", replicas, " replicas ") + log.Debug("scale DOWN deployment to %d replicas", replicas) } retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -88,23 +88,23 @@ func scaleDeploymentHelper(client typedappsv1.AppsV1Interface, deployment *appsv // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver dp, err := client.Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, v1machinery.GetOptions{}) if err != nil { - logrus.Error("failed to get latest version of deployment ", deployment.Namespace, ":", deployment.Name) + log.Error("failed to get latest version of deployment %s:%s", deployment.Namespace, deployment.Name) return err } dp.Spec.Replicas = &replicas _, err = client.Deployments(deployment.Namespace).Update(context.TODO(), dp, v1machinery.UpdateOptions{}) if err != nil { - logrus.Error("can not update deployment ", deployment.Namespace, ":", deployment.Name) + log.Error("can not update deployment %s:%s", deployment.Namespace, deployment.Name) return err } if !podsets.WaitForDeploymentSetReady(deployment.Namespace, deployment.Name, timeout) { - logrus.Error("can not update deployment ", deployment.Namespace, ":", deployment.Name) + log.Error("can not update deployment %s:%s", deployment.Namespace, deployment.Name) return errors.New("can not update deployment") } return nil }) if retryErr != nil { - logrus.Error("can not scale deployment ", deployment.Namespace, ":", deployment.Name, " error=", retryErr) + log.Error("can not scale deployment %s:%s, err=%v", deployment.Namespace, deployment.Name, retryErr) return false } return true @@ -127,14 +127,14 @@ func TestScaleHpaDeployment(deployment *provider.Deployment, hpa *v1autoscaling. 
if replicas <= 1 { // scale up replicas++ - logrus.Trace("scale UP HPA ", deployment.Namespace, ":", hpa.Name, "To min=", replicas, " max=", replicas) + log.Debug("scale UP HPA %s:%s to min=%d max=%d", deployment.Namespace, hpa.Name, replicas, replicas) pass := scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout) if !pass { return false } // scale down replicas-- - logrus.Trace("scale DOWN HPA ", deployment.Namespace, ":", hpa.Name, "To min=", replicas, " max=", replicas) + log.Debug("scale DOWN HPA %s:%s to min=%d max=%d", deployment.Namespace, hpa.Name, replicas, replicas) pass = scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, timeout) if !pass { return false @@ -142,21 +142,21 @@ func TestScaleHpaDeployment(deployment *provider.Deployment, hpa *v1autoscaling. } else { // scale down replicas-- - logrus.Trace("scale DOWN HPA ", deployment.Namespace, ":", hpa.Name, "To min=", replicas, " max=", replicas) + log.Debug("scale DOWN HPA %s:%s to min=%d max=%d", deployment.Namespace, hpa.Name, replicas, replicas) pass := scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout) if !pass { return false } // scale up replicas++ - logrus.Trace("scale UP HPA ", deployment.Namespace, ":", hpa.Name, "To min=", replicas, " max=", replicas) + log.Debug("scale UP HPA %s:%s to min=%d max=%d", deployment.Namespace, hpa.Name, replicas, replicas) pass = scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout) if !pass { return false } } // back the min and the max value of the hpa - logrus.Trace("back HPA ", deployment.Namespace, ":", hpa.Name, "To min=", min, " max=", max) + log.Debug("back HPA %s:%s to min=%d max=%d", deployment.Namespace, hpa.Name, min, max) return scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, timeout) } @@ -164,23 +164,23 @@ func scaleHpaDeploymentHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpa retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { hpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{}) if err != nil { - logrus.Error("can not Update autoscaler to scale ", namespace, ":", deploymentName, " error=", err) + log.Error("can not Update autoscaler to scale %s:%s , err=%v", namespace, deploymentName, err) return err } hpa.Spec.MinReplicas = &min hpa.Spec.MaxReplicas = max _, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{}) if err != nil { - logrus.Error("can not Update autoscaler to scale ", namespace, ":", deploymentName, " error=", err) + log.Error("can not Update autoscaler to scale %s:%s, err=%v", namespace, deploymentName, err) return err } if !podsets.WaitForDeploymentSetReady(namespace, deploymentName, timeout) { - logrus.Error("deployment not ready after scale operation ", namespace, ":", deploymentName) + log.Error("deployment not ready after scale operation %s:%s", namespace, deploymentName) } return nil }) if retryErr != nil { - logrus.Error("can not scale hpa ", namespace, ":", hpaName, " error=", retryErr) + log.Error("can not scale hpa %s:%s , err=%v", namespace, hpaName, retryErr) return false } return true diff --git a/cnf-certification-test/lifecycle/scaling/statefulset_scaling.go b/cnf-certification-test/lifecycle/scaling/statefulset_scaling.go index 98bf05f2e..6b21a34d7 100644 --- 
a/cnf-certification-test/lifecycle/scaling/statefulset_scaling.go +++ b/cnf-certification-test/lifecycle/scaling/statefulset_scaling.go @@ -22,9 +22,9 @@ import ( "errors" "time" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/lifecycle/podsets" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" v1app "k8s.io/api/apps/v1" v1autoscaling "k8s.io/api/autoscaling/v1" @@ -33,7 +33,6 @@ import ( v1machinery "k8s.io/apimachinery/pkg/apis/meta/v1" retry "k8s.io/client-go/util/retry" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" hps "k8s.io/client-go/kubernetes/typed/autoscaling/v1" ) @@ -41,7 +40,7 @@ func TestScaleStatefulSet(statefulset *v1app.StatefulSet, timeout time.Duration) clients := clientsholder.GetClientsHolder() name, namespace := statefulset.Name, statefulset.Namespace ssClients := clients.K8sClient.AppsV1().StatefulSets(namespace) - logrus.Trace("scale statefulset not using HPA ", namespace, ":", name) + log.Debug("scale statefulset not using HPA %s:%s", namespace, name) replicas := int32(1) if statefulset.Spec.Replicas != nil { replicas = *statefulset.Spec.Replicas @@ -50,30 +49,30 @@ func TestScaleStatefulSet(statefulset *v1app.StatefulSet, timeout time.Duration) if replicas <= 1 { // scale up replicas++ - logrus.Trace("scale UP statefulset to ", replicas, " replicas ") + log.Debug("scale UP statefulset to %d replicas", replicas) if !scaleStateFulsetHelper(clients, ssClients, statefulset, replicas, timeout) { - logrus.Error("can not scale statefulset =", namespace, ":", name) + log.Error("can not scale statefulset = %s:%s", namespace, name) return false } // scale down replicas-- - logrus.Trace("scale DOWN statefulset to ", replicas, " replicas ") + log.Debug("scale DOWN statefulset to %d replicas", replicas) if !scaleStateFulsetHelper(clients, ssClients, statefulset, replicas, timeout) { - logrus.Error("can not scale statefulset =", namespace, ":", name) + log.Error("can not scale statefulset = %s:%s", namespace, name) return false } } else { // scale down replicas-- - logrus.Trace("scale DOWN statefulset to ", replicas, " replicas ") + log.Debug("scale DOWN statefulset to %d replicas", replicas) if !scaleStateFulsetHelper(clients, ssClients, statefulset, replicas, timeout) { - logrus.Error("can not scale statefulset =", namespace, ":", name) + log.Error("can not scale statefulset = %s:%s", namespace, name) return false } // scale up replicas++ - logrus.Trace("scale UP statefulset to ", replicas, " replicas ") + log.Debug("scale UP statefulset to %d replicas", replicas) if !scaleStateFulsetHelper(clients, ssClients, statefulset, replicas, timeout) { - logrus.Error("can not scale statefulset =", namespace, ":", name) + log.Error("can not scale statefulset = %s:%s", namespace, name) return false } } @@ -89,23 +88,23 @@ func scaleStateFulsetHelper(clients *clientsholder.ClientsHolder, ssClient v1.St // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver ss, err := ssClient.Get(context.TODO(), name, v1machinery.GetOptions{}) if err != nil { - tnf.ClaimFilePrintf("failed to get latest version of statefulset %s:%s with error %s", namespace, name, err) + log.Debug("failed to get latest version of statefulset %s:%s with error %s", namespace, name, err) return err } ss.Spec.Replicas = &replicas _, err = clients.K8sClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), ss, 
v1machinery.UpdateOptions{}) if err != nil { - logrus.Error("can not update statefulset ", namespace, ":", name) + log.Error("can not update statefulset %s:%s", namespace, name) return err } if !podsets.WaitForStatefulSetReady(namespace, name, timeout) { - logrus.Error("can not update statefulset ", namespace, ":", name) + log.Error("can not update statefulset %s:%s", namespace, name) return errors.New("can not update statefulset") } return nil }) if retryErr != nil { - logrus.Error("can not scale statefulset ", namespace, ":", name, " error=", retryErr) + log.Error("can not scale statefulset %s:%s, err=%v", namespace, name, retryErr) return false } return true @@ -128,14 +127,14 @@ func TestScaleHpaStatefulSet(statefulset *v1app.StatefulSet, hpa *v1autoscaling. if replicas <= 1 { // scale up replicas++ - logrus.Trace("scale UP HPA ", namespace, ":", hpaName, "To min=", replicas, " max=", replicas) + log.Debug("scale UP HPA %s:%s to min=%d max=%d", namespace, hpaName, replicas, replicas) pass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout) if !pass { return false } // scale down replicas-- - logrus.Trace("scale DOWN HPA ", namespace, ":", hpaName, "To min=", replicas, " max=", replicas) + log.Debug("scale DOWN HPA %s:%s to min=%d max=%d", namespace, hpaName, replicas, replicas) pass = scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout) if !pass { return false @@ -143,21 +142,21 @@ func TestScaleHpaStatefulSet(statefulset *v1app.StatefulSet, hpa *v1autoscaling. } else { // scale down replicas-- - logrus.Trace("scale DOWN HPA ", namespace, ":", hpaName, "To min=", replicas, " max=", replicas) + log.Debug("scale DOWN HPA %s:%s to min=%d max=%d", namespace, hpaName, replicas, replicas) pass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout) if !pass { return false } // scale up replicas++ - logrus.Trace("scale UP HPA ", namespace, ":", hpaName, "To min=", min, " max=", max) + log.Debug("scale UP HPA %s:%s to min=%d max=%d", namespace, hpaName, min, max) pass = scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout) if !pass { return false } } // back the min and the max value of the hpa - logrus.Trace("back HPA ", namespace, ":", hpaName, "To min=", min, " max=", max) + log.Debug("back HPA %s:%s to min=%d max=%d", namespace, hpaName, min, max) pass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, min, max, timeout) return pass } @@ -166,23 +165,23 @@ func scaleHpaStatefulSetHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hp retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { hpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{}) if err != nil { - logrus.Error("can not Update autoscaler to scale ", namespace, ":", statefulsetName, " error=", err) + log.Error("can not Update autoscaler to scale %s:%s, err=%v", namespace, statefulsetName, err) return err } hpa.Spec.MinReplicas = &min hpa.Spec.MaxReplicas = max _, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{}) if err != nil { - logrus.Error("can not Update autoscaler to scale ", namespace, ":", statefulsetName, " error=", err) + log.Error("can not Update autoscaler to scale %s:%s, err=%v", namespace, statefulsetName, err) return err } if !podsets.WaitForStatefulSetReady(namespace, statefulsetName, timeout) { - logrus.Error("statefulsetN not ready after scale operation ", namespace, ":", statefulsetName) 
+ log.Error("statefulsetN not ready after scale operation %s:%s", namespace, statefulsetName) } return nil }) if retryErr != nil { - logrus.Error("can not scale hpa ", namespace, ":", hpaName, " error=", retryErr) + log.Error("can not scale hpa %s:%s, err=%v", namespace, hpaName, retryErr) return false } return true diff --git a/cnf-certification-test/lifecycle/suite.go b/cnf-certification-test/lifecycle/suite.go index 7c3e12278..a1bde5b33 100644 --- a/cnf-certification-test/lifecycle/suite.go +++ b/cnf-certification-test/lifecycle/suite.go @@ -17,10 +17,9 @@ package lifecycle import ( - "fmt" + "os" "time" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/common" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/lifecycle/ownerreference" @@ -29,12 +28,12 @@ import ( "github.com/test-network-function/cnf-certification-test/cnf-certification-test/lifecycle/scaling" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/lifecycle/tolerations" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/lifecycle/volumes" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/configuration" "github.com/test-network-function/cnf-certification-test/pkg/postmortem" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" corev1 "k8s.io/api/core/v1" ) @@ -52,7 +51,7 @@ var ( env provider.TestEnvironment beforeEachFn = func(check *checksdb.Check) error { - logrus.Infof("Check %s: getting test environment.", check.ID) + check.LogInfo("Check %s: getting test environment.", check.ID) env = provider.GetTestEnvironment() return nil } @@ -68,7 +67,7 @@ var ( //nolint:funlen func LoadChecks() { - logrus.Debugf("Entering %s suite", common.LifecycleTestKey) + log.Debug("Loading %s checks", common.LifecycleTestKey) checksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey). 
WithBeforeEachFn(beforeEachFn) @@ -261,10 +260,10 @@ func testContainersPreStop(check *checksdb.Check, env *provider.TestEnvironment) var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Debugln("check container ", cut.String(), " pre stop lifecycle ") + check.LogDebug("check %s pre stop lifecycle", cut.String()) if cut.Lifecycle == nil || (cut.Lifecycle != nil && cut.Lifecycle.PreStop == nil) { - tnf.ClaimFilePrintf("%s does not have preStop defined", cut) + check.LogDebug("%s does not have preStop defined", cut) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container does not have preStop defined", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container has preStop defined", true)) @@ -277,10 +276,10 @@ func testContainersPostStart(check *checksdb.Check, env *provider.TestEnvironmen var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Debugln("check container ", cut.String(), " post start lifecycle ") + check.LogDebug("check %s post start lifecycle", cut.String()) if cut.Lifecycle == nil || (cut.Lifecycle != nil && cut.Lifecycle.PostStart == nil) { - tnf.ClaimFilePrintf("%s does not have postStart defined", cut) + check.LogDebug("%s does not have postStart defined", cut) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container does not have postStart defined", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container has postStart defined", true)) @@ -293,9 +292,9 @@ func testContainersImagePolicy(check *checksdb.Check, env *provider.TestEnvironm var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Debugln("check container ", cut.String(), " pull policy, should be ", corev1.PullIfNotPresent) + check.LogDebug("check %s pull policy, should be %s", cut.String(), corev1.PullIfNotPresent) if cut.ImagePullPolicy != corev1.PullIfNotPresent { - tnf.Logf(logrus.WarnLevel, "%s is using %s as ImagePullPolicy", cut, cut.ImagePullPolicy) + check.LogWarn("%s is using %s as ImagePullPolicy", cut, cut.ImagePullPolicy) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container is not using IfNotPresent as ImagePullPolicy", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container is using IfNotPresent as ImagePullPolicy", true)) @@ -308,9 +307,9 @@ func testContainersReadinessProbe(check *checksdb.Check, env *provider.TestEnvir var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Debugln("check container ", cut.String(), " readiness probe ") + check.LogDebug("check %s readiness probe", cut.String()) if cut.ReadinessProbe == nil { - tnf.Logf(logrus.WarnLevel, "%s does not have ReadinessProbe defined", cut) + check.LogWarn("%s does not have ReadinessProbe defined", cut) nonCompliantObjects = append(nonCompliantObjects, 
testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container does not have ReadinessProbe defined", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container has ReadinessProbe defined", true)) @@ -323,9 +322,9 @@ func testContainersLivenessProbe(check *checksdb.Check, env *provider.TestEnviro var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Debugln("check container ", cut.String(), " liveness probe ") + check.LogDebug("check %s liveness probe", cut.String()) if cut.LivenessProbe == nil { - tnf.Logf(logrus.WarnLevel, "%s does not have LivenessProbe defined", cut) + check.LogWarn("%s does not have LivenessProbe defined", cut) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container does not have LivenessProbe defined", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container has LivenessProbe defined", true)) @@ -338,9 +337,9 @@ func testContainersStartupProbe(check *checksdb.Check, env *provider.TestEnviron var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Debugln("check container ", cut.String(), " startup probe ") + check.LogDebug("check %s startup probe", cut.String()) if cut.StartupProbe == nil { - tnf.Logf(logrus.WarnLevel, "%s does not have StartupProbe defined", cut) + check.LogWarn("%s does not have StartupProbe defined", cut) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container does not have StartupProbe defined", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container has StartupProbe defined", true)) @@ -350,15 +349,15 @@ func testContainersStartupProbe(check *checksdb.Check, env *provider.TestEnviron } func testPodsOwnerReference(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, "Testing owners of CNF pod, should be replicas Set") + check.LogInfo("Testing owners of CNF pod, should be ReplicaSet") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, put := range env.Pods { - logrus.Debugln("check pod ", put.Namespace, " ", put.Name, " owner reference") + check.LogDebug("check pod %s:%s owner reference", put.Namespace, put.Name) o := ownerreference.NewOwnerReference(put.Pod) o.RunTest() if o.GetResults() != testhelper.SUCCESS { - tnf.ClaimFilePrintf("%s found with non-compliant owner reference", put.String()) + check.LogDebug("%s found with non-compliant owner reference", put.String()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has non-compliant owner reference", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has compliant owner reference", true)) @@ -373,12 +372,12 @@ func testPodNodeSelectorAndAffinityBestPractices(testPods []*provider.Pod, check for _, put := range testPods { compliantPod := true if put.HasNodeSelector() { - tnf.ClaimFilePrintf("ERROR: %s has a node selector.
Node selector: %v", put, put.Spec.NodeSelector) + check.LogDebug("ERROR: %s has a node selector. Node selector: %v", put, put.Spec.NodeSelector) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has node selector", false)) compliantPod = false } if put.Spec.Affinity != nil && put.Spec.Affinity.NodeAffinity != nil { - tnf.ClaimFilePrintf("ERROR: %s has a node affinity clause. Node affinity: %v", put, put.Spec.Affinity.NodeAffinity) + check.LogDebug("ERROR: %s has a node affinity clause. Node affinity: %v", put, put.Spec.Affinity.NodeAffinity) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has node affinity", false)) compliantPod = false } @@ -410,7 +409,7 @@ func nameInStatefulSetSkipList(name, namespace string, list []configuration.Skip //nolint:dupl func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) { - tnf.Logf(logrus.InfoLevel, "Testing deployment scaling") + check.LogInfo("Testing deployment scaling") defer env.SetNeedsRefresh() var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -418,15 +417,15 @@ func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, if scaling.IsManaged(env.Deployments[i].Name, env.Config.ManagedDeployments) { if !scaling.CheckOwnerReference(env.Deployments[i].GetOwnerReferences(), env.Config.CrdFilters, env.Crds) { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(env.Deployments[i].Namespace, env.Deployments[i].Name, "Deployment is not scalable", false)) - tnf.ClaimFilePrintf("%s is scaling failed due to OwnerReferences that are not scalable", env.Deployments[i].ToString()) + check.LogDebug("%s is scaling failed due to OwnerReferences that are not scalable", env.Deployments[i].ToString()) } else { - logrus.Infof("%s is scaling skipped due to scalable OwnerReferences, test will run on the cr scaling", env.Deployments[i].ToString()) + check.LogInfo("%s is scaling skipped due to scalable OwnerReferences, test will run on the cr scaling", env.Deployments[i].ToString()) } continue } // Skip deployment if it is allowed by config if nameInDeploymentSkipList(env.Deployments[i].Name, env.Deployments[i].Namespace, env.Config.SkipScalingTestDeployments) { - tnf.ClaimFilePrintf("%s is being skipped due to configuration setting", env.Deployments[i].String()) + check.LogDebug("%s is being skipped due to configuration setting", env.Deployments[i].String()) continue } @@ -438,7 +437,7 @@ func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, // horizontal scaler, then test that scaler // can scale the deployment if !scaling.TestScaleHpaDeployment(env.Deployments[i], hpa, timeout) { - tnf.ClaimFilePrintf("Deployment has failed the HPA scale test: %s", env.Deployments[i].ToString()) + check.LogDebug("Deployment has failed the HPA scale test: %s", env.Deployments[i].ToString()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(env.Deployments[i].Namespace, env.Deployments[i].Name, "Deployment has failed the HPA scale test", false)) } continue @@ -446,7 +445,7 @@ func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, // if the deployment is not controller by HPA // scale it directly if !scaling.TestScaleDeployment(env.Deployments[i].Deployment, timeout) { - tnf.ClaimFilePrintf("Deployment has failed the non-HPA 
scale test: %s", env.Deployments[i].ToString()) + check.LogDebug("Deployment has failed the non-HPA scale test: %s", env.Deployments[i].ToString()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(env.Deployments[i].Namespace, env.Deployments[i].Name, "Deployment has failed the non-HPA scale test", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(env.Deployments[i].Namespace, env.Deployments[i].Name, "Deployment is scalable", true)) @@ -457,7 +456,7 @@ func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, } func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) { - tnf.Logf(logrus.InfoLevel, "Testing custom resource scaling") + check.LogInfo("Testing custom resource scaling") defer env.SetNeedsRefresh() var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -466,13 +465,13 @@ func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *c scaleCr := env.ScaleCrUnderTest[i].Scale if hpa := scaling.GetResourceHPA(env.HorizontalScaler, scaleCr.Name, scaleCr.Namespace, scaleCr.Kind); hpa != nil { if !scaling.TestScaleHPACrd(&scaleCr, hpa, groupResourceSchema, timeout) { - tnf.ClaimFilePrintf("cr has failed the scaling test: %s", scaleCr.GetName()) + check.LogDebug("cr has failed the scaling test: %s", scaleCr.GetName()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, "cr has failed the HPA scaling test", false)) } continue } if !scaling.TestScaleCrd(&scaleCr, groupResourceSchema, timeout) { - tnf.ClaimFilePrintf("CR has failed the non-HPA scale test: %s", scaleCr.GetName()) + check.LogDebug("CR has failed the non-HPA scale test: %s", scaleCr.GetName()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, "CR has failed the non-HPA scale test", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, "CR is scalable", true)) @@ -483,23 +482,23 @@ func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *c //nolint:dupl func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) { - tnf.Logf(logrus.InfoLevel, "Testing statefulset scaling") + check.LogInfo("Testing statefulset scaling") defer env.SetNeedsRefresh() var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for i := range env.StatefulSets { if scaling.IsManaged(env.StatefulSets[i].Name, env.Config.ManagedStatefulsets) { if !scaling.CheckOwnerReference(env.StatefulSets[i].GetOwnerReferences(), env.Config.CrdFilters, env.Crds) { - tnf.ClaimFilePrintf("%s is scaling failed due to OwnerReferences that are not scalable", env.Deployments[i].ToString()) + check.LogDebug("%s is scaling failed due to OwnerReferences that are not scalable", env.Deployments[i].ToString()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(env.StatefulSets[i].Namespace, env.StatefulSets[i].Name, "StatefulSet has OwnerReferences that are not scalable", false)) } else { - logrus.Infof("%s is scaling skipped due to scalable OwnerReferences, test will run on te cr scaling", env.StatefulSets[i].ToString()) + check.LogInfo("%s is scaling skipped due to scalable OwnerReferences, test will run on te cr scaling", 
env.StatefulSets[i].ToString()) } continue } // Skip statefulset if it is allowed by config if nameInStatefulSetSkipList(env.StatefulSets[i].Name, env.StatefulSets[i].Namespace, env.Config.SkipScalingTestStatefulSets) { - tnf.ClaimFilePrintf("%s is being skipped due to configuration setting", env.StatefulSets[i].String()) + check.LogDebug("%s is being skipped due to configuration setting", env.StatefulSets[i].String()) continue } @@ -511,7 +510,7 @@ func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration // horizontal scaler, then test that scaler // can scale the statefulset if !scaling.TestScaleHpaStatefulSet(env.StatefulSets[i].StatefulSet, hpa, timeout) { - tnf.ClaimFilePrintf("StatefulSet has failed the scaling test: %s", env.StatefulSets[i].ToString()) + check.LogDebug("StatefulSet has failed the scaling test: %s", env.StatefulSets[i].ToString()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(env.StatefulSets[i].Namespace, env.StatefulSets[i].Name, "StatefulSet has failed the HPA scaling test", false)) } continue @@ -519,7 +518,7 @@ func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration // if the statefulset is not controller by HPA // scale it directly if !scaling.TestScaleStatefulSet(env.StatefulSets[i].StatefulSet, timeout) { - tnf.ClaimFilePrintf("StatefulSet has failed the scaling test: %s", env.StatefulSets[i].ToString()) + check.LogDebug("StatefulSet has failed the scaling test: %s", env.StatefulSets[i].ToString()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(env.StatefulSets[i].Namespace, env.StatefulSets[i].Name, "StatefulSet has failed the non-HPA scale test", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(env.StatefulSets[i].Namespace, env.StatefulSets[i].Name, "StatefulSet is scalable", true)) @@ -531,13 +530,13 @@ func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration // testHighAvailability func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, "Should set pod replica number greater than 1") + check.LogInfo("Should set pod replica number greater than 1") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, dp := range env.Deployments { if dp.Spec.Replicas == nil || *(dp.Spec.Replicas) <= 1 { - tnf.ClaimFilePrintf("Deployment found without valid high availability: %s", dp.ToString()) + check.LogDebug("Deployment found without valid high availability: %s", dp.ToString()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, "Deployment found without valid high availability", false)) continue } @@ -550,7 +549,7 @@ func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) if dp.Spec.Template.Spec.Affinity == nil || dp.Spec.Template.Spec.Affinity.PodAntiAffinity == nil { - tnf.ClaimFilePrintf("Deployment found without valid high availability: %s", dp.ToString()) + check.LogDebug("Deployment found without valid high availability: %s", dp.ToString()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, "Deployment found without valid high availability", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, "Deployment has valid high availability", true)) @@ 
-558,7 +557,7 @@ func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) } for _, st := range env.StatefulSets { if st.Spec.Replicas == nil || *(st.Spec.Replicas) <= 1 { - tnf.ClaimFilePrintf("StatefulSet found without valid high availability: %s", st.ToString()) + check.LogDebug("StatefulSet found without valid high availability: %s", st.ToString()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, "StatefulSet found without valid high availability", false)) continue } @@ -570,7 +569,7 @@ func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) if st.Spec.Template.Spec.Affinity == nil || st.Spec.Template.Spec.Affinity.PodAntiAffinity == nil { - tnf.ClaimFilePrintf("StatefulSet found without valid high availability: %s", st.ToString()) + check.LogDebug("StatefulSet found without valid high availability: %s", st.ToString()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, "StatefulSet found without valid high availability", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, "StatefulSet has valid high availability", true)) @@ -588,13 +587,13 @@ func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { needsPostMortemInfo := true defer func() { if needsPostMortemInfo { - tnf.ClaimFilePrintf(postmortem.Log()) + check.LogDebug(postmortem.Log()) } // Since we are possible exiting early, we need to make sure we set the result at the end of the function. check.SetResult(compliantObjects, nonCompliantObjects) }() - tnf.Logf(logrus.InfoLevel, "Testing node draining effect of deployment") - tnf.Logf(logrus.InfoLevel, "Testing initial state for deployments") + check.LogInfo("Testing node draining effect of deployment") + check.LogInfo("Testing initial state for deployments") defer env.SetNeedsRefresh() // Before draining any node, wait until all podsets are ready. The timeout depends on the number of podsets to check. 
@@ -602,7 +601,7 @@ func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { allPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets)) claimsLog, notReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout) if len(notReadyDeployments) > 0 || len(notReadyStatefulSets) > 0 { - tnf.ClaimFilePrintf("%s", claimsLog.GetLogLines()) + check.LogDebug("%s", claimsLog.GetLogLines()) for _, dep := range notReadyDeployments { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, "Deployment was not ready before draining any node.", false)) } @@ -621,11 +620,11 @@ func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { for _, put := range env.Pods { if !put.IsRuntimeClassNameSpecified() && put.HasNodeSelector() { podsWithNodeAssignment = append(podsWithNodeAssignment, put) - logrus.Errorf("%s has been found with node selector(s): %v", put.String(), put.Spec.NodeSelector) + check.LogError("%s has been found with node selector(s): %v", put.String(), put.Spec.NodeSelector) } } if len(podsWithNodeAssignment) > 0 { - logrus.Errorf("Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v", podsWithNodeAssignment) + check.LogError("Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v", podsWithNodeAssignment) for _, pod := range podsWithNodeAssignment { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, "Pod has node assignment.", false)) } @@ -637,19 +636,19 @@ func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { defer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node err := podrecreation.CordonHelper(nodeName, podrecreation.Cordon) if err != nil { - logrus.Errorf("error cordoning the node: %s", nodeName) + check.LogError("error cordoning the node: %s", nodeName) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, "Node cordoning failed", false)) return } - tnf.Logf(logrus.InfoLevel, fmt.Sprintf("Draining and Cordoning node %s: ", nodeName)) - logrus.Debugf("node: %s cordoned", nodeName) + check.LogInfo("Draining and Cordoning node %s: ", nodeName) + check.LogDebug("node: %s cordoned", nodeName) count, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete) if err != nil { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, "Getting pods list to drain failed", false)) return } nodeTimeout := timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count) - logrus.Debugf("draining node: %s with timeout: %s", nodeName, nodeTimeout) + check.LogDebug("draining node: %s with timeout: %s", nodeName, nodeTimeout) _, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground) if err != nil { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, "Draining node failed", false)) @@ -658,7 +657,7 @@ func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { claimsLog, notReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout) if len(notReadyDeployments) > 0 || len(notReadyStatefulSets) > 0 { - tnf.ClaimFilePrintf("%s", 
claimsLog.GetLogLines()) + check.LogDebug("%s", claimsLog.GetLogLines()) for _, dep := range notReadyDeployments { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, "Deployment not ready after draining node "+nodeName, false)) } @@ -670,7 +669,8 @@ func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { err = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon) if err != nil { - logrus.Fatalf("error uncordoning the node: %s", nodeName) + check.LogError("error uncordoning the node: %s", nodeName) + os.Exit(1) //nolint: gocritic } } @@ -691,7 +691,7 @@ func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { } func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, "Testing PersistentVolumes for reclaim policy to be set to delete") + check.LogInfo("Testing PersistentVolumes for reclaim policy to be set to delete") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -707,7 +707,7 @@ func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.T // If the Pod Volume is not tied back to a PVC and corresponding PV that has a reclaim policy of DELETE. if !volumes.IsPodVolumeReclaimPolicyDelete(&put.Spec.Volumes[pvIndex], env.PersistentVolumes, env.PersistentVolumeClaims) { - tnf.ClaimFilePrintf("%s contains volume: %s has been found without a reclaim policy of DELETE.", put.String(), &put.Spec.Volumes[pvIndex].Name) + check.LogDebug("%s contains volume: %s has been found without a reclaim policy of DELETE.", put.String(), put.Spec.Volumes[pvIndex].Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod contains volume without a reclaim policy of DELETE", false). AddField(testhelper.PersistentVolumeName, put.Spec.Volumes[pvIndex].Name). 
AddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName)) @@ -725,7 +725,7 @@ func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.T } func testCPUIsolation(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, "Testing pods for CPU isolation requirements") + check.LogInfo("Testing pods for CPU isolation requirements") // Individual requirements we are looking for: // - CPU Requests and Limits must be in the form of whole units @@ -740,7 +740,7 @@ func testCPUIsolation(check *checksdb.Check, env *provider.TestEnvironment) { for _, put := range env.GetGuaranteedPodsWithExclusiveCPUs() { if !put.IsCPUIsolationCompliant() { - tnf.ClaimFilePrintf("%s is not CPU isolated", put.String()) + check.LogDebug("%s is not CPU isolated", put.String()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is not CPU isolated", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is CPU isolated", true)) @@ -751,7 +751,7 @@ func testCPUIsolation(check *checksdb.Check, env *provider.TestEnvironment) { } func testAffinityRequiredPods(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, "Testing affinity required pods for ") + check.LogInfo("Testing affinity required pods for ") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -759,7 +759,7 @@ func testAffinityRequiredPods(check *checksdb.Check, env *provider.TestEnvironme // Check if the pod is Affinity compliant. result, err := put.IsAffinityCompliant() if !result { - tnf.ClaimFilePrintf(err.Error()) + check.LogDebug(err.Error()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is not Affinity compliant", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is Affinity compliant", true)) @@ -778,7 +778,7 @@ func testPodTolerationBypass(check *checksdb.Check, env *provider.TestEnvironmen // Check if the tolerations fall outside the 'default' and are modified versions // Take also into account the qosClass applied to the pod if tolerations.IsTolerationModified(t, put.Status.QOSClass) { - tnf.ClaimFilePrintf("%s has been found with non-default toleration %s/%s which is not allowed.", put.String(), t.Key, t.Effect) + check.LogDebug("%s has been found with non-default toleration %s/%s which is not allowed.", put.String(), t.Key, t.Effect) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has non-default toleration", false). AddField(testhelper.TolerationKey, t.Key). 
AddField(testhelper.TolerationEffect, string(t.Effect))) @@ -819,7 +819,7 @@ func testStorageProvisioner(check *checksdb.Check, env *provider.TestEnvironment for j := range StorageClasses { if Pvc[i].Spec.StorageClassName != nil && StorageClasses[j].Name == *Pvc[i].Spec.StorageClassName { usesPvcAndStorageClass = true - tnf.ClaimFilePrintf("%s pvc_name: %s, storageclass_name: %s, provisioner_name: %s", put.String(), put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName, + check.LogDebug("%s pvc_name: %s, storageclass_name: %s, provisioner_name: %s", put.String(), put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName, StorageClasses[j].Name, StorageClasses[j].Provisioner) if env.IsSNO() { diff --git a/cnf-certification-test/manageability/suite.go b/cnf-certification-test/manageability/suite.go index 0b3bf73e1..e1eaa8692 100644 --- a/cnf-certification-test/manageability/suite.go +++ b/cnf-certification-test/manageability/suite.go @@ -19,27 +19,26 @@ package manageability import ( "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/common" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" ) var ( env provider.TestEnvironment beforeEachFn = func(check *checksdb.Check) error { - logrus.Infof("Check %s: getting test environment.", check.ID) + check.LogInfo("Check %s: getting test environment.", check.ID) env = provider.GetTestEnvironment() return nil } skipIfNoContainersFn = func() (bool, string) { if len(env.Containers) == 0 { - logrus.Warnf("No containers to check...") + log.Warn("No containers to check...") return true, "There are no containers to check. Please check under test labels." } return false, "" @@ -47,7 +46,7 @@ var ( ) func LoadChecks() { - logrus.Debugf("Entering %s suite", common.ManageabilityTestKey) + log.Debug("Entering %s suite", common.ManageabilityTestKey) checksGroup := checksdb.NewChecksGroup(common.ManageabilityTestKey). 
WithBeforeEachFn(beforeEachFn) @@ -73,10 +72,10 @@ func testContainersImageTag(check *checksdb.Check, env *provider.TestEnvironment var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Debugln("check container ", cut.String(), " image should be tagged ") + check.LogDebug("check that %s image is tagged", cut) if cut.IsTagEmpty() { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container is missing image tag(s)", false)) - tnf.ClaimFilePrintf("Container %s is missing image tag(s)", cut.String()) + check.LogDebug("Container %s is missing image tag(s)", cut.String()) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container is tagged", true)) } @@ -102,7 +101,7 @@ func testContainerPortNameFormat(check *checksdb.Check, env *provider.TestEnviro for _, cut := range env.Containers { for _, port := range cut.Ports { if !containerPortNameFormatCheck(port.Name) { - tnf.ClaimFilePrintf("%s: ContainerPort %s does not follow the partner naming conventions", cut, port.Name) + check.LogDebug("%s: ContainerPort %s does not follow the partner naming conventions", cut, port.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "ContainerPort does not follow the partner naming conventions", false). AddField(testhelper.ContainerPort, port.Name)) } else { diff --git a/cnf-certification-test/networking/icmp/icmp.go b/cnf-certification-test/networking/icmp/icmp.go index 6861b6f38..4eff39c98 100644 --- a/cnf-certification-test/networking/icmp/icmp.go +++ b/cnf-certification-test/networking/icmp/icmp.go @@ -21,9 +21,9 @@ import ( "regexp" "strconv" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/networking/netcommons" "github.com/test-network-function/cnf-certification-test/internal/crclient" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/loghelper" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" @@ -86,7 +86,7 @@ func ProcessContainerIpsPerNet(containerID *provider.Container, ipAddressesFiltered := netcommons.FilterIPListByIPVersion(ipAddresses, aIPVersion) if len(ipAddressesFiltered) == 0 { // if no multus addresses found, skip this container - logrus.Debugf("Skipping %s, Network %s because no multus IPs are present", containerID, netKey) + log.Debug("Skipping %s, Network %s because no multus IPs are present", containerID, netKey) return } // Create an entry at "key" if it is not present @@ -98,7 +98,7 @@ func ProcessContainerIpsPerNet(containerID *provider.Container, // Then modify the copy firstIPIndex := 0 if entry.TesterSource.ContainerIdentifier == nil { - logrus.Debugf("%s selected to initiate ping tests", containerID) + log.Debug("%s selected to initiate ping tests", containerID) entry.TesterSource.ContainerIdentifier = containerID // if multiple interfaces are present for this network on this container/pod, pick the first one as the tester source ip entry.TesterSource.IP = ipAddressesFiltered[firstIPIndex] @@ -123,10 +123,10 @@ func RunNetworkingTests( //nolint:funlen netsUnderTest map[string]netcommons.NetTestContext, count int, aIPVersion 
netcommons.IPVersion) (report testhelper.FailureReasonOut, claimsLog loghelper.CuratedLogLines, skip bool) { - logrus.Debugf("%s", netcommons.PrintNetTestContextMap(netsUnderTest)) + log.Debug("%s", netcommons.PrintNetTestContextMap(netsUnderTest)) skip = false if len(netsUnderTest) == 0 { - logrus.Debugf("There are no %s networks to test, skipping test", aIPVersion) + log.Debug("There are no %s networks to test, skipping test", aIPVersion) skip = true return report, claimsLog, skip } @@ -139,25 +139,25 @@ func RunNetworkingTests( //nolint:funlen compliantNets[netName] = 0 nonCompliantNets[netName] = 0 if len(netUnderTest.DestTargets) == 0 { - logrus.Debugf("There are no containers to ping for %s network %s. A minimum of 2 containers is needed to run a ping test (a source and a destination) Skipping test", aIPVersion, netName) + log.Debug("There are no containers to ping for %s network %s. A minimum of 2 containers is needed to run a ping test (a source and a destination) Skipping test", aIPVersion, netName) continue } atLeastOneNetworkTested = true - logrus.Debugf("%s Ping tests on network %s. Number of target IPs: %d", aIPVersion, netName, len(netUnderTest.DestTargets)) + log.Debug("%s Ping tests on network %s. Number of target IPs: %d", aIPVersion, netName, len(netUnderTest.DestTargets)) for _, aDestIP := range netUnderTest.DestTargets { - logrus.Debugf("%s ping test on network %s from ( %s srcip: %s ) to ( %s dstip: %s )", + log.Debug("%s ping test on network %s from ( %s srcip: %s ) to ( %s dstip: %s )", aIPVersion, netName, netUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP, aDestIP.ContainerIdentifier, aDestIP.IP) result, err := TestPing(netUnderTest.TesterSource.ContainerIdentifier, aDestIP, count) - logrus.Debugf("Ping results: %s", result.String()) + log.Debug("Ping results: %s", result.String()) claimsLog.AddLogLine("%s ping test on network %s from ( %s srcip: %s ) to ( %s dstip: %s ) result: %s", aIPVersion, netName, netUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP, aDestIP.ContainerIdentifier, aDestIP.IP, result.String()) if err != nil { - logrus.Debugf("Ping failed with err:%s", err) + log.Debug("Ping failed with err:%s", err) } if result.outcome != testhelper.SUCCESS { nonCompliantNets[netName]++ @@ -197,7 +197,7 @@ func RunNetworkingTests( //nolint:funlen } } if !atLeastOneNetworkTested { - logrus.Debugf("There are no %s networks to test, skipping test", aIPVersion) + log.Debug("There are no %s networks to test, skipping test", aIPVersion) skip = true } diff --git a/cnf-certification-test/networking/netcommons/netcommons.go b/cnf-certification-test/networking/netcommons/netcommons.go index 172d8e6b5..99c837bd5 100644 --- a/cnf-certification-test/networking/netcommons/netcommons.go +++ b/cnf-certification-test/networking/netcommons/netcommons.go @@ -23,9 +23,9 @@ import ( "strings" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/networking/netutil" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" corev1 "k8s.io/api/core/v1" ) @@ -153,7 +153,7 @@ func FindRogueContainersDeclaringPorts(containers []*provider.Container, portsTo for _, cut := range containers { for _, port := range cut.Ports { if portsToTest[port.ContainerPort] { - tnf.ClaimFilePrintf("%s has 
declared a port (%d) that has been reserved", cut, port.ContainerPort) + log.Debug("%s has declared a port (%d) that has been reserved", cut, port.ContainerPort) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf("Container declares %s reserved port in %v", portsOrigin, portsToTest), false). @@ -196,7 +196,7 @@ func FindRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]b cut := put.Containers[0] listeningPorts, err := netutil.GetListeningPorts(cut) if err != nil { - tnf.ClaimFilePrintf("Failed to get the listening ports on %s, err: %v", cut, err) + log.Debug("Failed to get the listening ports on %s, err: %v", cut, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(cut.Namespace, put.Name, fmt.Sprintf("Failed to get the listening ports on pod, err: %v", err), false)) @@ -207,10 +207,10 @@ func FindRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]b // If pod contains an "istio-proxy" container, we need to make sure that the ports returned // overlap with the known istio ports if put.ContainsIstioProxy() && ReservedIstioPorts[int32(port.PortNumber)] { - tnf.ClaimFilePrintf("%s was found to be listening to port %d due to istio-proxy being present. Ignoring.", put, port.PortNumber) + log.Debug("%s was found to be listening to port %d due to istio-proxy being present. Ignoring.", put, port.PortNumber) continue } - tnf.ClaimFilePrintf("%s has one container (%s) listening on port %d that has been reserved", put, cut.Name, port.PortNumber) + log.Debug("%s has one container (%s) listening on port %d that has been reserved", put, cut.Name, port.PortNumber) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(cut.Namespace, put.Name, fmt.Sprintf("Pod Listens to %s reserved port in %v", portsOrigin, portsToTest), false). diff --git a/cnf-certification-test/networking/policies/policies.go b/cnf-certification-test/networking/policies/policies.go index 0514c2a33..0d504583b 100644 --- a/cnf-certification-test/networking/policies/policies.go +++ b/cnf-certification-test/networking/policies/policies.go @@ -17,7 +17,7 @@ package policies import ( - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" networkingv1 "k8s.io/api/networking/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -29,21 +29,21 @@ func IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType network // https://kubernetes.io/docs/concepts/services-networking/network-policies/ if len(np.Spec.PolicyTypes) == 0 { - logrus.Debugf("%s: policy types found empty", np.Name) + log.Debug("%s: policy types found empty", np.Name) return false } // Ingress and Egress rules should be "empty" if it is a default rule. 
if policyType == networkingv1.PolicyTypeEgress { if np.Spec.Egress != nil || len(np.Spec.Egress) > 0 { - logrus.Debugf("%s: egress spec found not empty", np.Name) + log.Debug("%s: egress spec found not empty", np.Name) return false } } if policyType == networkingv1.PolicyTypeIngress { if np.Spec.Ingress != nil || len(np.Spec.Ingress) > 0 { - logrus.Debugf("%s: ingress spec found not empty", np.Name) + log.Debug("%s: ingress spec found not empty", np.Name) return false } } diff --git a/cnf-certification-test/networking/services/services.go b/cnf-certification-test/networking/services/services.go index d4c923bc4..a998ece49 100644 --- a/cnf-certification-test/networking/services/services.go +++ b/cnf-certification-test/networking/services/services.go @@ -19,8 +19,8 @@ package services import ( "fmt" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/networking/netcommons" + "github.com/test-network-function/cnf-certification-test/internal/log" corev1 "k8s.io/api/core/v1" ) @@ -36,12 +36,12 @@ func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, } if *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack && ipver == netcommons.IPv6 { - logrus.Debugf("%s is single stack ipv6", ToString(aService)) + log.Debug("%s is single stack ipv6", ToString(aService)) return netcommons.IPv6, nil } if *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack && ipver == netcommons.IPv4 { - logrus.Debugf("%s is single stack ipv4", ToString(aService)) + log.Debug("%s is single stack ipv4", ToString(aService)) return netcommons.IPv4, nil } if (*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyPreferDualStack || @@ -57,7 +57,7 @@ func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, return result, err } if res { - logrus.Debugf("%s is dual-stack", ToString(aService)) + log.Debug("%s is dual-stack", ToString(aService)) return netcommons.IPv4v6, nil } diff --git a/cnf-certification-test/networking/suite.go b/cnf-certification-test/networking/suite.go index 1b79fa021..cc60eae71 100644 --- a/cnf-certification-test/networking/suite.go +++ b/cnf-certification-test/networking/suite.go @@ -20,7 +20,6 @@ import ( "fmt" "strconv" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/common" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/networking/icmp" @@ -28,10 +27,10 @@ import ( "github.com/test-network-function/cnf-certification-test/cnf-certification-test/networking/netutil" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/networking/policies" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/networking/services" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" networkingv1 "k8s.io/api/networking/v1" ) @@ -50,7 +49,7 @@ var ( env provider.TestEnvironment beforeEachFn = func(check *checksdb.Check) error { - logrus.Infof("Check %s: getting test environment.", check.ID) + check.LogInfo("Check %s: getting test environment.", check.ID) env = 
provider.GetTestEnvironment() return nil } @@ -58,7 +57,7 @@ var ( //nolint:funlen func LoadChecks() { - logrus.Debugf("Entering %s suite", common.NetworkingTestKey) + log.Debug("Entering %s suite", common.NetworkingTestKey) checksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey). WithBeforeEachFn(beforeEachFn) @@ -169,7 +168,7 @@ func LoadChecks() { } func testExecProbDenyAtCPUPinning(check *checksdb.Check, dpdkPods []*provider.Pod) { - tnf.Logf(logrus.InfoLevel, "Check if exec probe is happening") + check.LogInfo("Check if exec probe is happening") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -209,13 +208,13 @@ func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.Test firstPodContainer := put.Containers[0] listeningPorts, err := netutil.GetListeningPorts(firstPodContainer) if err != nil { - tnf.ClaimFilePrintf("Failed to get the container's listening ports, err: %v", err) + check.LogDebug("Failed to get the container's listening ports, err: %v", err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf("Failed to get the container's listening ports, err: %v", err), false)) continue } if len(listeningPorts) == 0 { - tnf.ClaimFilePrintf("None of the containers of %s have any listening port.", put) + check.LogDebug("None of the containers of %s have any listening port.", put) continue } @@ -223,13 +222,13 @@ func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.Test failedPod := false for listeningPort := range listeningPorts { if put.ContainsIstioProxy() && netcommons.ReservedIstioPorts[int32(listeningPort.PortNumber)] { - tnf.ClaimFilePrintf("%s is listening on port %d protocol %s, but the pod also contains istio-proxy. Ignoring.", + check.LogDebug("%s is listening on port %d protocol %s, but the pod also contains istio-proxy. 
Ignoring.", put, listeningPort.PortNumber, listeningPort.Protocol) continue } if ok := declaredPorts[listeningPort]; !ok { - tnf.ClaimFilePrintf("%s is listening on port %d protocol %s, but that port was not declared in any container spec.", + check.LogDebug("%s is listening on port %d protocol %s, but that port was not declared in any container spec.", put, listeningPort.PortNumber, listeningPort.Protocol) failedPod = true nonCompliantObjects = append(nonCompliantObjects, @@ -262,12 +261,12 @@ func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.Test func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check) { netsUnderTest, claimsLog := icmp.BuildNetTestContext(env.Pods, aIPVersion, aType) // Saving curated logs to claims file - tnf.ClaimFilePrintf("%s", claimsLog.GetLogLines()) + check.LogDebug("%s", claimsLog.GetLogLines()) report, claimsLog, skip := icmp.RunNetworkingTests(netsUnderTest, defaultNumPings, aIPVersion) // Saving curated logs to claims file - tnf.ClaimFilePrintf("%s", claimsLog.GetLogLines()) + check.LogDebug("%s", claimsLog.GetLogLines()) if skip { - tnf.Logf(logrus.InfoLevel, "There are no %s networks to test with at least 2 pods, skipping test", aIPVersion) + check.LogInfo("There are no %s networks to test with at least 2 pods, skipping test", aIPVersion) return } check.SetResult(report.CompliantObjectsOut, report.NonCompliantObjectsOut) @@ -302,11 +301,11 @@ func testPartnerSpecificTCPPorts(check *checksdb.Check, env *provider.TestEnviro func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject - tnf.Logf(logrus.InfoLevel, "Testing services (should be either single stack ipv6 or dual-stack)") + check.LogInfo("Testing services (should be either single stack ipv6 or dual-stack)") for _, s := range env.Services { serviceIPVersion, err := services.GetServiceIPVersion(s) if err != nil { - tnf.ClaimFilePrintf("%s", err) + check.LogDebug("%s", err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject("Could not get IP Version from service", testhelper.ServiceType, false). AddField(testhelper.Namespace, s.Namespace). AddField(testhelper.ServiceName, s.Name)) @@ -328,7 +327,7 @@ func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) } func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironment) { - tnf.Logf(logrus.InfoLevel, "Test for Deny All in network policies") + check.LogInfo("Test for Deny All in network policies") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -341,7 +340,7 @@ func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironme // Look through all of the network policies for a matching namespace. for index := range env.NetworkPolicies { - logrus.Debugf("Testing network policy %s against pod %s", env.NetworkPolicies[index].Name, put.String()) + check.LogDebug("Testing network policy %s against pod %s", env.NetworkPolicies[index].Name, put.String()) // Skip any network policies that don't match the namespace of the pod we are testing. if env.NetworkPolicies[index].Namespace != put.Namespace { @@ -362,13 +361,13 @@ func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironme // Network policy has not been found that contains a deny-all rule for both ingress and egress. 
podIsCompliant := true if !denyAllIngressFound { - tnf.ClaimFilePrintf("%s was found to not have a default ingress deny-all network policy.", put.Name) + check.LogDebug("%s was found to not have a default ingress deny-all network policy.", put.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod was found to not have a default ingress deny-all network policy", false)) podIsCompliant = false } if !denyAllEgressFound { - tnf.ClaimFilePrintf("%s was found to not have a default egress deny-all network policy.", put.Name) + check.LogDebug("%s was found to not have a default egress deny-all network policy.", put.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod was found to not have a default egress deny-all network policy", false)) podIsCompliant = false } @@ -389,17 +388,17 @@ func testRestartOnRebootLabelOnPodsUsingSriov(check *checksdb.Check, sriovPods [ var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, pod := range sriovPods { - logrus.Debugf("Pod %s uses SRIOV network/s. Checking label %s existence & value.", pod, restartOnRebootLabel) + check.LogDebug("Pod %s uses SRIOV network/s. Checking label %s existence & value.", pod, restartOnRebootLabel) labelValue, exist := pod.GetLabels()[restartOnRebootLabel] if !exist { - tnf.ClaimFilePrintf("Pod %s is using SRIOV but the label %s was not found.", pod, restartOnRebootLabel) + check.LogDebug("Pod %s is using SRIOV but the label %s was not found.", pod, restartOnRebootLabel) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf("Pod uses SRIOV but the label %s was not found", restartOnRebootLabel), false)) continue } if labelValue != "true" { - tnf.ClaimFilePrintf("Pod %s is using SRIOV but the %s label value is not true.", pod, restartOnRebootLabel) + check.LogDebug("Pod %s is using SRIOV but the %s label value is not true.", pod, restartOnRebootLabel) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf("Pod uses SRIOV but the label %s is not set to true", restartOnRebootLabel), false)) continue } diff --git a/cnf-certification-test/observability/suite.go b/cnf-certification-test/observability/suite.go index f4728bd39..6803a6064 100644 --- a/cnf-certification-test/observability/suite.go +++ b/cnf-certification-test/observability/suite.go @@ -22,7 +22,6 @@ import ( "fmt" "io" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/common" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" pdbv1 "github.com/test-network-function/cnf-certification-test/cnf-certification-test/observability/pdb" @@ -32,7 +31,6 @@ import ( "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" corev1 "k8s.io/api/core/v1" ) @@ -40,14 +38,13 @@ var ( env provider.TestEnvironment beforeEachFn = func(check *checksdb.Check) error { - logrus.Infof("Check %s: getting test environment.", check.ID) + log.Info("Check %s: getting test environment.", check.ID) env = provider.GetTestEnvironment() return nil } ) func LoadChecks() { - 
logrus.Debugf("Entering %s suite", common.ObservabilityTestKey) log.Debug("Loading %s suite checks", common.ObservabilityTestKey) checksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey). @@ -130,20 +127,17 @@ func testContainersLogging(check *checksdb.Check, env *provider.TestEnvironment) var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Info(fmt.Sprintf("Checking %s has some logging output", cut)) - check.LogInfo("Container %s has some logging output", cut) + check.LogInfo("Checking if container %s has some logging output", cut) hasLoggingOutput, err := containerHasLoggingOutput(cut) if err != nil { - tnf.Logf(logrus.ErrorLevel, "Failed to get %s log output: %s", cut, err) - check.LogError("Failed to get container %s log output, err: %v", cut, err) + check.LogError("Failed to get %s log output, err: %v", cut, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Could not get log output", false)) continue } if !hasLoggingOutput { - tnf.Logf(logrus.ErrorLevel, "%s does not have any line of log to stderr/stdout", cut) - check.LogError("Container %s does not have any line of log to stderr/stdout", cut) + check.LogError("%s does not have any line of log to stderr/stdout", cut) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "No log line to stderr/stdout found", false)) } else { @@ -160,10 +154,10 @@ func testCrds(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, crd := range env.Crds { - logrus.Info("Testing CRD " + crd.Name) + check.LogInfo("Testing CRD " + crd.Name) for _, ver := range crd.Spec.Versions { if _, ok := ver.Schema.OpenAPIV3Schema.Properties["status"]; !ok { - tnf.Logf(logrus.ErrorLevel, "FAILURE: CRD %s, version: %s does not have a status subresource.", crd.Name, ver.Name) + check.LogError("FAILURE: CRD %s, version: %s does not have a status subresource.", crd.Name, ver.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject("Crd does not have a status sub resource set", testhelper.CustomResourceDefinitionType, false). AddField(testhelper.CustomResourceDefinitionName, crd.Name). @@ -186,9 +180,9 @@ func testTerminationMessagePolicy(check *checksdb.Check, env *provider.TestEnvir var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Info("Testing for terminationMessagePolicy: " + cut.String()) + check.LogInfo("Testing for terminationMessagePolicy: " + cut.String()) if cut.TerminationMessagePolicy != corev1.TerminationMessageFallbackToLogsOnError { - tnf.ClaimFilePrintf("FAILURE: %s does not have a TerminationMessagePolicy: FallbackToLogsOnError", cut) + check.LogDebug("FAILURE: %s does not have a TerminationMessagePolicy: FallbackToLogsOnError", cut) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "TerminationMessagePolicy is not FallbackToLogsOnError", false)) } else { @@ -217,9 +211,9 @@ func testPodDisruptionBudgets(check *checksdb.Check, env *provider.TestEnvironme AddField(testhelper.DeploymentName, d.Name). AddField(testhelper.Namespace, d.Namespace). 
AddField(testhelper.PodDisruptionBudgetReference, env.PodDisruptionBudgets[pdbIndex].Name)) - tnf.ClaimFilePrintf("PDB %s is not valid for Deployment %s, err: %v", env.PodDisruptionBudgets[pdbIndex].Name, d.Name, err) + check.LogDebug("PDB %s is not valid for Deployment %s, err: %v", env.PodDisruptionBudgets[pdbIndex].Name, d.Name, err) } else { - logrus.Infof("PDB %s is valid for Deployment: %s", env.PodDisruptionBudgets[pdbIndex].Name, d.Name) + check.LogInfo("PDB %s is valid for Deployment: %s", env.PodDisruptionBudgets[pdbIndex].Name, d.Name) compliantObjects = append(compliantObjects, testhelper.NewReportObject("Deployment: references PodDisruptionBudget", testhelper.DeploymentType, true). AddField(testhelper.DeploymentName, d.Name). AddField(testhelper.Namespace, d.Namespace). @@ -246,9 +240,9 @@ func testPodDisruptionBudgets(check *checksdb.Check, env *provider.TestEnvironme AddField(testhelper.StatefulSetName, s.Name). AddField(testhelper.Namespace, s.Namespace). AddField(testhelper.PodDisruptionBudgetReference, env.PodDisruptionBudgets[pdbIndex].Name)) - tnf.ClaimFilePrintf("PDB %s is not valid for StatefulSet %s, err: %v", env.PodDisruptionBudgets[pdbIndex].Name, s.Name, err) + check.LogDebug("PDB %s is not valid for StatefulSet %s, err: %v", env.PodDisruptionBudgets[pdbIndex].Name, s.Name, err) } else { - logrus.Infof("PDB %s is valid for StatefulSet: %s", env.PodDisruptionBudgets[pdbIndex].Name, s.Name) + check.LogInfo("PDB %s is valid for StatefulSet: %s", env.PodDisruptionBudgets[pdbIndex].Name, s.Name) compliantObjects = append(compliantObjects, testhelper.NewReportObject("StatefulSet: references PodDisruptionBudget", testhelper.StatefulSetType, true). AddField(testhelper.StatefulSetName, s.Name). AddField(testhelper.Namespace, s.Namespace). diff --git a/cnf-certification-test/operator/phasecheck/phasecheck.go b/cnf-certification-test/operator/phasecheck/phasecheck.go index 6a8a5f85e..88aab9c52 100644 --- a/cnf-certification-test/operator/phasecheck/phasecheck.go +++ b/cnf-certification-test/operator/phasecheck/phasecheck.go @@ -21,10 +21,9 @@ import ( "time" "github.com/operator-framework/api/pkg/operators/v1alpha1" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/provider" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -37,22 +36,22 @@ func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool { start := time.Now() for time.Since(start) < timeout { if isOperatorPhaseSucceeded(csv) { - tnf.ClaimFilePrintf("%s is ready", provider.CsvToString(csv)) + log.Debug("%s is ready", provider.CsvToString(csv)) return true } else if isOperatorPhaseFailedOrUnknown(csv) { - tnf.ClaimFilePrintf("%s failed to be ready, status=%s", provider.CsvToString(csv), csv.Status.Phase) + log.Debug("%s failed to be ready, status=%s", provider.CsvToString(csv), csv.Status.Phase) return false } // Operator is not ready, but we need to take into account that its pods // could have been deleted by some of the lifecycle test cases, so they // could be restarting. Let's give it some time before declaring it failed. 
- tnf.ClaimFilePrintf("Waiting for %s to be in Succeeded phase: %s", provider.CsvToString(csv), csv.Status.Phase) + log.Debug("Waiting for %s to be in Succeeded phase: %s", provider.CsvToString(csv), csv.Status.Phase) time.Sleep(time.Second) freshCsv, err := oc.OlmClient.OperatorsV1alpha1().ClusterServiceVersions(csv.Namespace).Get(context.TODO(), csv.Name, metav1.GetOptions{}) if err != nil { - tnf.Logf(logrus.ErrorLevel, "could not get csv %s, err: %v", provider.CsvToString(freshCsv), err) + log.Error("could not get csv %s, err: %v", provider.CsvToString(freshCsv), err) return false } @@ -60,19 +59,19 @@ func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool { *csv = *freshCsv } if time.Since(start) > timeout { - tnf.Logf(logrus.ErrorLevel, "timeout waiting for csv %s to be ready", provider.CsvToString(csv)) + log.Error("timeout waiting for csv %s to be ready", provider.CsvToString(csv)) } return false } func isOperatorPhaseSucceeded(csv *v1alpha1.ClusterServiceVersion) bool { - logrus.Tracef("Checking succeeded status phase for csv %s (ns %s). Phase: %v", csv.Name, csv.Namespace, csv.Status.Phase) + log.Debug("Checking succeeded status phase for csv %s (ns %s). Phase: %v", csv.Name, csv.Namespace, csv.Status.Phase) return csv.Status.Phase == v1alpha1.CSVPhaseSucceeded } func isOperatorPhaseFailedOrUnknown(csv *v1alpha1.ClusterServiceVersion) bool { - logrus.Tracef("Checking failed status phase for csv %s (ns %s). Phase: %v", csv.Name, csv.Namespace, csv.Status.Phase) + log.Debug("Checking failed status phase for csv %s (ns %s). Phase: %v", csv.Name, csv.Namespace, csv.Status.Phase) return csv.Status.Phase == v1alpha1.CSVPhaseFailed || csv.Status.Phase == v1alpha1.CSVPhaseUnknown } diff --git a/cnf-certification-test/operator/suite.go b/cnf-certification-test/operator/suite.go index 09cd5b4c5..74926c982 100644 --- a/cnf-certification-test/operator/suite.go +++ b/cnf-certification-test/operator/suite.go @@ -19,28 +19,27 @@ package operator import ( "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/common" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/operator/phasecheck" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" ) var ( env provider.TestEnvironment beforeEachFn = func(check *checksdb.Check) error { - logrus.Infof("Check %s: getting test environment.", check.ID) + check.LogInfo("Check %s: getting test environment.", check.ID) env = provider.GetTestEnvironment() return nil } ) func LoadChecks() { - logrus.Debugf("Entering %s suite", common.OperatorTestKey) + log.Debug("Entering %s suite", common.OperatorTestKey) checksGroup := checksdb.NewChecksGroup(common.OperatorTestKey). 
WithBeforeEachFn(beforeEachFn) @@ -100,7 +99,7 @@ func testOperatorInstallationWithoutPrivileges(check *checksdb.Check, env *provi csv := env.Operators[i].Csv clusterPermissions := csv.Spec.InstallStrategy.StrategySpec.ClusterPermissions if len(clusterPermissions) == 0 { - logrus.Debugf("No clusterPermissions found in %s", env.Operators[i]) + check.LogDebug("No clusterPermissions found in %s", env.Operators[i]) compliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, "Operator has no privileges on cluster resources", true)) continue } @@ -111,7 +110,7 @@ func testOperatorInstallationWithoutPrivileges(check *checksdb.Check, env *provi permission := &clusterPermissions[permissionIndex] for ruleIndex := range permission.Rules { if n := len(permission.Rules[ruleIndex].ResourceNames); n > 0 { - tnf.ClaimFilePrintf("%s: cluster permission (service account %s) has %d resource names (rule index %d).", + check.LogDebug("%s: cluster permission (service account %s) has %d resource names (rule index %d).", env.Operators[i], permission.ServiceAccountName, n, ruleIndex) // Keep reviewing other permissions' rules so we can log all the failing ones in the claim file. badRuleFound = true @@ -141,7 +140,7 @@ func testOperatorOlmSubscription(check *checksdb.Check, env *provider.TestEnviro for i := range env.Operators { operator := env.Operators[i] if operator.SubscriptionName == "" { - tnf.ClaimFilePrintf("OLM subscription not found for operator from csv %s", provider.CsvToString(operator.Csv)) + check.LogDebug("OLM subscription not found for operator from csv %s", provider.CsvToString(operator.Csv)) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, "OLM subscription not found for operator, so it is not installed via OLM", false). 
AddField(testhelper.SubscriptionName, operator.SubscriptionName)) } else { diff --git a/cnf-certification-test/performance/suite.go b/cnf-certification-test/performance/suite.go index 0ca440412..067c89da9 100644 --- a/cnf-certification-test/performance/suite.go +++ b/cnf-certification-test/performance/suite.go @@ -22,17 +22,15 @@ import ( "strconv" "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/accesscontrol/resources" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/common" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" "github.com/test-network-function/cnf-certification-test/internal/crclient" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/scheduling" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" - v1 "k8s.io/api/core/v1" ) const ( @@ -44,7 +42,7 @@ var ( env provider.TestEnvironment beforeEachFn = func(check *checksdb.Check) error { - logrus.Infof("Check %s: getting test environment.", check.ID) + check.LogInfo("Check %s: getting test environment.", check.ID) env = provider.GetTestEnvironment() return nil } @@ -84,7 +82,7 @@ var ( //nolint:funlen func LoadChecks() { - logrus.Debugf("Entering %s suite", common.PerformanceTestKey) + log.Debug("Loading %s checks", common.PerformanceTestKey) checksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey). WithBeforeEachFn(beforeEachFn) @@ -150,17 +148,7 @@ func LoadChecks() { checksGroup.Add(check) } -func CheckProbePeriodSeconds(elem *v1.Probe, cut *provider.Container, s string) bool { - if elem.PeriodSeconds > minExecProbePeriodSeconds { - tnf.ClaimFilePrintf("Container %s is using exec probes, PeriodSeconds of %s: %s", cut, s, - elem.PeriodSeconds) - return true - } - tnf.ClaimFilePrintf("Container %s is not using of exec probes, PeriodSeconds of %s: %s", cut, s, - elem.PeriodSeconds) - return false -} - +//nolint:funlen func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -169,11 +157,17 @@ func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnviron for _, cut := range put.Containers { if cut.LivenessProbe != nil && cut.LivenessProbe.Exec != nil { counter++ - if CheckProbePeriodSeconds(cut.LivenessProbe, cut, "LivenessProbe") { + if cut.LivenessProbe.PeriodSeconds > minExecProbePeriodSeconds { + check.LogInfo("Container %s has a LivenessProbe with PeriodSeconds greater than %d (%d seconds)", + cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds) + compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name, cut.Name, fmt.Sprintf("LivenessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)", cut.LivenessProbe.PeriodSeconds), true)) } else { + check.LogError("Container %s has a LivenessProbe with PeriodSeconds less than %d (%d seconds)", + cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds) + nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name, cut.Name, 
fmt.Sprintf("LivenessProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)", @@ -182,11 +176,17 @@ func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnviron } if cut.StartupProbe != nil && cut.StartupProbe.Exec != nil { counter++ - if CheckProbePeriodSeconds(cut.StartupProbe, cut, "StartupProbe") { + if cut.StartupProbe.PeriodSeconds > minExecProbePeriodSeconds { + check.LogInfo("Container %s has a StartupProbe with PeriodSeconds greater than %d (%d seconds)", + cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds) + compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name, cut.Name, fmt.Sprintf("StartupProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)", cut.StartupProbe.PeriodSeconds), true)) } else { + check.LogError("Container %s has a StartupProbe with PeriodSeconds less than %d (%d seconds)", + cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds) + nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name, cut.Name, fmt.Sprintf("StartupProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)", @@ -195,11 +195,17 @@ func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnviron } if cut.ReadinessProbe != nil && cut.ReadinessProbe.Exec != nil { counter++ - if CheckProbePeriodSeconds(cut.ReadinessProbe, cut, "ReadinessProbe") { + if cut.ReadinessProbe.PeriodSeconds > minExecProbePeriodSeconds { + check.LogInfo("Container %s has a ReadinessProbe with PeriodSeconds greater than %d (%d seconds)", + cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds) + compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name, cut.Name, fmt.Sprintf("ReadinessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)", cut.ReadinessProbe.PeriodSeconds), true)) } else { + check.LogError("Container %s has a ReadinessProbe with PeriodSeconds less than %d (%d seconds)", + cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds) + nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name, cut.Name, fmt.Sprintf("ReadinessProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)", @@ -211,12 +217,12 @@ func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnviron // If there >=10 exec probes, mark the entire cluster as a failure if counter >= maxNumberOfExecProbes { - tnf.ClaimFilePrintf(fmt.Sprintf("CNF has %d exec probes", counter)) + check.LogDebug(fmt.Sprintf("CNF has %d exec probes", counter)) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf("CNF has 10 or more exec probes (%d exec probes)", counter), testhelper.CnfType, false)) } else { // Compliant object compliantObjects = append(compliantObjects, testhelper.NewReportObject(fmt.Sprintf("CNF has less than 10 exec probes (%d exec probes)", counter), testhelper.CnfType, true)) - tnf.ClaimFilePrintf(fmt.Sprintf("CNF has less than %d exec probes", counter)) + check.LogDebug(fmt.Sprintf("CNF has less than %d exec probes", counter)) } check.SetResult(compliantObjects, nonCompliantObjects) @@ -241,7 +247,7 @@ func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment) exclusiveStr := strconv.Itoa(nBExclusiveCPUPoolContainers) sharedStr := strconv.Itoa(nBSharedCPUPoolContainers) - tnf.ClaimFilePrintf("Pod: %s has containers 
whose CPUs belong to different pools. Containers in the shared cpu pool: %d "+ + check.LogDebug("Pod: %s has containers whose CPUs belong to different pools. Containers in the shared cpu pool: %d "+ "Containers in the exclusive cpu pool: %d", put.String(), nBSharedCPUPoolContainers, nBExclusiveCPUPoolContainers) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has containers whose CPUs belong to different pools", false). AddField("SharedCPUPoolContainers", sharedStr). @@ -259,17 +265,17 @@ func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvi var compliantContainersPids []*testhelper.ReportObject var nonCompliantContainersPids []*testhelper.ReportObject for _, testContainer := range podContainers { - logrus.Infof("Processing %v", testContainer) + check.LogInfo("Processing %v", testContainer) // Get the pid namespace pidNamespace, err := crclient.GetContainerPidNamespace(testContainer, env) if err != nil { - tnf.Logf(logrus.ErrorLevel, "unable to get pid namespace for container %s, err: %v", testContainer, err) + check.LogError("unable to get pid namespace for container %s, err: %v", testContainer, err) nonCompliantContainersPids = append(nonCompliantContainersPids, testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, fmt.Sprintf("Internal error, err=%s", err), false)) continue } - logrus.Debugf("Obtained pidNamespace for %s is %s", testContainer, pidNamespace) + check.LogDebug("Obtained pidNamespace for %s is %s", testContainer, pidNamespace) // Get the list of process ids running in the pid namespace processes, err := crclient.GetPidsFromPidNamespace(pidNamespace, testContainer) @@ -285,7 +291,7 @@ func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvi compliantContainersPids = append(compliantContainersPids, compliantPids...) nonCompliantContainersPids = append(nonCompliantContainersPids, nonCompliantPids...) - logrus.Debugf("Processed %v", testContainer) + check.LogDebug("Processed %v", testContainer) } check.SetResult(compliantContainersPids, nonCompliantContainersPids) @@ -329,7 +335,7 @@ func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment processes, err := crclient.GetContainerProcesses(cut, env) if err != nil { - tnf.ClaimFilePrintf("Could not determine the processes pids for container %s, err: %v", cut, err) + check.LogDebug("Could not determine the processes pids for container %s, err: %v", cut, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Could not determine the processes pids for container", false)) break } @@ -348,7 +354,7 @@ func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment AddField(testhelper.ProcessCommandLine, p.Args)) continue } - tnf.ClaimFilePrintf("Could not determine the scheduling policy for container %s (pid=%v), err: %v", cut, p.Pid, err) + check.LogDebug("Could not determine the scheduling policy for container %s (pid=%v), err: %v", cut, p.Pid, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Could not determine the scheduling policy for container", false). AddField(testhelper.ProcessID, strconv.Itoa(p.Pid)). 
AddField(testhelper.ProcessCommandLine, p.Args)) @@ -356,7 +362,7 @@ func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment continue } if scheduling.PolicyIsRT(schedPolicy) { - tnf.ClaimFilePrintf("Pod %s/Container %s defines exec probes while having a RT scheduling policy for pid %d", cut.Podname, cut, p.Pid) + check.LogDebug("Pod %s/Container %s defines exec probes while having a RT scheduling policy for pid %d", cut.Podname, cut, p.Pid) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container defines exec probes while having a RT scheduling policy", false). AddField(testhelper.ProcessID, strconv.Itoa(p.Pid))) allProcessesCompliant = false diff --git a/cnf-certification-test/platform/bootparams/bootparams.go b/cnf-certification-test/platform/bootparams/bootparams.go index c28eb5a63..5475402b9 100644 --- a/cnf-certification-test/platform/bootparams/bootparams.go +++ b/cnf-certification-test/platform/bootparams/bootparams.go @@ -20,8 +20,8 @@ import ( "fmt" "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/arrayhelper" "github.com/test-network-function/cnf-certification-test/pkg/loghelper" "github.com/test-network-function/cnf-certification-test/pkg/provider" @@ -53,7 +53,7 @@ func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container claimsLog.AddLogLine("%s KernelCmdLineArg %q does not match MachineConfig value: %q!=%q", cut.NodeName, key, currentVal, mcVal) } else { - logrus.Tracef("%s KernelCmdLineArg==mcVal %q: %q==%q", cut.NodeName, key, currentVal, mcVal) + log.Debug("%s KernelCmdLineArg==mcVal %q: %q==%q", cut.NodeName, key, currentVal, mcVal) } } if grubVal, ok := grubKernelConfigMap[key]; ok { @@ -61,7 +61,7 @@ func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container claimsLog.AddLogLine("%s NodeGrubKernelArgs %q does not match MachineConfig value: %q!=%q", cut.NodeName, key, mcVal, grubVal) } else { - logrus.Tracef("%s NodeGrubKernelArg==mcVal %q: %q==%q", cut.NodeName, key, grubVal, mcVal) + log.Debug("%s NodeGrubKernelArg==mcVal %q: %q==%q", cut.NodeName, key, grubVal, mcVal) } } } diff --git a/cnf-certification-test/platform/cnffsdiff/fsdiff.go b/cnf-certification-test/platform/cnffsdiff/fsdiff.go index 6d5d9e7b8..da02bbb8a 100644 --- a/cnf-certification-test/platform/cnffsdiff/fsdiff.go +++ b/cnf-certification-test/platform/cnffsdiff/fsdiff.go @@ -22,8 +22,8 @@ import ( "fmt" mapset "github.com/deckarep/golang-set/v2" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" ) @@ -91,7 +91,7 @@ func intersectTargetFolders(src []string) []string { var dst []string for _, folder := range src { if targetFolders.Contains(folder) { - logrus.Tracef("Container's folder %s is altered.", folder) + log.Debug("Container's folder %s is altered.", folder) dst = append(dst, folder) } } @@ -119,7 +119,7 @@ func (f *FsDiff) RunTest(containerUID string) { defer f.unmountCustomPodman() - logrus.Infof("Running \"podman diff\" for container id %s", containerUID) + log.Info("Running \"podman diff\" for container id %s", containerUID) output, err := 
f.runPodmanDiff(containerUID) if err != nil { f.Error = err @@ -128,7 +128,7 @@ func (f *FsDiff) RunTest(containerUID string) { } // see if there's a match in the output - logrus.Traceln("Podman diff output is ", output) + log.Debug("Podman diff output is %s", output) diff := fsDiffJSON{} err = json.Unmarshal([]byte(output), &diff) @@ -184,13 +184,13 @@ func (f *FsDiff) unmountDebugPartnerPodmanFolder() error { func (f *FsDiff) installCustomPodman() error { // We need to create the destination folder first. - logrus.Infof("Creating temp folder %s", nodeTmpMountFolder) + log.Info("Creating temp folder %s", nodeTmpMountFolder) if err := f.createNodeFolder(); err != nil { return err } // Mount podman from partner debug pod into /host/tmp/... - logrus.Infof("Mouting %s into %s", partnerPodmanFolder, nodeTmpMountFolder) + log.Info("Mouting %s into %s", partnerPodmanFolder, nodeTmpMountFolder) if mountErr := f.mountDebugPartnerPodmanFolder(); mountErr != nil { // We need to delete the temp folder previously created as mount point. if deleteErr := f.deleteNodeFolder(); deleteErr != nil { @@ -206,7 +206,7 @@ func (f *FsDiff) installCustomPodman() error { func (f *FsDiff) unmountCustomPodman() { // Unmount podman folder from host. - logrus.Infof("Unmounting folder %s", nodeTmpMountFolder) + log.Info("Unmounting folder %s", nodeTmpMountFolder) if err := f.unmountDebugPartnerPodmanFolder(); err != nil { // Here, there's no point on trying to remove the temp folder used as mount point, as // that probably won't work either. @@ -215,7 +215,7 @@ func (f *FsDiff) unmountCustomPodman() { return } - logrus.Infof("Deleting folder %s", nodeTmpMountFolder) + log.Info("Deleting folder %s", nodeTmpMountFolder) if err := f.deleteNodeFolder(); err != nil { f.Error = err f.result = testhelper.ERROR diff --git a/cnf-certification-test/platform/hugepages/hugepages.go b/cnf-certification-test/platform/hugepages/hugepages.go index f8ce23d67..34074bf94 100644 --- a/cnf-certification-test/platform/hugepages/hugepages.go +++ b/cnf-certification-test/platform/hugepages/hugepages.go @@ -8,8 +8,8 @@ import ( "strconv" "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/provider" corev1 "k8s.io/api/core/v1" ) @@ -83,14 +83,14 @@ func NewTester(node *provider.Node, debugPod *corev1.Pod, commander clientsholde context: clientsholder.NewContext(debugPod.Namespace, debugPod.Name, debugPod.Spec.Containers[0].Name), } - logrus.Infof("Getting node %s numa's hugepages values.", node.Data.Name) + log.Info("Getting node %s numa's hugepages values.", node.Data.Name) var err error tester.nodeHugepagesByNuma, err = tester.getNodeNumaHugePages() if err != nil { return nil, fmt.Errorf("unable to get node hugepages, err: %v", err) } - logrus.Info("Parsing machineconfig's kernelArguments and systemd's hugepages units.") + log.Info("Parsing machineconfig's kernelArguments and systemd's hugepages units.") tester.mcSystemdHugepagesByNuma, err = getMcSystemdUnitsHugepagesConfig(&tester.node.Mc) if err != nil { return nil, fmt.Errorf("failed to get MC systemd hugepages config, err: %v", err) @@ -105,12 +105,12 @@ func (tester *Tester) HasMcSystemdHugepagesUnits() bool { func (tester *Tester) Run() error { if tester.HasMcSystemdHugepagesUnits() { - logrus.Info("Comparing MachineConfig Systemd hugepages info against node values.") + 
log.Info("Comparing MachineConfig Systemd hugepages info against node values.") if pass, err := tester.TestNodeHugepagesWithMcSystemd(); !pass { return fmt.Errorf("failed to compare machineConfig systemd's unit hugepages config with node values, err: %v", err) } } else { - logrus.Info("Comparing MC KernelArguments hugepages info against node values.") + log.Info("Comparing MC KernelArguments hugepages info against node values.") if pass, err := tester.TestNodeHugepagesWithKernelArgs(); !pass { return fmt.Errorf("failed to compare machineConfig KernelArguments with node ones, err: %v", err) } @@ -126,7 +126,7 @@ func (tester *Tester) TestNodeHugepagesWithMcSystemd() (bool, error) { // First, numa index should exist in MC mcCountBySize, numaExistsInMc := tester.mcSystemdHugepagesByNuma[nodeNumaIdx] if !numaExistsInMc { - logrus.Warnf("Numa %d does not exist in machine config. All hugepage count for all sizes must be zero.", nodeNumaIdx) + log.Warn("Numa %d does not exist in machine config. All hugepage count for all sizes must be zero.", nodeNumaIdx) for _, count := range nodeCountBySize { if count != 0 { return false, fmt.Errorf("node's numa %d hugepages config does not exist in node's machineconfig", nodeNumaIdx) @@ -198,7 +198,7 @@ func (tester *Tester) TestNodeHugepagesWithKernelArgs() (bool, error) { } if total == kernelCount { - logrus.Infof("kernelArguments' hugepages count:%d, size:%d match total node ones for that size.", kernelCount, kernelSize) + log.Info("kernelArguments' hugepages count:%d, size:%d match total node ones for that size.", kernelCount, kernelSize) } else { return false, fmt.Errorf("total hugepages of size %d won't match (node count=%d, expected=%d)", kernelSize, total, kernelCount) } @@ -211,7 +211,7 @@ func (tester *Tester) TestNodeHugepagesWithKernelArgs() (bool, error) { func (tester *Tester) getNodeNumaHugePages() (hugepages hugepagesByNuma, err error) { // This command must run inside the node, so we'll need the node's context to run commands inside the debug daemonset pod. 
stdout, stderr, err := tester.commander.ExecCommandContainer(tester.context, cmd) - logrus.Tracef("getNodeNumaHugePages stdout: %s, stderr: %s", stdout, stderr) + log.Debug("getNodeNumaHugePages stdout: %s, stderr: %s", stdout, stderr) if err != nil { return hugepagesByNuma{}, err } @@ -242,7 +242,7 @@ func (tester *Tester) getNodeNumaHugePages() (hugepages hugepagesByNuma, err err } } - logrus.Infof("Node %s hugepages: %s", tester.node.Data.Name, hugepages) + log.Info("Node %s hugepages: %s", tester.node.Data.Name, hugepages) return hugepages, nil } @@ -257,7 +257,7 @@ func getMcSystemdUnitsHugepagesConfig(mc *provider.MachineConfig) (hugepages hug if !strings.Contains(unit.Name, "hugepages-allocation") { continue } - logrus.Infof("Systemd Unit with hugepages info -> name: %s, contents: %s", unit.Name, unit.Contents) + log.Info("Systemd Unit with hugepages info -> name: %s, contents: %s", unit.Name, unit.Contents) unit.Contents = strings.Trim(unit.Contents, "\"") values := r.FindStringSubmatch(unit.Contents) if len(values) < UnitContentsRegexMatchLen { @@ -276,9 +276,9 @@ func getMcSystemdUnitsHugepagesConfig(mc *provider.MachineConfig) (hugepages hug } if len(hugepages) > 0 { - logrus.Infof("Machineconfig's systemd.units hugepages: %v", hugepages) + log.Info("Machineconfig's systemd.units hugepages: %v", hugepages) } else { - logrus.Infof("No hugepages found in machineconfig system.units") + log.Info("No hugepages found in machineconfig system.units") } return hugepages, nil @@ -290,7 +290,7 @@ func logMcKernelArgumentsHugepages(hugepagesPerSize map[int]int, defhugepagesz i for size, count := range hugepagesPerSize { sb.WriteString(fmt.Sprintf(", size=%dkB - count=%d", size, count)) } - logrus.Info(sb.String()) + log.Info(sb.String()) } // getMcHugepagesFromMcKernelArguments gets the hugepages params from machineconfig's kernelArguments @@ -335,7 +335,7 @@ func getMcHugepagesFromMcKernelArguments(mc *provider.MachineConfig) (hugepagesP if len(hugepagesPerSize) == 0 { hugepagesPerSize[RhelDefaultHugepagesz] = RhelDefaultHugepages - logrus.Warnf("No hugepages size found in node's machineconfig. Defaulting to size=%dkB (count=%d)", RhelDefaultHugepagesz, RhelDefaultHugepages) + log.Warn("No hugepages size found in node's machineconfig. Defaulting to size=%dkB (count=%d)", RhelDefaultHugepagesz, RhelDefaultHugepages) } logMcKernelArgumentsHugepages(hugepagesPerSize, defhugepagesz) diff --git a/cnf-certification-test/platform/isredhat/isredhat.go b/cnf-certification-test/platform/isredhat/isredhat.go index 78d4d87ff..b88e79149 100644 --- a/cnf-certification-test/platform/isredhat/isredhat.go +++ b/cnf-certification-test/platform/isredhat/isredhat.go @@ -20,8 +20,8 @@ import ( "errors" "regexp" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" ) const ( @@ -60,7 +60,7 @@ func IsRHEL(output string) bool { } // /etc/redhat-release exists. check if it matches the regex for an official build. 
- logrus.Infof("redhat-release was found to be: %s", output) + log.Info("redhat-release was found to be: %s", output) redHatVersionRegex := regexp.MustCompile(VersionRegex) matchVersion := redHatVersionRegex.FindAllString(output, -1) return len(matchVersion) > 0 @@ -69,11 +69,11 @@ func IsRHEL(output string) bool { func (b *BaseImageInfo) runCommand(cmd string) (string, error) { output, outerr, err := b.ClientHolder.ExecCommandContainer(b.OCPContext, cmd) if err != nil { - logrus.Errorln("can not execute command on container ", err) + log.Error("can not execute command on container, err: %v", err) return "", err } if outerr != "" { - logrus.Errorln("error when running baseimage command ", outerr) + log.Error("error when running baseimage command, err: %v", outerr) return "", errors.New(outerr) } return output, nil diff --git a/cnf-certification-test/platform/nodetainted/nodetainted.go b/cnf-certification-test/platform/nodetainted/nodetainted.go index 128657c57..d03dcf03c 100644 --- a/cnf-certification-test/platform/nodetainted/nodetainted.go +++ b/cnf-certification-test/platform/nodetainted/nodetainted.go @@ -23,9 +23,8 @@ import ( "strconv" "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" + "github.com/test-network-function/cnf-certification-test/internal/log" ) // NodeTainted holds information about tainted nodes. @@ -38,11 +37,11 @@ var runCommand = func(ctx *clientsholder.Context, cmd string) (string, error) { ch := clientsholder.GetClientsHolder() output, outerr, err := ch.ExecCommandContainer(*ctx, cmd) if err != nil { - logrus.Errorln("can not execute command on container ", err) + log.Error("can not execute command on container, err=%v", err) return "", err } if outerr != "" { - logrus.Errorln("error when running nodetainted command ", outerr) + log.Error("error when running nodetainted command err=%v", outerr) return "", errors.New(outerr) } return output, nil @@ -279,11 +278,11 @@ func (nt *NodeTainted) GetTainterModules(allowList map[string]bool) (tainters ma filteredTainters := map[string]string{} for moduleName, moduleTaintsLetters := range allTainters { moduleTaints := DecodeKernelTaintsFromLetters(moduleTaintsLetters) - logrus.Debugf("%s: Module %s has taints (%s): %s", nt.node, moduleName, moduleTaintsLetters, moduleTaints) + log.Debug("%s: Module %s has taints (%s): %s", nt.node, moduleName, moduleTaintsLetters, moduleTaints) // Apply allowlist. 
if allowList[moduleName] { - tnf.ClaimFilePrintf("%s module %s is tainting the kernel but it has been allowlisted (taints: %v)", + log.Debug("%s module %s is tainting the kernel but it has been allowlisted (taints: %v)", nt.node, moduleName, moduleTaints) } else { filteredTainters[moduleName] = moduleTaintsLetters diff --git a/cnf-certification-test/platform/suite.go b/cnf-certification-test/platform/suite.go index 5509cac81..a1d04bc1c 100644 --- a/cnf-certification-test/platform/suite.go +++ b/cnf-certification-test/platform/suite.go @@ -21,20 +21,18 @@ import ( "strconv" "strings" - "github.com/sirupsen/logrus" - clientsholder "github.com/test-network-function/cnf-certification-test/internal/clientsholder" - "github.com/test-network-function/cnf-certification-test/pkg/checksdb" - "github.com/test-network-function/cnf-certification-test/pkg/compatibility" - "github.com/test-network-function/cnf-certification-test/pkg/provider" - "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" - "github.com/test-network-function/cnf-certification-test/cnf-certification-test/common" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/platform/bootparams" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/platform/cnffsdiff" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/platform/hugepages" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/platform/isredhat" + clientsholder "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" + "github.com/test-network-function/cnf-certification-test/pkg/checksdb" + "github.com/test-network-function/cnf-certification-test/pkg/compatibility" + "github.com/test-network-function/cnf-certification-test/pkg/provider" + "github.com/test-network-function/cnf-certification-test/pkg/testhelper" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/platform/operatingsystem" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/platform/sysctlconfig" @@ -46,7 +44,7 @@ var ( env provider.TestEnvironment beforeEachFn = func(check *checksdb.Check) error { - logrus.Infof("Check %s: getting test environment.", check.ID) + check.LogInfo("Check %s: getting test environment.", check.ID) env = provider.GetTestEnvironment() return nil } @@ -54,7 +52,7 @@ var ( //nolint:funlen func LoadChecks() { - logrus.Debugf("Entering %s suite", common.PlatformAlterationTestKey) + log.Debug("Entering %s suite", common.PlatformAlterationTestKey) checksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey). 
WithBeforeEachFn(beforeEachFn) @@ -233,13 +231,13 @@ func testServiceMesh(check *checksdb.Check, env *provider.TestEnvironment) { istioProxyFound := false for _, cut := range put.Containers { if cut.IsIstioProxy() { - tnf.ClaimFilePrintf("Istio proxy container found on %s", put) + check.LogDebug("Istio proxy container found on %s", put) istioProxyFound = true break } } if !istioProxyFound { - tnf.ClaimFilePrintf("Pod found without service mesh: %s", put.String()) + check.LogDebug("Pod found without service mesh: %s", put.String()) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod found without service mesh container", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod found with service mesh container", true)) @@ -254,7 +252,7 @@ func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Debug(fmt.Sprintf("%s should not install new packages after starting", cut.String())) + check.LogDebug(fmt.Sprintf("%s should not install new packages after starting", cut.String())) debugPod := env.DebugPods[cut.NodeName] ctxt := clientsholder.NewContext(debugPod.Namespace, debugPod.Name, debugPod.Spec.Containers[0].Name) @@ -265,13 +263,13 @@ func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container is not modified", true)) continue case testhelper.FAILURE: - tnf.ClaimFilePrintf("%s - changed folders: %v, deleted folders: %v", cut, fsDiffTester.ChangedFolders, fsDiffTester.DeletedFolders) + check.LogDebug("%s - changed folders: %v, deleted folders: %v", cut, fsDiffTester.ChangedFolders, fsDiffTester.DeletedFolders) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container is modified", false). AddField("ChangedFolders", strings.Join(fsDiffTester.ChangedFolders, ",")). AddField("DeletedFolders", strings.Join(fsDiffTester.DeletedFolders, ","))) case testhelper.ERROR: - tnf.ClaimFilePrintf("%s - error while running fs-diff: %v", cut, fsDiffTester.Error) + check.LogDebug("%s - error while running fs-diff: %v", cut, fsDiffTester.Error) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Error while running fs-diff", false).AddField(testhelper.Error, fsDiffTester.Error.Error())) } } @@ -297,7 +295,7 @@ func testTainted(check *checksdb.Check, env *provider.TestEnvironment) { // otherTaints maps a node to a list of taint bits that haven't been set by any module. otherTaints := map[string][]int{} - logrus.Infof("Modules allowlist: %+v", env.Config.AcceptedKernelTaints) + check.LogInfo("Modules allowlist: %+v", env.Config.AcceptedKernelTaints) // helper map to make the checks easier. 
allowListedModules := map[string]bool{} for _, module := range env.Config.AcceptedKernelTaints { @@ -308,7 +306,7 @@ func testTainted(check *checksdb.Check, env *provider.TestEnvironment) { for _, dp := range env.DebugPods { nodeName := dp.Spec.NodeName - logrus.Infof("Checking kernel taints of node %s", nodeName) + check.LogInfo("Checking kernel taints of node %s", nodeName) ocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name) tf := nodetainted.NewNodeTaintedTester(&ocpContext, nodeName) @@ -316,19 +314,19 @@ func testTainted(check *checksdb.Check, env *provider.TestEnvironment) { // Get the taints mask from the node kernel taintsMask, err := tf.GetKernelTaintsMask() if err != nil { - tnf.ClaimFilePrintf("Failed to retrieve kernel taint information from node %s: %v", nodeName, err) + check.LogDebug("Failed to retrieve kernel taint information from node %s: %v", nodeName, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, "Failed to retrieve kernel taint information from node", false). AddField(testhelper.Error, err.Error())) continue } if taintsMask == 0 { - tnf.ClaimFilePrintf("Node %s has no non-approved kernel taints.", nodeName) + check.LogDebug("Node %s has no non-approved kernel taints.", nodeName) compliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, "Node has no non-approved kernel taints", true)) continue } - tnf.ClaimFilePrintf("Node %s kernel is tainted. Taints mask=%d - Decoded taints: %v", + check.LogDebug("Node %s kernel is tainted. Taints mask=%d - Decoded taints: %v", nodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask)) // Check the allow list. If empty, mark this node as failed. @@ -347,7 +345,7 @@ func testTainted(check *checksdb.Check, env *provider.TestEnvironment) { // one tainter module. tainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules) if err != nil { - tnf.ClaimFilePrintf("failed to get tainter modules from node %s: %v", nodeName, err) + check.LogDebug("failed to get tainter modules from node %s: %v", nodeName, err) errNodes = append(errNodes, nodeName) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, "Failed to get tainter modules", false). AddField(testhelper.Error, err.Error())) @@ -364,7 +362,7 @@ func testTainted(check *checksdb.Check, env *provider.TestEnvironment) { // Create non-compliant taint objects for each of the taints for _, taint := range moduleTaints { - tnf.ClaimFilePrintf("Node %s - module %s taints kernel: %s", nodeName, moduleName, taint) + check.LogDebug("Node %s - module %s taints kernel: %s", nodeName, moduleName, taint) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, false).AddField(testhelper.ModuleName, moduleName)) // Set the node as non-compliant for future reporting @@ -375,7 +373,7 @@ func testTainted(check *checksdb.Check, env *provider.TestEnvironment) { // Lastly, check that all kernel taint bits come from modules. 
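In the checks themselves (testTainted above and the other suite functions), tnf.ClaimFilePrintf and suite-level logrus calls become methods on the checksdb.Check, so each message is tied to the check that emitted it. A hedged sketch of what such per-check helpers might look like; the actual checksdb changes are not part of this section, so the struct fields and method names below are assumptions:

package checksdb

import (
	"fmt"
	"log/slog"
)

// Check is a reduced stand-in for the real checksdb.Check type.
type Check struct {
	ID     string
	logger *slog.Logger
}

// LogDebug/LogInfo/LogError format the message printf-style and attach the
// check ID so the entry can be traced back to a specific check.
func (c *Check) LogDebug(format string, args ...any) {
	c.logger.Debug(fmt.Sprintf(format, args...), "check", c.ID)
}

func (c *Check) LogInfo(format string, args ...any) {
	c.logger.Info(fmt.Sprintf(format, args...), "check", c.ID)
}

func (c *Check) LogError(format string, args ...any) {
	c.logger.Error(fmt.Sprintf(format, args...), "check", c.ID)
}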
otherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules) for _, taintedBit := range otherKernelTaints { - tnf.ClaimFilePrintf("Node %s - taint bit %d is set but it is not caused by any module.", nodeName, taintedBit) + check.LogDebug("Node %s - taint bit %d is set but it is not caused by any module.", nodeName, taintedBit) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false). AddField(testhelper.ModuleName, "N/A")) otherTaints[nodeName] = append(otherTaints[nodeName], taintedBit) @@ -389,35 +387,35 @@ func testTainted(check *checksdb.Check, env *provider.TestEnvironment) { } } - logrus.Infof("Nodes with errors: %+v", errNodes) - logrus.Infof("Bad Modules: %+v", badModules) - logrus.Infof("Taints not related to any module: %+v", otherTaints) + check.LogInfo("Nodes with errors: %+v", errNodes) + check.LogInfo("Bad Modules: %+v", badModules) + check.LogInfo("Taints not related to any module: %+v", otherTaints) if len(errNodes) > 0 { - logrus.Infof("Failed to get kernel taints from some nodes: %+v", errNodes) + check.LogInfo("Failed to get kernel taints from some nodes: %+v", errNodes) } if len(badModules) > 0 || len(otherTaints) > 0 { - logrus.Info("Nodes have been found to be tainted. Check claim log for more details.") + check.LogInfo("Nodes have been found to be tainted. Check claim log for more details.") } check.SetResult(compliantObjects, nonCompliantObjects) } func testIsRedHatRelease(check *checksdb.Check, env *provider.TestEnvironment) { - logrus.Info("should report a proper Red Hat version") + check.LogInfo("should report a proper Red Hat version") var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject for _, cut := range env.Containers { - logrus.Infof("%s is checked for Red Hat version", cut) + check.LogInfo("%s is checked for Red Hat version", cut) baseImageTester := isredhat.NewBaseImageTester(clientsholder.GetClientsHolder(), clientsholder.NewContext(cut.Namespace, cut.Podname, cut.Name)) result, err := baseImageTester.TestContainerIsRedHatRelease() if err != nil { - logrus.Error("failed to collect release information from container: ", err) + check.LogError("failed to collect release information from container, err=%v", err) } if !result { - tnf.ClaimFilePrintf("%s has failed the RHEL release check", cut) + check.LogDebug("%s has failed the RHEL release check", cut) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Failed the RHEL release check", false)) } else { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Passed the RHEL release check", true)) @@ -441,13 +439,13 @@ func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment ctx := clientsholder.NewContext(debugPod.Namespace, debugPod.Name, debugPod.Spec.Containers[0].Name) outStr, errStr, err := o.ExecCommandContainer(ctx, getenforceCommand) if err != nil || errStr != "" { - logrus.Errorf("Failed to execute command %s in debug %s, errStr: %s, err: %v", getenforceCommand, debugPod.String(), errStr, err) + check.LogError("Failed to execute command %s in debug %s, errStr: %s, err: %v", getenforceCommand, debugPod.String(), errStr, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(debugPod.Namespace, debugPod.Name, "Failed to execute command", 
false)) nodesError++ continue } if outStr != enforcingString { - tnf.ClaimFilePrintf(fmt.Sprintf("Node %s is not running selinux, %s command returned: %s", debugPod.Spec.NodeName, getenforceCommand, outStr)) + check.LogDebug(fmt.Sprintf("Node %s is not running selinux, %s command returned: %s", debugPod.Spec.NodeName, getenforceCommand, outStr)) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(debugPod.Spec.NodeName, "SELinux is not enforced", false)) nodesFailed++ } else { @@ -455,10 +453,10 @@ func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment } } if nodesError > 0 { - logrus.Infof("Failed because could not run %s command on %d nodes", getenforceCommand, nodesError) + check.LogInfo("Failed because could not run %s command on %d nodes", getenforceCommand, nodesError) } if nodesFailed > 0 { - logrus.Infof(fmt.Sprintf("Failed because %d nodes are not running selinux", nodesFailed)) + check.LogInfo(fmt.Sprintf("Failed because %d nodes are not running selinux", nodesFailed)) } check.SetResult(compliantObjects, nonCompliantObjects) @@ -476,19 +474,19 @@ func testHugepages(check *checksdb.Check, env *provider.TestEnvironment) { debugPod, exist := env.DebugPods[node.Data.Name] if !exist { - tnf.ClaimFilePrintf("Node %s: tnf debug pod not found.", node.Data.Name) + check.LogDebug("Node %s: tnf debug pod not found.", node.Data.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "tnf debug pod not found", false)) continue } hpTester, err := hugepages.NewTester(&node, debugPod, clientsholder.GetClientsHolder()) if err != nil { - tnf.ClaimFilePrintf("Unable to get node hugepages tester for node %s, err: %v", node.Data.Name, err) + check.LogDebug("Unable to get node hugepages tester for node %s, err: %v", node.Data.Name, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "Unable to get node hugepages tester", false)) } if err := hpTester.Run(); err != nil { - tnf.ClaimFilePrintf("Node %s: %v", node.Data.Name, err) + check.LogDebug("Node %s: %v", node.Data.Name, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(node.Data.Name, err.Error(), false)) } else { compliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "Passed the hugepages check", true)) @@ -504,7 +502,7 @@ func testUnalteredBootParams(check *checksdb.Check, env *provider.TestEnvironmen alreadyCheckedNodes := map[string]bool{} for _, cut := range env.Containers { if alreadyCheckedNodes[cut.NodeName] { - logrus.Debugf("Skipping node %s: already checked.", cut.NodeName) + check.LogDebug("Skipping node %s: already checked.", cut.NodeName) continue } alreadyCheckedNodes[cut.NodeName] = true @@ -514,7 +512,7 @@ func testUnalteredBootParams(check *checksdb.Check, env *provider.TestEnvironmen if err != nil || len(claimsLog.GetLogLines()) != 0 { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, "Failed the boot params check", false). AddField(testhelper.DebugPodName, env.DebugPods[cut.NodeName].Name)) - tnf.ClaimFilePrintf("%s", claimsLog.GetLogLines()) + check.LogDebug("%s", claimsLog.GetLogLines()) } else { compliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(cut.NodeName, "Passed the boot params check", true). 
AddField(testhelper.DebugPodName, env.DebugPods[cut.NodeName].Name)) @@ -542,7 +540,7 @@ func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) { sysctlSettings, err := sysctlconfig.GetSysctlSettings(env, cut.NodeName) if err != nil { - tnf.ClaimFilePrintf("Could not get sysctl settings for node %s, error: %v", cut.NodeName, err) + check.LogDebug("Could not get sysctl settings for node %s, error: %v", cut.NodeName, err) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, "Could not get sysctl settings", false)) continue } @@ -552,7 +550,7 @@ func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) { for key, sysctlConfigVal := range sysctlSettings { if mcVal, ok := mcKernelArgumentsMap[key]; ok { if mcVal != sysctlConfigVal { - tnf.ClaimFilePrintf(fmt.Sprintf("Kernel config mismatch in node %s for %s (sysctl value: %s, machine config value: %s)", + check.LogDebug(fmt.Sprintf("Kernel config mismatch in node %s for %s (sysctl value: %s, machine config value: %s)", cut.NodeName, key, sysctlConfigVal, mcVal)) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, fmt.Sprintf("Kernel config mismatch for %s", key), false)) validSettings = false @@ -568,26 +566,26 @@ func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) { } func testOCPStatus(check *checksdb.Check, env *provider.TestEnvironment) { - logrus.Infof("Testing the OCP Version for lifecycle status") + check.LogInfo("Testing the OCP Version for lifecycle status") clusterIsInEOL := false switch env.OCPStatus { case compatibility.OCPStatusEOL: msg := fmt.Sprintf("OCP Version %s has been found to be in end of life", env.OpenshiftVersion) - tnf.ClaimFilePrintf(msg) + check.LogDebug(msg) clusterIsInEOL = true case compatibility.OCPStatusMS: msg := fmt.Sprintf("OCP Version %s has been found to be in maintenance support", env.OpenshiftVersion) - tnf.ClaimFilePrintf(msg) + check.LogDebug(msg) case compatibility.OCPStatusGA: msg := fmt.Sprintf("OCP Version %s has been found to be in general availability", env.OpenshiftVersion) - tnf.ClaimFilePrintf(msg) + check.LogDebug(msg) case compatibility.OCPStatusPreGA: msg := fmt.Sprintf("OCP Version %s has been found to be in pre-general availability", env.OpenshiftVersion) - tnf.ClaimFilePrintf(msg) + check.LogDebug(msg) default: msg := fmt.Sprintf("OCP Version %s was unable to be found in the lifecycle compatibility matrix", env.OpenshiftVersion) - tnf.ClaimFilePrintf(msg) + check.LogDebug(msg) } var compliantObjects []*testhelper.ReportObject @@ -604,9 +602,9 @@ func testOCPStatus(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvironment) { - logrus.Info("Testing the control-plane and workers in the cluster for Operating System compatibility") + check.LogInfo("Testing the control-plane and workers in the cluster for Operating System compatibility") - logrus.Debug(fmt.Sprintf("There are %d nodes to process for Operating System compatibility.", len(env.Nodes))) + check.LogDebug(fmt.Sprintf("There are %d nodes to process for Operating System compatibility.", len(env.Nodes))) failedControlPlaneNodes := []string{} failedWorkerNodes := []string{} @@ -614,13 +612,13 @@ func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvi var nonCompliantObjects []*testhelper.ReportObject for _, node := range env.Nodes { // Get the OSImage which should 
tell us what version of operating system the node is running. - logrus.Debug(fmt.Sprintf("Node %s is running operating system: %s", node.Data.Name, node.Data.Status.NodeInfo.OSImage)) + check.LogDebug(fmt.Sprintf("Node %s is running operating system: %s", node.Data.Name, node.Data.Status.NodeInfo.OSImage)) // Control plane nodes must be RHCOS (also CentOS Stream starting in OCP 4.13) // Per the release notes from OCP documentation: // "You must use RHCOS machines for the control plane, and you can use either RHCOS or RHEL for compute machines." if node.IsMasterNode() && !node.IsRHCOS() && !node.IsCSCOS() { - tnf.ClaimFilePrintf("Master node %s has been found to be running an incompatible operating system: %s", node.Data.Name, node.Data.Status.NodeInfo.OSImage) + check.LogDebug("Master node %s has been found to be running an incompatible operating system: %s", node.Data.Name, node.Data.Status.NodeInfo.OSImage) failedControlPlaneNodes = append(failedControlPlaneNodes, node.Data.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "Master node has been found to be running an incompatible OS", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage)) continue @@ -633,21 +631,21 @@ func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvi // Get the short version from the node shortVersion, err := node.GetRHCOSVersion() if err != nil { - tnf.ClaimFilePrintf("Node %s failed to gather RHCOS version. Error: %v", node.Data.Name, err) + check.LogDebug("Node %s failed to gather RHCOS version. Error: %v", node.Data.Name, err) failedWorkerNodes = append(failedWorkerNodes, node.Data.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "Failed to gather RHCOS version", false)) continue } if shortVersion == operatingsystem.NotFoundStr { - tnf.ClaimFilePrintf("Node %s has an RHCOS operating system that is not found in our internal database. Skipping as to not cause failures due to database mismatch.", node.Data.Name) + check.LogDebug("Node %s has an RHCOS operating system that is not found in our internal database. Skipping as to not cause failures due to database mismatch.", node.Data.Name) continue } // If the node's RHCOS version and the OpenShift version are not compatible, the node fails. - logrus.Debugf("Comparing RHCOS shortVersion: %s to openshiftVersion: %s", shortVersion, env.OpenshiftVersion) + check.LogDebug("Comparing RHCOS shortVersion: %s to openshiftVersion: %s", shortVersion, env.OpenshiftVersion) if !compatibility.IsRHCOSCompatible(shortVersion, env.OpenshiftVersion) { - tnf.ClaimFilePrintf("Node %s has been found to be running an incompatible version of RHCOS: %s", node.Data.Name, shortVersion) + check.LogDebug("Node %s has been found to be running an incompatible version of RHCOS: %s", node.Data.Name, shortVersion) failedWorkerNodes = append(failedWorkerNodes, node.Data.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "Worker node has been found to be running an incompatible OS", false). AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage)) @@ -659,7 +657,7 @@ func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvi // Get the short version from the node shortVersion, err := node.GetCSCOSVersion() if err != nil { - tnf.ClaimFilePrintf("Node %s failed to gather CentOS Stream CoreOS version. 
Error: %v", node.Data.Name, err) + check.LogDebug("Node %s failed to gather CentOS Stream CoreOS version. Error: %v", node.Data.Name, err) failedWorkerNodes = append(failedWorkerNodes, node.Data.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "Failed to gather CentOS Stream CoreOS version", false)) continue @@ -672,28 +670,28 @@ func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvi Node %s is using CentOS Stream CoreOS %s, which is not being used yet in any OCP RC/GA version. Relaxing the conditions to check the OS as a result. ` - tnf.ClaimFilePrintf(msg, node.Data.Name, shortVersion) + check.LogDebug(msg, node.Data.Name, shortVersion) } else if node.IsRHEL() { // Get the short version from the node shortVersion, err := node.GetRHELVersion() if err != nil { - tnf.ClaimFilePrintf("Node %s failed to gather RHEL version. Error: %v", node.Data.Name, err) + check.LogDebug("Node %s failed to gather RHEL version. Error: %v", node.Data.Name, err) failedWorkerNodes = append(failedWorkerNodes, node.Data.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "Failed to gather RHEL version", false)) continue } // If the node's RHEL version and the OpenShift version are not compatible, the node fails. - logrus.Debugf("Comparing RHEL shortVersion: %s to openshiftVersion: %s", shortVersion, env.OpenshiftVersion) + check.LogDebug("Comparing RHEL shortVersion: %s to openshiftVersion: %s", shortVersion, env.OpenshiftVersion) if !compatibility.IsRHELCompatible(shortVersion, env.OpenshiftVersion) { - tnf.ClaimFilePrintf("Node %s has been found to be running an incompatible version of RHEL: %s", node.Data.Name, shortVersion) + check.LogDebug("Node %s has been found to be running an incompatible version of RHEL: %s", node.Data.Name, shortVersion) failedWorkerNodes = append(failedWorkerNodes, node.Data.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "Worker node has been found to be running an incompatible OS", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage)) } else { compliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "Worker node has been found to be running a compatible OS", true).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage)) } } else { - tnf.ClaimFilePrintf("Node %s has been found to be running an incompatible operating system", node.Data.Name) + check.LogDebug("Node %s has been found to be running an incompatible operating system", node.Data.Name) failedWorkerNodes = append(failedWorkerNodes, node.Data.Name) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(node.Data.Name, "Worker node has been found to be running an incompatible OS", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage)) } @@ -704,13 +702,13 @@ func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvi if n := len(failedControlPlaneNodes); n > 0 { errMsg := fmt.Sprintf("Number of control plane nodes running non-RHCOS based operating systems: %d", n) b.WriteString(errMsg) - tnf.ClaimFilePrintf(errMsg) + check.LogDebug(errMsg) } if n := len(failedWorkerNodes); n > 0 { errMsg := fmt.Sprintf("Number of worker nodes running non-RHCOS or non-RHEL based operating systems: %d", n) b.WriteString(errMsg) - tnf.ClaimFilePrintf(errMsg) + check.LogDebug(errMsg) } check.SetResult(compliantObjects, nonCompliantObjects) diff 
--git a/cnf-certification-test/preflight/suite.go b/cnf-certification-test/preflight/suite.go index 19f33524d..ba6d6e641 100644 --- a/cnf-certification-test/preflight/suite.go +++ b/cnf-certification-test/preflight/suite.go @@ -17,24 +17,24 @@ package preflight import ( + "os" "strings" plibRuntime "github.com/redhat-openshift-ecosystem/openshift-preflight/certification" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/common" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/configuration" "github.com/test-network-function/cnf-certification-test/pkg/provider" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" ) var ( env provider.TestEnvironment beforeEachFn = func(check *checksdb.Check) error { - logrus.Infof("Check %s: getting test environment.", check.ID) + log.Info("Check %s: getting test environment.", check.ID) env = provider.GetTestEnvironment() return nil } @@ -66,7 +66,7 @@ func ShouldRun(labelsExpr string) bool { // Add safeguard against running the preflight tests if the docker config does not exist. preflightDockerConfigFile := configuration.GetTestParameters().PfltDockerconfig if preflightDockerConfigFile == "" || preflightDockerConfigFile == "NA" { - logrus.Warn("Skipping the preflight suite because the Docker Config file is not provided.") + log.Warn("Skipping the preflight suite because the Docker Config file is not provided.") return false } @@ -74,7 +74,7 @@ func ShouldRun(labelsExpr string) bool { } func LoadChecks() { - logrus.Debugf("Entering %s suite", common.PreflightTestKey) + log.Debug("Entering %s suite", common.PreflightTestKey) // As the preflight lib's checks need to run here, we need to get the test environment now. env = provider.GetTestEnvironment() @@ -84,10 +84,10 @@ func LoadChecks() { testPreflightContainers(checksGroup, &env) if provider.IsOCPCluster() { - logrus.Debugf("OCP cluster detected, allowing operator tests to run") + log.Debug("OCP cluster detected, allowing operator tests to run") testPreflightOperators(checksGroup, &env) } else { - logrus.Debugf("Skipping the preflight operators test because it requires an OCP cluster to run against") + log.Debug("Skipping the preflight operators test because it requires an OCP cluster to run against") } } @@ -98,16 +98,17 @@ func testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.Tes // in-general you are only going to have an operator installed once in a cluster. err := op.SetPreflightResults(env) if err != nil { - logrus.Fatalf("failed running preflight on operator: %s error: %v", op.Name, err) + log.Error("failed running preflight on operator: %s error: %v", op.Name, err) + os.Exit(1) } } - logrus.Infof("Completed running preflight operator tests for %d operators", len(env.Operators)) + log.Info("Completed running preflight operator tests for %d operators", len(env.Operators)) // Handle Operator-based preflight tests // Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables. 
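The preflight hunks in this file replace logrus.Fatalf, which logs and then exits the process on its own, with an explicit log.Error followed by os.Exit(1), since slog defines no fatal level. A small, self-contained illustration of that pattern under assumed names (runPreflight and logError are hypothetical):

package main

import (
	"fmt"
	"log/slog"
	"os"
)

// logError stands in for the internal printf-style Error wrapper.
func logError(format string, args ...any) {
	slog.Error(fmt.Sprintf(format, args...))
}

func runPreflight() error { return fmt.Errorf("example failure") }

func main() {
	if err := runPreflight(); err != nil {
		logError("failed running preflight: %v", err)
		os.Exit(1) // logrus.Fatalf used to perform this exit implicitly
	}
}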
for testName, testEntry := range getUniqueTestEntriesFromOperatorResults(env.Operators) { - logrus.Infof("Testing operator ginkgo test: %s", testName) + log.Info("Testing operator ginkgo test: %s", testName) generatePreflightOperatorGinkgoTest(checksGroup, testName, testEntry.Metadata().Description, testEntry.Help().Suggestion, env.Operators) } } @@ -118,19 +119,20 @@ func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.Te // Loop through all of the containers, run preflight, and set their results into their respective objects for _, cut := range env.Containers { - logrus.Debugf("Running preflight container tests for: %s", cut.Name) + log.Debug("Running preflight container tests for: %s", cut.Name) err := cut.SetPreflightResults(preflightImageCache, env) if err != nil { - logrus.Fatalf("failed running preflight on image: %s error: %v", cut.Image, err) + log.Error("failed running preflight on image: %s error: %v", cut.Image, err) + os.Exit(1) } } - logrus.Infof("Completed running preflight container tests for %d containers", len(env.Containers)) + log.Info("Completed running preflight container tests for %d containers", len(env.Containers)) // Handle Container-based preflight tests // Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables. for testName, testEntry := range getUniqueTestEntriesFromContainerResults(env.Containers) { - logrus.Infof("Testing container ginkgo test: %s", testName) + log.Info("Testing container ginkgo test: %s", testName) generatePreflightContainerGinkgoTest(checksGroup, testName, testEntry.Metadata().Description, testEntry.Help().Suggestion, env.Containers) } } @@ -158,19 +160,19 @@ func generatePreflightContainerGinkgoTest(checksGroup *checksdb.ChecksGroup, tes for _, r := range cut.PreflightResults.Passed { if r.Name() == testName { compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container has passed preflight test "+testName, true)) - logrus.Infof("%s has passed preflight test: %s", cut.String(), testName) + log.Info("%s has passed preflight test: %s", cut.String(), testName) } } for _, r := range cut.PreflightResults.Failed { if r.Name() == testName { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container has failed preflight test "+testName, false)) - tnf.Logf(logrus.WarnLevel, "%s has failed preflight test: %s", cut, testName) + log.Warn("%s has failed preflight test: %s", cut, testName) } } for _, r := range cut.PreflightResults.Errors { if r.Name() == testName { nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container has errored preflight test "+testName, false)) - tnf.Logf(logrus.ErrorLevel, "%s has errored preflight test: %s", cut, testName) + log.Error("%s has errored preflight test: %s", cut, testName) } } } @@ -204,19 +206,19 @@ func generatePreflightOperatorGinkgoTest(checksGroup *checksdb.ChecksGroup, test for _, op := range operators { for _, r := range op.PreflightResults.Passed { if r.Name() == testName { - logrus.Infof("%s has passed preflight test: %s", op.String(), testName) + log.Info("%s has passed preflight test: %s", op.String(), testName) compliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, "Operator passed preflight test "+testName, true)) } } for _, r := range 
op.PreflightResults.Failed { if r.Name() == testName { - tnf.Logf(logrus.WarnLevel, "%s has failed preflight test: %s", op, testName) + log.Warn("%s has failed preflight test: %s", op, testName) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, "Operator failed preflight test "+testName, false)) } } for _, r := range op.PreflightResults.Errors { if r.Name() == testName { - tnf.Logf(logrus.ErrorLevel, "%s has errored preflight test: %s", op, testName) + log.Error("%s has errored preflight test: %s", op, testName) nonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, "Operator has errored preflight test "+testName, false)) } } diff --git a/cnf-certification-test/results/archiver.go b/cnf-certification-test/results/archiver.go index e35ae18f5..d2fe15056 100644 --- a/cnf-certification-test/results/archiver.go +++ b/cnf-certification-test/results/archiver.go @@ -9,7 +9,7 @@ import ( "path/filepath" "time" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" ) const ( @@ -42,7 +42,7 @@ func CompressResultsArtifacts(outputDir string, filePaths []string) error { zipFileName := generateZipFileName() zipFilePath := filepath.Join(outputDir, zipFileName) - logrus.Infof("Compressing results artifacts into %s", zipFilePath) + log.Info("Compressing results artifacts into %s", zipFilePath) zipFile, err := os.Create(zipFilePath) if err != nil { return fmt.Errorf("failed creating tar.gz file %s in dir %s (filepath=%s): %v", @@ -56,7 +56,7 @@ func CompressResultsArtifacts(outputDir string, filePaths []string) error { defer tarWriter.Close() for _, file := range filePaths { - logrus.Debugf("Zipping file %s", file) + log.Debug("Zipping file %s", file) tarHeader, err := getFileTarHeader(file) if err != nil { diff --git a/cnf-certification-test/tnf_config.yml b/cnf-certification-test/tnf_config.yml index ccd897afb..847c704d1 100644 --- a/cnf-certification-test/tnf_config.yml +++ b/cnf-certification-test/tnf_config.yml @@ -1,5 +1,5 @@ targetNameSpaces: - - name: tnf + - name: jmontesi podsUnderTestLabels: - "test-network-function.com/generic: target" # deprecated operator label ("test-network-function.com/operator:"") still configured by default, no need to add it here diff --git a/cnf-certification-test/webserver/webserver.go b/cnf-certification-test/webserver/webserver.go index b575d0508..066c5e8fe 100644 --- a/cnf-certification-test/webserver/webserver.go +++ b/cnf-certification-test/webserver/webserver.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "io" - rlog "log" "net" "net/http" "os" @@ -17,8 +16,8 @@ import ( "github.com/gorilla/websocket" "github.com/robert-nix/ansihtml" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/certsuite" "github.com/test-network-function/cnf-certification-test/pkg/configuration" "github.com/test-network-function/cnf-certification-test/pkg/provider" @@ -50,7 +49,7 @@ var toast []byte //go:embed index.js var index []byte -var Buf *bytes.Buffer +var buf *bytes.Buffer var upgrader = websocket.Upgrader{ CheckOrigin: func(r *http.Request) bool { @@ -61,13 +60,13 @@ var upgrader = websocket.Upgrader{ func logStreamHandler(w http.ResponseWriter, r *http.Request) { conn, err := upgrader.Upgrade(w, r, nil) if err != nil { - 
logrus.Printf("WebSocket upgrade error: %v", err) + log.Info("WebSocket upgrade error: %v", err) return } defer conn.Close() // Create a scanner to read the log file line by line for { - scanner := bufio.NewScanner(Buf) + scanner := bufio.NewScanner(buf) for scanner.Scan() { line := scanner.Bytes() fmt.Println(string(line)) @@ -80,7 +79,7 @@ func logStreamHandler(w http.ResponseWriter, r *http.Request) { } } if err := scanner.Err(); err != nil { - logrus.Printf("Error reading log file: %v", err) + log.Info("Error reading log file: %v", err) return } } @@ -188,7 +187,7 @@ func StartServer(outputFolder string) { http.HandleFunc("/runFunction", runHandler) - logrus.Infof("Server is running on :8084...") + log.Info("Server is running on :8084...") if err := server.ListenAndServe(); err != nil { panic(err) } @@ -198,12 +197,12 @@ func StartServer(outputFolder string) { // //nolint:funlen func runHandler(w http.ResponseWriter, r *http.Request) { - Buf = bytes.NewBufferString("") - logrus.SetOutput(Buf) - rlog.SetOutput(Buf) + buf = bytes.NewBufferString("") + // The log output will be written to the log file and to this buffer buf + log.SetLogger(log.GetMultiLogger(buf)) jsonData := r.FormValue("jsonData") // "jsonData" is the name of the JSON input field - logrus.Info(jsonData) + log.Info(jsonData) var data RequestedData if err := json.Unmarshal([]byte(jsonData), &data); err != nil { fmt.Println("Error:", err) @@ -218,7 +217,7 @@ func runHandler(w http.ResponseWriter, r *http.Request) { } defer file.Close() - logrus.Infof("Kubeconfig file name received: %s", fileHeader.Filename) + log.Info("Kubeconfig file name received: %s", fileHeader.Filename) kubeconfigTempFile, err := os.CreateTemp("", "webserver-kubeconfig-*") if err != nil { http.Error(w, "Failed to create temp file to store the kubeconfig content.", http.StatusBadRequest) @@ -226,10 +225,10 @@ func runHandler(w http.ResponseWriter, r *http.Request) { } defer func() { - logrus.Infof("Removing temporary kubeconfig file %s", kubeconfigTempFile.Name()) + log.Info("Removing temporary kubeconfig file %s", kubeconfigTempFile.Name()) err = os.Remove(kubeconfigTempFile.Name()) if err != nil { - logrus.Errorf("Failed to remove temp kubeconfig file %s", kubeconfigTempFile.Name()) + log.Error("Failed to remove temp kubeconfig file %s", kubeconfigTempFile.Name()) } }() @@ -241,12 +240,13 @@ func runHandler(w http.ResponseWriter, r *http.Request) { _ = kubeconfigTempFile.Close() - logrus.Infof("Web Server kubeconfig file : %v (copied into %v)", fileHeader.Filename, kubeconfigTempFile.Name()) - logrus.Infof("Web Server Labels filter : %v", flattenedOptions) + log.Info("Web Server kubeconfig file : %v (copied into %v)", fileHeader.Filename, kubeconfigTempFile.Name()) + log.Info("Web Server Labels filter : %v", flattenedOptions) tnfConfig, err := os.ReadFile("tnf_config.yml") if err != nil { - logrus.Fatalf("Error reading YAML file: %v", err) + log.Error("Error reading YAML file: %v", err) + os.Exit(1) //nolint:gocritic } newData := updateTnf(tnfConfig, &data) @@ -254,7 +254,8 @@ func runHandler(w http.ResponseWriter, r *http.Request) { // Write the modified YAML data back to the file err = os.WriteFile("tnf_config.yml", newData, os.ModePerm) if err != nil { - logrus.Fatalf("Error writing YAML file: %v", err) + log.Error("Error writing YAML file: %v", err) + os.Exit(1) } _ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name()) @@ -265,7 +266,7 @@ func runHandler(w http.ResponseWriter, r *http.Request) { labelsFilter := 
strings.Join(flattenedOptions, "") outputFolder := r.Context().Value(outputFolderCtxKey).(string) - logrus.Infof("Running CNF Cert Suite (web-mode). Labels filter: %s, outputFolder: %s", labelsFilter, outputFolder) + log.Info("Running CNF Cert Suite (web-mode). Labels filter: %s, outputFolder: %s", labelsFilter, outputFolder) certsuite.Run(labelsFilter, outputFolder, defaultTimeout) // Return the result as JSON @@ -277,7 +278,7 @@ func runHandler(w http.ResponseWriter, r *http.Request) { // Serialize the response data to JSON jsonResponse, err := json.Marshal(response) if err != nil { - logrus.Errorf("Failed to marshal jsonResponse: %v", err) + log.Error("Failed to marshal jsonResponse: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -285,10 +286,10 @@ func runHandler(w http.ResponseWriter, r *http.Request) { // Set the Content-Type header to specify that the response is JSON w.Header().Set("Content-Type", "application/json") // Write the JSON response to the client - logrus.Infof("Sending web response: %v", response) + log.Info("Sending web response: %v", response) _, err = w.Write(jsonResponse) if err != nil { - logrus.Errorf("Failed to write jsonResponse: %v", err) + log.Error("Failed to write jsonResponse: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -301,7 +302,8 @@ func updateTnf(tnfConfig []byte, data *RequestedData) []byte { err := yaml.Unmarshal(tnfConfig, &config) if err != nil { - logrus.Fatalf("Error unmarshalling YAML: %v", err) + log.Error("Error unmarshalling YAML: %v", err) + os.Exit(1) } // Modify the configuration @@ -385,7 +387,8 @@ func updateTnf(tnfConfig []byte, data *RequestedData) []byte { // Serialize the modified config back to YAML format newData, err := yaml.Marshal(&config) if err != nil { - logrus.Fatalf("Error marshaling YAML: %v", err) + log.Error("Error marshaling YAML: %v", err) + os.Exit(1) } return newData } diff --git a/docs/index.md b/docs/index.md index ee52758b3..10fffc385 100644 --- a/docs/index.md +++ b/docs/index.md @@ -20,7 +20,7 @@ The purpose of the tests and the framework is to test the interaction of CNF wit **Features** -* The test suite generates a report (`claim.json`) and saves the test execution log (`tnf-execution.log`) in a configurable output directory. +* The test suite generates a report (`claim.json`) and saves the test execution log (`cnf-certsuite.log`) in a configurable output directory. * The catalog of the existing test cases and test building blocks are available in [CATALOG.md](https://github.com/test-network-function/cnf-certification-test/blob/main/CATALOG.md) diff --git a/docs/test-container.md b/docs/test-container.md index a9fb044a4..6d90dc6c1 100644 --- a/docs/test-container.md +++ b/docs/test-container.md @@ -38,7 +38,7 @@ In order to get the required information, the test suite does not `ssh` into nod **Required arguments** * `-t` to provide the path of the local directory that contains tnf config files -* `-o` to provide the path of the local directory where test results (claim.json), the execution logs (tnf-execution.log), and the results artifacts file (results.tar.gz) will be available from after the container exits. +* `-o` to provide the path of the local directory where test results (claim.json), the execution logs (cnf-certsuite.log), and the results artifacts file (results.tar.gz) will be available from after the container exits. !!! 
warning diff --git a/docs/test-output.md b/docs/test-output.md index eed0cfcce..b7e2b70b7 100644 --- a/docs/test-output.md +++ b/docs/test-output.md @@ -48,7 +48,7 @@ For more details on the contents of the claim file ## Execution logs -The test suite also saves a copy of the execution logs at [test output directory]/tnf-execution.log +The test suite also saves a copy of the execution logs at [test output directory]/cnf-certsuite.log ## Results artifacts zip file diff --git a/go.mod b/go.mod index fa554a4e0..8254d1548 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21.4 require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/basgys/goxml2json v1.1.0 - github.com/sirupsen/logrus v1.9.3 + github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 github.com/xeipuuv/gojsonschema v1.2.0 // indirect diff --git a/internal/clientsholder/clientsholder.go b/internal/clientsholder/clientsholder.go index 066f28930..ff5bf43c5 100644 --- a/internal/clientsholder/clientsholder.go +++ b/internal/clientsholder/clientsholder.go @@ -25,7 +25,7 @@ import ( clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" olmClient "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" olmFakeClient "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" apiextv1c "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -152,12 +152,13 @@ func GetClientsHolder(filenames ...string) *ClientsHolder { return &clientsHolder } if len(filenames) == 0 { - logrus.Errorf("Please provide a valid kubeconfig. Either set the KUBECONFIG environment variable or alternatively copy a kube config to $HOME/.kube/config") + log.Error("Please provide a valid kubeconfig. Either set the KUBECONFIG environment variable or alternatively copy a kube config to $HOME/.kube/config") os.Exit(exitUsage) } clientsHolder, err := newClientsHolder(filenames...) if err != nil { - logrus.Panic("Failed to create k8s clients holder: ", err) + log.Error("Failed to create k8s clients holder, err: %v", err) + os.Exit(1) } return clientsHolder } @@ -165,7 +166,8 @@ func GetClientsHolder(filenames ...string) *ClientsHolder { func GetNewClientsHolder(kubeconfigFile string) *ClientsHolder { _, err := newClientsHolder(kubeconfigFile) if err != nil { - logrus.Panic("Failed to create k8s clients holder: ", err) + log.Error("Failed to create k8s clients holder, err: %v", err) + os.Exit(1) } return &clientsHolder @@ -209,7 +211,7 @@ func GetClientConfigFromRestConfig(restConfig *rest.Config) *clientcmdapi.Config func getClusterRestConfig(filenames ...string) (*rest.Config, error) { restConfig, err := rest.InClusterConfig() if err == nil { - logrus.Infof("CNF Cert Suite is running inside a cluster.") + log.Info("CNF Cert Suite is running inside a cluster.") // Convert restConfig to clientcmdapi.Config so we can get the kubeconfig "file" bytes // needed by preflight's operator checks. @@ -223,7 +225,7 @@ func getClusterRestConfig(filenames ...string) (*rest.Config, error) { return restConfig, nil } - logrus.Infof("Running outside a cluster. Parsing kubeconfig file/s %+v", filenames) + log.Info("Running outside a cluster. 
Parsing kubeconfig file/s %+v", filenames) if len(filenames) == 0 { return nil, errors.New("no kubeconfig files set") } @@ -261,7 +263,7 @@ func getClusterRestConfig(filenames ...string) (*rest.Config, error) { // GetClientsHolder instantiate an ocp client func newClientsHolder(filenames ...string) (*ClientsHolder, error) { //nolint:funlen // this is a special function with lots of assignments - logrus.Infof("Creating k8s go-clients holder.") + log.Info("Creating k8s go-clients holder.") var err error clientsHolder.RestConfig, err = getClusterRestConfig(filenames...) diff --git a/internal/clientsholder/command.go b/internal/clientsholder/command.go index dea1c55a3..9acd3e014 100644 --- a/internal/clientsholder/command.go +++ b/internal/clientsholder/command.go @@ -22,7 +22,7 @@ import ( "fmt" "strings" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/remotecommand" "k8s.io/kubectl/pkg/scheme" @@ -39,7 +39,7 @@ func (clientsholder *ClientsHolder) ExecCommandContainer( commandStr := []string{"sh", "-c", command} var buffOut bytes.Buffer var buffErr bytes.Buffer - logrus.Trace(fmt.Sprintf("execute command on ns=%s, pod=%s container=%s, cmd: %s", ctx.GetNamespace(), ctx.GetPodName(), ctx.GetContainerName(), strings.Join(commandStr, " "))) + log.Debug(fmt.Sprintf("execute command on ns=%s, pod=%s container=%s, cmd: %s", ctx.GetNamespace(), ctx.GetPodName(), ctx.GetContainerName(), strings.Join(commandStr, " "))) req := clientsholder.K8sClient.CoreV1().RESTClient(). Post(). Namespace(ctx.GetNamespace()). @@ -57,7 +57,7 @@ func (clientsholder *ClientsHolder) ExecCommandContainer( exec, err := remotecommand.NewSPDYExecutor(clientsholder.RestConfig, "POST", req.URL()) if err != nil { - logrus.Error(err) + log.Error("%v", err) return stdout, stderr, err } err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{ @@ -66,11 +66,11 @@ func (clientsholder *ClientsHolder) ExecCommandContainer( }) stdout, stderr = buffOut.String(), buffErr.String() if err != nil { - logrus.Error(err) - logrus.Error(req.URL()) - logrus.Error("command: ", command) - logrus.Error("stderr: ", stderr) - logrus.Error("stdout: ", stdout) + log.Error("%v", err) + log.Error("%v", req.URL()) + log.Error("command: %s", command) + log.Error("stderr: %s", stderr) + log.Error("stdout: %s", stdout) return stdout, stderr, err } return stdout, stderr, err diff --git a/internal/crclient/crclient.go b/internal/crclient/crclient.go index a8691adab..7d7590c95 100644 --- a/internal/crclient/crclient.go +++ b/internal/crclient/crclient.go @@ -22,8 +22,8 @@ import ( "strconv" "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/provider" ) @@ -62,7 +62,7 @@ func GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (in case "cri-o", "containerd": pidCmd = "chroot /host crictl inspect --output go-template --template '{{.info.pid}}' " + cut.UID + DevNull default: - logrus.Debugf("Container runtime %s not supported yet for this test, skipping", cut.Runtime) + log.Debug("Container runtime %s not supported yet for this test, skipping", cut.Runtime) return 0, fmt.Errorf("container runtime %s not supported", cut.Runtime) } @@ -90,7 +90,7 @@ func GetContainerPidNamespace(testContainer *provider.Container, env 
*provider.T if err != nil { return "", fmt.Errorf("unable to get container process id due to: %v", err) } - logrus.Debugf("Obtained process id for %s is %d", testContainer, pid) + log.Debug("Obtained process id for %s is %d", testContainer, pid) command := fmt.Sprintf("lsns -p %d -t pid -n", pid) stdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ocpContext, command) @@ -162,17 +162,17 @@ func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) } aPidNs, err := strconv.Atoi(v[1]) if err != nil { - logrus.Errorf("could not convert string %s to integer, err=%s", v[1], err) + log.Error("could not convert string %s to integer, err=%s", v[1], err) continue } aPid, err := strconv.Atoi(v[2]) if err != nil { - logrus.Errorf("could not convert string %s to integer, err=%s", v[2], err) + log.Error("could not convert string %s to integer, err=%s", v[2], err) continue } aPPid, err := strconv.Atoi(v[3]) if err != nil { - logrus.Errorf("could not convert string %s to integer, err=%s", v[3], err) + log.Error("could not convert string %s to integer, err=%s", v[3], err) continue } p = append(p, &Process{PidNs: aPidNs, Pid: aPid, Args: v[4], PPid: aPPid}) diff --git a/internal/log/log.go b/internal/log/log.go index 12be527ce..b750ba02d 100644 --- a/internal/log/log.go +++ b/internal/log/log.go @@ -6,6 +6,7 @@ import ( "io" "log/slog" "runtime" + "strings" "time" ) @@ -15,13 +16,17 @@ type Logger struct { var logger *slog.Logger -func SetupLogger(logWriter io.Writer) { +func SetupLogger(logWriter io.Writer, level slog.Level) { opts := Options{ - Level: slog.LevelDebug, + Level: level, } logger = slog.New(NewCustomHandler(logWriter, &opts)) } +func SetLogger(l *slog.Logger) { + logger = l +} + func Debug(msg string, args ...any) { Logf(logger, slog.LevelDebug, msg, args...) } @@ -45,9 +50,27 @@ func GetMultiLogger(w io.Writer) *slog.Logger { return slog.New(NewMultiHandler(logger.Handler(), NewCustomHandler(w, &opts))) } +func ParseLevel(level string) (slog.Level, error) { + switch strings.ToLower(level) { + case "debug": + return slog.LevelDebug, nil + case "info": + return slog.LevelInfo, nil + case "warn", "warning": + return slog.LevelWarn, nil + case "error": + return slog.LevelError, nil + } + + return 0, fmt.Errorf("not a valid slog Level: %q", level) +} + // The Logf function should be called inside a log wrapper function. // Otherwise the code source reference will be invalid. 
func Logf(logger *slog.Logger, level slog.Level, format string, args ...any) { + if logger == nil { + logger = slog.Default() + } if !logger.Enabled(context.Background(), level) { return } diff --git a/main.go b/main.go index 5a050c4d6..7b9dc8e2d 100644 --- a/main.go +++ b/main.go @@ -24,9 +24,7 @@ import ( "path/filepath" "time" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/pkg/certsuite" - "github.com/test-network-function/cnf-certification-test/pkg/loghelper" "github.com/test-network-function/cnf-certification-test/pkg/versions" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/webserver" @@ -97,19 +95,6 @@ func init() { } } -// setLogLevel sets the log level for logrus based on the "TNF_LOG_LEVEL" environment variable -func setLogLevel() { - params := configuration.GetTestParameters() - - var logLevel, err = logrus.ParseLevel(params.LogLevel) - if err != nil { - logrus.Error("TNF_LOG_LEVEL environment set with an invalid value, defaulting to DEBUG \n Valid values are: trace, debug, info, warn, error, fatal, panic") - logLevel = logrus.DebugLevel - } - - logrus.SetLevel(logLevel) -} - func getK8sClientsConfigFileNames() []string { params := configuration.GetTestParameters() fileNames := []string{} @@ -121,58 +106,62 @@ func getK8sClientsConfigFileNames() []string { kubeConfigFilePath := filepath.Join(params.Home, ".kube", "config") // Check if the kubeconfig path exists if _, err := os.Stat(kubeConfigFilePath); err == nil { - logrus.Infof("kubeconfig path %s is present", kubeConfigFilePath) + log.Info("kubeconfig path %s is present", kubeConfigFilePath) // Only add the kubeconfig to the list of paths if it exists, since it is not added by the user fileNames = append(fileNames, kubeConfigFilePath) } else { - logrus.Infof("kubeconfig path %s is not present", kubeConfigFilePath) + log.Info("kubeconfig path %s is not present", kubeConfigFilePath) } } return fileNames } -//nolint:funlen -func main() { - err := configuration.LoadEnvironmentVariables() +func createLogFile(outputDir string) (*os.File, error) { + logFilePath := outputDir + "/" + logFileName + err := os.Remove(logFilePath) + if err != nil && !os.IsNotExist(err) { + return nil, fmt.Errorf("could not delete old log file, err: %v", err) + } + + logFile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE, logFilePermissions) if err != nil { - fmt.Fprintf(os.Stderr, "could not load the environment variables, err: %v", err) - os.Exit(1) + return nil, fmt.Errorf("could not open a new log file, err: %v", err) } - // Set up logging params for logrus - loghelper.SetLogFormat() - setLogLevel() + return logFile, nil +} - logrusLogFile, err := os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE, logFilePermissions) +func setupLogger(logFile *os.File) { + logLevel, err := log.ParseLevel(configuration.GetTestParameters().LogLevel) if err != nil { - fmt.Fprintf(os.Stderr, "could not create log file, err: %v", err) - os.Exit(1) + fmt.Fprintf(os.Stderr, "Could not parse log level, err: %v. 
Defaulting to DEBUG.", err) } - defer logrusLogFile.Close() - logrus.SetOutput(logrusLogFile) + log.SetupLogger(logFile, logLevel) + log.Info("Log file: %s (level=%s)", logFileName, logLevel.String()) +} - // Set up logger - err = os.Remove("test_log") // TODO: use proper file when logrus is removed - if err != nil && !os.IsNotExist(err) { - fmt.Fprintf(os.Stderr, "could not delete old log file, err: %v", err) - os.Exit(1) //nolint:gocritic // the error will not happen after logrus is removed +//nolint:funlen +func main() { + err := configuration.LoadEnvironmentVariables() + if err != nil { + fmt.Fprintf(os.Stderr, "Could not load the environment variables, err: %v", err) + os.Exit(1) } - logFile, err := os.OpenFile("test_log", os.O_RDWR|os.O_CREATE, logFilePermissions) + logFile, err := createLogFile(*claimPath) if err != nil { - fmt.Fprintf(os.Stderr, "could not create log file, err: %v", err) + fmt.Fprintf(os.Stderr, "Could not create the log file, err: %v", err) os.Exit(1) } defer logFile.Close() - log.SetupLogger(logFile) - log.Info("Log file: %s", logFileName) + setupLogger(logFile) - logrus.Infof("TNF Version : %v", versions.GitVersion()) - logrus.Infof("Claim Format Version: %s", versions.ClaimFormatVersion) - logrus.Infof("Labels filter : %v", *labelsFlag) + log.Info("TNF Version : %v", versions.GitVersion()) + log.Info("Claim Format Version: %s", versions.ClaimFormatVersion) + log.Info("Labels filter : %v", *labelsFlag) cli.PrintBanner() @@ -183,32 +172,36 @@ func main() { fmt.Printf("Log file: %s\n", logFileName) fmt.Printf("\n") + fmt.Println("Building test environment...") + fmt.Printf("\n") + _ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...) certsuite.LoadChecksDB(*labelsFlag) if *listFlag { // ToDo: List all the available checks, filtered with --labels. - logrus.Errorf("Not implemented yet.") - os.Exit(1) + + fmt.Fprint(os.Stderr, "Checks listing is not implemented yet") + os.Exit(1) //nolint:gocritic } // Diagnostic functions will run when no labels are provided. if *labelsFlag == noLabelsExpr { - logrus.Warnf("CNF Certification Suite will run in diagnostic mode so no test case will be launched.") + log.Warn("CNF Certification Suite will run in diagnostic mode so no test case will be launched.") } var timeout time.Duration timeout, err = time.ParseDuration(*timeoutFlag) if err != nil { - logrus.Errorf("Failed to parse timeout flag %v: %v, using default timeout value %v", *timeoutFlag, err, timeoutFlagDefaultvalue) + log.Error("Failed to parse timeout flag %v: %v, using default timeout value %v", *timeoutFlag, err, timeoutFlagDefaultvalue) timeout = timeoutFlagDefaultvalue } // Set clientsholder singleton with the filenames from the env vars. 
- logrus.Infof("Output folder for the claim file: %s", *claimPath) + log.Info("Output folder for the claim file: %s", *claimPath) if *serverModeFlag { - logrus.Info("Running CNF Certification Suite in web server mode.") + log.Info("Running CNF Certification Suite in web server mode.") webserver.StartServer(*claimPath) } else { log.Info("Running CNF Certification Suite in stand-alone mode.") diff --git a/pkg/autodiscover/autodiscover.go b/pkg/autodiscover/autodiscover.go index 218f97dc0..32ae2a38a 100644 --- a/pkg/autodiscover/autodiscover.go +++ b/pkg/autodiscover/autodiscover.go @@ -19,14 +19,15 @@ package autodiscover import ( "context" "errors" + "os" "regexp" "time" configv1 "github.com/openshift/api/config/v1" clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/compatibility" "github.com/test-network-function/cnf-certification-test/pkg/configuration" "helm.sh/helm/v3/pkg/release" @@ -103,11 +104,11 @@ var data = DiscoveredTestData{} func warnDeprecation(config *configuration.TestConfiguration) { if len(config.OperatorsUnderTestLabels) == 0 { - logrus.Warnf("DEPRECATED: deprecated default operator label in use ( %s:%s ) is about to be obsolete. Please use the new \"operatorsUnderTestLabels\" field to specify operators labels instead.", + log.Warn("DEPRECATED: deprecated default operator label in use ( %s:%s ) is about to be obsolete. Please use the new \"operatorsUnderTestLabels\" field to specify operators labels instead.", deprecatedHardcodedOperatorLabelName, deprecatedHardcodedOperatorLabelValue) } if len(config.PodsUnderTestLabels) == 0 { - logrus.Warn("No Pod under test labels configured. Tests on pods and containers will not run. Please use the \"podsUnderTestLabels\" field to specify labels for pods under test") + log.Warn("No Pod under test labels configured. Tests on pods and containers will not run. 
Please use the \"podsUnderTestLabels\" field to specify labels for pods under test") } } @@ -120,7 +121,7 @@ func createLabels(labelStrings []string) (labelObjects []labelObject) { values := r.FindStringSubmatch(label) if len(values) != labelRegexMatches { - logrus.Errorf("failed to parse label=%s, will not be used!, ", label) + log.Error("failed to parse label=%s, will not be used!, ", label) continue } var aLabel labelObject @@ -140,7 +141,8 @@ func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData var err error data.StorageClasses, err = getAllStorageClasses() if err != nil { - logrus.Fatalf("Failed to retrieve storageClasses - err: %v", err) + log.Error("Failed to retrieve storageClasses - err: %v", err) + os.Exit(1) } podsUnderTestLabelsObjects := createLabels(config.PodsUnderTestLabels) @@ -151,8 +153,8 @@ func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData // adds DEPRECATED hardcoded operator label operatorsUnderTestLabelsObjects = append(operatorsUnderTestLabelsObjects, labelObject{LabelKey: deprecatedHardcodedOperatorLabelName, LabelValue: deprecatedHardcodedOperatorLabelValue}) - logrus.Infof("parsed pods under test labels: %+v", podsUnderTestLabelsObjects) - logrus.Infof("parsed operators under test labels: %+v", operatorsUnderTestLabelsObjects) + log.Info("parsed pods under test labels: %+v", podsUnderTestLabelsObjects) + log.Info("parsed operators under test labels: %+v", operatorsUnderTestLabelsObjects) data.AllNamespaces, _ = getAllNamespaces(oc.K8sClient.CoreV1()) data.AllSubscriptions = findSubscriptions(oc.OlmClient, []string{""}) @@ -167,15 +169,18 @@ func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData data.DebugPods, _ = findPodsByLabel(oc.K8sClient.CoreV1(), debugLabels, debugNS) data.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1()) if err != nil { - logrus.Fatalf("Cannot get resource quotas, error: %v", err) + log.Error("Cannot get resource quotas, error: %v", err) + os.Exit(1) } data.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces) if err != nil { - logrus.Fatalf("Cannot get pod disruption budgets, error: %v", err) + log.Error("Cannot get pod disruption budgets, error: %v", err) + os.Exit(1) } data.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient) if err != nil { - logrus.Fatalln("Cannot get network policies") + log.Error("Cannot get network policies") + os.Exit(1) } data.Crds = FindTestCrdNames(config.CrdFilters) data.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds) @@ -185,13 +190,15 @@ func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData openshiftVersion, err := getOpenshiftVersion(oc.OcpClient) if err != nil { - logrus.Fatalf("Failed to get the OpenShift version: %v", err) + log.Error("Failed to get the OpenShift version: %v", err) + os.Exit(1) } data.OpenshiftVersion = openshiftVersion k8sVersion, err := oc.K8sClient.Discovery().ServerVersion() if err != nil { - logrus.Fatalf("Cannot get the K8s version, error: %v", err) + log.Error("Cannot get the K8s version, error: %v", err) + os.Exit(1) } data.IstioServiceMeshFound = isIstioServiceMeshInstalled(data.AllNamespaces) data.ValidProtocolNames = config.ValidProtocolNames @@ -206,37 +213,44 @@ func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData // Find ClusterRoleBindings clusterRoleBindings, err := getClusterRoleBindings() if err != nil { - logrus.Fatalf("Cannot get cluster role 
bindings, error: %v", err) + log.Error("Cannot get cluster role bindings, error: %v", err) + os.Exit(1) } data.ClusterRoleBindings = clusterRoleBindings // Find RoleBindings roleBindings, err := getRoleBindings() if err != nil { - logrus.Fatalf("Cannot get cluster role bindings, error: %v", err) + log.Error("Cannot get cluster role bindings, error: %v", err) + os.Exit(1) } data.RoleBindings = roleBindings // find roles roles, err := getRoles() if err != nil { - logrus.Fatalf("Cannot get roles, error: %v", err) + log.Error("Cannot get roles, error: %v", err) + os.Exit(1) } data.Roles = roles data.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces) data.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Fatalf("Cannot get list of nodes, error: %v", err) + log.Error("Cannot get list of nodes, error: %v", err) + os.Exit(1) } data.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1()) if err != nil { - logrus.Fatalf("Cannot get list of persistent volumes, error: %v", err) + log.Error("Cannot get list of persistent volumes, error: %v", err) + os.Exit(1) } data.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1()) if err != nil { - logrus.Fatalf("Cannot get list of persistent volume claims, error: %v", err) + log.Error("Cannot get list of persistent volume claims, error: %v", err) + os.Exit(1) } data.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList) if err != nil { - logrus.Fatalf("Cannot get list of services, error: %v", err) + log.Error("Cannot get list of services, error: %v", err) + os.Exit(1) } if config.CollectorAppEndPoint == "" { @@ -262,7 +276,7 @@ func getOpenshiftVersion(oClient clientconfigv1.ConfigV1Interface) (ver string, if err != nil { switch { case kerrors.IsNotFound(err): - logrus.Warnf("Unable to get ClusterOperator CR from openshift-apiserver. Running in a non-OCP cluster.") + log.Warn("Unable to get ClusterOperator CR from openshift-apiserver. 
Running in a non-OCP cluster.") return NonOpenshiftClusterVersion, nil default: return "", err @@ -273,7 +287,7 @@ func getOpenshiftVersion(oClient clientconfigv1.ConfigV1Interface) (ver string, if ver.Name == tnfCsvTargetLabelName { // openshift-apiserver does not report version, // clusteroperator/openshift-apiserver does, and only version number - logrus.Infof("OpenShift Version found: %v", ver.Version) + log.Info("OpenShift Version found: %v", ver.Version) return ver.Version, nil } } diff --git a/pkg/autodiscover/autodiscover_crds.go b/pkg/autodiscover/autodiscover_crds.go index e75d349b0..13f200443 100644 --- a/pkg/autodiscover/autodiscover_crds.go +++ b/pkg/autodiscover/autodiscover_crds.go @@ -19,7 +19,7 @@ package autodiscover import ( "strings" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/configuration" "context" @@ -35,7 +35,7 @@ func getClusterCrdNames() (crdList []*apiextv1.CustomResourceDefinition, err err oc := clientsholder.GetClientsHolder() crds, err := oc.APIExtClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorln("error when listing crds") + log.Error("error when listing crds") return crdList, err } for idx := range crds.Items { @@ -48,7 +48,7 @@ func getClusterCrdNames() (crdList []*apiextv1.CustomResourceDefinition, err err func FindTestCrdNames(crdFilters []configuration.CrdFilter) (targetCrds []*apiextv1.CustomResourceDefinition) { clusterCrds, err := getClusterCrdNames() if err != nil { - logrus.Errorf("Unable to get cluster CRD.") + log.Error("Unable to get cluster CRD.") return []*apiextv1.CustomResourceDefinition{} } for _, crd := range clusterCrds { diff --git a/pkg/autodiscover/autodiscover_events.go b/pkg/autodiscover/autodiscover_events.go index 14e39beb0..9f8b88db7 100644 --- a/pkg/autodiscover/autodiscover_events.go +++ b/pkg/autodiscover/autodiscover_events.go @@ -19,7 +19,7 @@ package autodiscover import ( "context" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" @@ -34,7 +34,7 @@ func findAbnormalEvents(oc corev1client.CoreV1Interface, namespaces []string) (a for _, ns := range namespaces { someAbnormalEvents, err := oc.Events(ns).List(context.TODO(), metav1.ListOptions{FieldSelector: "type!=Normal"}) if err != nil { - logrus.Errorf("failed to get event list for namespace %s, err:%s", ns, err) + log.Error("failed to get event list for namespace %s, err:%s", ns, err) continue } abnormalEvents = append(abnormalEvents, someAbnormalEvents.Items...) 
diff --git a/pkg/autodiscover/autodiscover_operators.go b/pkg/autodiscover/autodiscover_operators.go index 990ec5956..c1b005649 100644 --- a/pkg/autodiscover/autodiscover_operators.go +++ b/pkg/autodiscover/autodiscover_operators.go @@ -23,8 +23,8 @@ import ( helmclient "github.com/mittwald/go-helm-client" olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" clientOlm "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/configuration" "github.com/test-network-function/cnf-certification-test/pkg/stringhelper" "helm.sh/helm/v3/pkg/release" @@ -50,15 +50,15 @@ func isIstioServiceMeshInstalled(allNs []string) bool { gvr := schema.GroupVersionResource{Group: "install.istio.io", Version: "v1alpha1", Resource: "istiooperators"} cr, err := oc.DynamicClient.Resource(gvr).Namespace(istioNamespace).Get(context.TODO(), istioCR, metav1.GetOptions{}) if err != nil { - logrus.Errorf("failed when checking the Istio CR, err: %v", err) + log.Error("failed when checking the Istio CR, err: %v", err) return false } if cr == nil { - logrus.Warnf("The Istio installation CR is missing (but the Istio namespace exists)") + log.Warn("The Istio installation CR is missing (but the Istio namespace exists)") return false } - logrus.Infof("Istio Service Mesh detected") + log.Info("Istio Service Mesh detected") return true } @@ -66,19 +66,19 @@ func isIstioServiceMeshInstalled(allNs []string) bool { func findOperatorsByLabel(olmClient clientOlm.Interface, labels []labelObject, namespaces []configuration.Namespace) []*olmv1Alpha.ClusterServiceVersion { csvs := []*olmv1Alpha.ClusterServiceVersion{} for _, ns := range namespaces { - logrus.Debugf("Searching CSVs in namespace %s", ns) + log.Debug("Searching CSVs in namespace %s", ns) for _, aLabelObject := range labels { label := aLabelObject.LabelKey // DEPRECATED special processing for deprecated operator label. Value not needed to match. 
if aLabelObject.LabelKey != deprecatedHardcodedOperatorLabelName { label += "=" + aLabelObject.LabelValue } - logrus.Debugf("Searching CSVs with label %s", label) + log.Debug("Searching CSVs with label %s", label) csvList, err := olmClient.OperatorsV1alpha1().ClusterServiceVersions(ns.Name).List(context.TODO(), metav1.ListOptions{ LabelSelector: label, }) if err != nil { - logrus.Errorln("error when listing csvs in ns=", ns, " label=", label) + log.Error("error when listing csvs in ns=%s label=%s", ns, label) continue } @@ -88,9 +88,9 @@ func findOperatorsByLabel(olmClient clientOlm.Interface, labels []labelObject, n } } - logrus.Infof("Found %d CSVs:", len(csvs)) + log.Info("Found %d CSVs:", len(csvs)) for i := range csvs { - logrus.Infof(" CSV name: %s (ns: %s)", csvs[i].Name, csvs[i].Namespace) + log.Info(" CSV name: %s (ns: %s)", csvs[i].Name, csvs[i].Namespace) } return csvs @@ -98,7 +98,7 @@ func findOperatorsByLabel(olmClient clientOlm.Interface, labels []labelObject, n func getAllNamespaces(oc corev1client.CoreV1Interface) (allNs []string, err error) { nsList, err := oc.Namespaces().List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorln("Error when listing", "err: ", err) + log.Error("Error when listing, err: %s", err) return allNs, fmt.Errorf("error getting all namespaces, err: %s", err) } for index := range nsList.Items { @@ -109,18 +109,18 @@ func getAllNamespaces(oc corev1client.CoreV1Interface) (allNs []string, err erro func getAllOperators(olmClient clientOlm.Interface) []*olmv1Alpha.ClusterServiceVersion { csvs := []*olmv1Alpha.ClusterServiceVersion{} - logrus.Debugf("Searching CSVs in namespace All") + log.Debug("Searching CSVs in namespace All") csvList, err := olmClient.OperatorsV1alpha1().ClusterServiceVersions("").List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorln("error when listing csvs in all namespaces") + log.Error("error when listing csvs in all namespaces") } for i := range csvList.Items { csvs = append(csvs, &csvList.Items[i]) } - logrus.Infof("Found %d CSVs:", len(csvs)) + log.Info("Found %d CSVs:", len(csvs)) for i := range csvs { - logrus.Infof(" CSV name: %s (ns: %s)", csvs[i].Name, csvs[i].Namespace) + log.Info(" CSV name: %s (ns: %s)", csvs[i].Name, csvs[i].Namespace) } return csvs } @@ -132,18 +132,18 @@ func findSubscriptions(olmClient clientOlm.Interface, namespaces []string) []olm if ns == "" { displayNs = "All Namespaces" } - logrus.Debugf("Searching subscriptions in namespace %s", displayNs) + log.Debug("Searching subscriptions in namespace %s", displayNs) subscription, err := olmClient.OperatorsV1alpha1().Subscriptions(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorln("error when listing subscriptions in ns=", ns) + log.Error("error when listing subscriptions in ns=%s", ns) continue } subscriptions = append(subscriptions, subscription.Items...) 
} - logrus.Infof("Found %d subscriptions in the target namespaces", len(subscriptions)) + log.Info("Found %d subscriptions in the target namespaces", len(subscriptions)) for i := range subscriptions { - logrus.Infof(" Subscriptions name: %s (ns: %s)", subscriptions[i].Name, subscriptions[i].Namespace) + log.Info(" Subscriptions name: %s (ns: %s)", subscriptions[i].Name, subscriptions[i].Namespace) } return subscriptions } @@ -158,7 +158,7 @@ func getHelmList(restConfig *rest.Config, namespaces []string) map[string][]*rel RepositoryConfig: "/tmp/.helmrepo", Debug: true, Linting: true, - DebugLog: logrus.Printf, + DebugLog: log.Info, }, RestConfig: restConfig, } @@ -177,7 +177,7 @@ func getHelmList(restConfig *rest.Config, namespaces []string) map[string][]*rel func getAllInstallPlans(olmClient clientOlm.Interface) (out []*olmv1Alpha.InstallPlan) { installPlanList, err := olmClient.OperatorsV1alpha1().InstallPlans("").List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorf("unable get installplans in cluster, err: %s", err) + log.Error("unable get installplans in cluster, err: %s", err) return out } for index := range installPlanList.Items { @@ -190,7 +190,7 @@ func getAllInstallPlans(olmClient clientOlm.Interface) (out []*olmv1Alpha.Instal func getAllCatalogSources(olmClient clientOlm.Interface) (out []*olmv1Alpha.CatalogSource) { catalogSourcesList, err := olmClient.OperatorsV1alpha1().CatalogSources("").List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorf("unable get CatalogSources in cluster, err: %s", err) + log.Error("unable get CatalogSources in cluster, err: %s", err) return out } for index := range catalogSourcesList.Items { diff --git a/pkg/autodiscover/autodiscover_pods.go b/pkg/autodiscover/autodiscover_pods.go index befb9484e..a5b89345f 100644 --- a/pkg/autodiscover/autodiscover_pods.go +++ b/pkg/autodiscover/autodiscover_pods.go @@ -19,7 +19,7 @@ package autodiscover import ( "context" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" @@ -31,12 +31,12 @@ func findPodsByLabel(oc corev1client.CoreV1Interface, labels []labelObject, name for _, ns := range namespaces { for _, aLabelObject := range labels { label := aLabelObject.LabelKey + "=" + aLabelObject.LabelValue - logrus.Debugf("Searching Pods with label %s", label) + log.Debug("Searching Pods with label %s", label) pods, err := oc.Pods(ns).List(context.TODO(), metav1.ListOptions{ LabelSelector: label, }) if err != nil { - logrus.Errorln("error when listing pods in ns=", ns, " label=", label, "err: ", err) + log.Error("error when listing pods in ns=%s label=%s, err: %v", ns, label, err) continue } diff --git a/pkg/autodiscover/autodiscover_podset.go b/pkg/autodiscover/autodiscover_podset.go index 74cd83c3e..dde5473fd 100644 --- a/pkg/autodiscover/autodiscover_podset.go +++ b/pkg/autodiscover/autodiscover_podset.go @@ -18,7 +18,7 @@ package autodiscover import ( "context" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" appsv1 "k8s.io/api/apps/v1" scalingv1 "k8s.io/api/autoscaling/v1" @@ -32,7 +32,7 @@ import ( func FindDeploymentByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.Deployment, error) { dp, err := appClient.Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { - 
logrus.Error("Cannot retrieve deployment in ns=", namespace, " name=", name) + log.Error("Cannot retrieve deployment in ns=%s name=%s", namespace, name) return nil, err } return dp, nil @@ -40,7 +40,7 @@ func FindDeploymentByNameByNamespace(appClient appv1client.AppsV1Interface, name func FindStatefulsetByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.StatefulSet, error) { ss, err := appClient.StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { - logrus.Error("Cannot retrieve deployment in ns=", namespace, " name=", name) + log.Error("Cannot retrieve deployment in ns=%s name=%s", namespace, name) return nil, err } return ss, nil @@ -49,7 +49,7 @@ func FindStatefulsetByNameByNamespace(appClient appv1client.AppsV1Interface, nam func FindCrObjectByNameByNamespace(scalesGetter scale.ScalesGetter, ns, name string, groupResourceSchema schema.GroupResource) (*scalingv1.Scale, error) { crScale, err := scalesGetter.Scales(ns).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{}) if err != nil { - logrus.Error("Cannot retrieve deployment in ns=", ns, " name=", name) + log.Error("Cannot retrieve deployment in ns=%s name=%s", ns, name) return nil, err } return crScale, nil @@ -65,25 +65,25 @@ func findDeploymentByLabel( for _, ns := range namespaces { dps, err := appClient.Deployments(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorf("Failed to list deployments in ns=%s, err: %v . Trying to proceed.", ns, err) + log.Error("Failed to list deployments in ns=%s, err: %v . Trying to proceed.", ns, err) continue } if len(dps.Items) == 0 { - logrus.Warn("Did not find any deployments in ns=", ns) + log.Warn("Did not find any deployments in ns=%s", ns) } for i := 0; i < len(dps.Items); i++ { for _, aLabelObject := range labels { - logrus.Tracef("Searching pods in deployment %q found in ns %q using label %s=%s", dps.Items[i].Name, ns, aLabelObject.LabelKey, aLabelObject.LabelValue) + log.Debug("Searching pods in deployment %q found in ns %q using label %s=%s", dps.Items[i].Name, ns, aLabelObject.LabelKey, aLabelObject.LabelValue) if dps.Items[i].Spec.Template.ObjectMeta.Labels[aLabelObject.LabelKey] == aLabelObject.LabelValue { deployments = append(deployments, dps.Items[i]) - logrus.Info("Deployment ", dps.Items[i].Name, " found in ns ", ns) + log.Info("Deployment %s found in ns=%s", dps.Items[i].Name, ns) } } } } if len(deployments) == 0 { - logrus.Warnf("Did not find any deployment in the configured namespaces %v", namespaces) + log.Warn("Did not find any deployment in the configured namespaces %v", namespaces) } return deployments } @@ -98,25 +98,25 @@ func findStatefulSetByLabel( for _, ns := range namespaces { ss, err := appClient.StatefulSets(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorf("Failed to list statefulsets in ns=%s, err: %v . Trying to proceed.", ns, err) + log.Error("Failed to list statefulsets in ns=%s, err: %v . 
Trying to proceed.", ns, err) continue } if len(ss.Items) == 0 { - logrus.Warn("Did not find any statefulSet in ns=", ns) + log.Warn("Did not find any statefulSet in ns=%s", ns) } for i := 0; i < len(ss.Items); i++ { for _, aLabelObject := range labels { - logrus.Tracef("Searching pods in statefulset %q found in ns %q using label %s=%s", ss.Items[i].Name, ns, aLabelObject.LabelKey, aLabelObject.LabelValue) + log.Debug("Searching pods in statefulset %q found in ns %q using label %s=%s", ss.Items[i].Name, ns, aLabelObject.LabelKey, aLabelObject.LabelValue) if ss.Items[i].Spec.Template.ObjectMeta.Labels[aLabelObject.LabelKey] == aLabelObject.LabelValue { statefulsets = append(statefulsets, ss.Items[i]) - logrus.Info("StatefulSet ", ss.Items[i].Name, " found in ns ", ns) + log.Info("StatefulSet %s found in ns=%s", ss.Items[i].Name, ns) } } } } if len(statefulsets) == 0 { - logrus.Warnf("Did not find any statefulset in the configured namespaces %v", namespaces) + log.Warn("Did not find any statefulset in the configured namespaces %v", namespaces) } return statefulsets } @@ -126,7 +126,7 @@ func findHpaControllers(cs kubernetes.Interface, namespaces []string) []*scaling for _, ns := range namespaces { hpas, err := cs.AutoscalingV1().HorizontalPodAutoscalers(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Error("Cannot list HorizontalPodAutoscalers on namespace ", ns, " err ", err) + log.Error("Cannot list HorizontalPodAutoscalers on namespace %s, err: %v", ns, err) return m } for i := 0; i < len(hpas.Items); i++ { @@ -134,7 +134,7 @@ func findHpaControllers(cs kubernetes.Interface, namespaces []string) []*scaling } } if len(m) == 0 { - logrus.Info("Cannot find any deployed HorizontalPodAutoscaler") + log.Info("Cannot find any deployed HorizontalPodAutoscaler") } return m } diff --git a/pkg/autodiscover/autodiscover_pv.go b/pkg/autodiscover/autodiscover_pv.go index b1676a3b2..b65c712cd 100644 --- a/pkg/autodiscover/autodiscover_pv.go +++ b/pkg/autodiscover/autodiscover_pv.go @@ -19,8 +19,8 @@ package autodiscover import ( "context" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -47,7 +47,7 @@ func getAllStorageClasses() ([]storagev1.StorageClass, error) { o := clientsholder.GetClientsHolder() storageclasslist, err := o.K8sClient.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Errorln("Error when listing", "err: ", err) + log.Error("Error when listing, err: %v", err) return nil, err } return storageclasslist.Items, nil diff --git a/pkg/autodiscover/autodiscover_rbac.go b/pkg/autodiscover/autodiscover_rbac.go index 51cda0b0c..bbe1d0d01 100644 --- a/pkg/autodiscover/autodiscover_rbac.go +++ b/pkg/autodiscover/autodiscover_rbac.go @@ -19,8 +19,8 @@ package autodiscover import ( "context" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -31,7 +31,7 @@ func getRoleBindings() ([]rbacv1.RoleBinding, error) { clientsHolder := clientsholder.GetClientsHolder() roleList, roleErr := clientsHolder.K8sClient.RbacV1().RoleBindings("").List(context.TODO(), metav1.ListOptions{}) if 
roleErr != nil { - logrus.Errorf("executing rolebinding command failed with error: %v", roleErr) + log.Error("executing rolebinding command failed with error: %v", roleErr) return nil, roleErr } return roleList.Items, nil @@ -44,7 +44,7 @@ func getClusterRoleBindings() ([]rbacv1.ClusterRoleBinding, error) { clientsHolder := clientsholder.GetClientsHolder() crbList, crbErr := clientsHolder.K8sClient.RbacV1().ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{}) if crbErr != nil { - logrus.Errorf("executing clusterrolebinding command failed with error: %v", crbErr) + log.Error("executing clusterrolebinding command failed with error: %v", crbErr) return nil, crbErr } return crbList.Items, nil @@ -56,7 +56,7 @@ func getRoles() ([]rbacv1.Role, error) { clientsHolder := clientsholder.GetClientsHolder() roleList, roleErr := clientsHolder.K8sClient.RbacV1().Roles("").List(context.TODO(), metav1.ListOptions{}) if roleErr != nil { - logrus.Errorf("executing roles command failed with error: %v", roleErr) + log.Error("executing roles command failed with error: %v", roleErr) return nil, roleErr } return roleList.Items, nil diff --git a/pkg/autodiscover/autodiscover_scales.go b/pkg/autodiscover/autodiscover_scales.go index 99a3ae2d6..1a33d0836 100644 --- a/pkg/autodiscover/autodiscover_scales.go +++ b/pkg/autodiscover/autodiscover_scales.go @@ -2,9 +2,10 @@ package autodiscover import ( "context" + "os" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" scalingv1 "k8s.io/api/autoscaling/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,7 +24,7 @@ func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDef var scaleObjects []ScaleObject for _, crd := range crds { if crd.Spec.Scope != apiextv1.NamespaceScoped { - logrus.Warnf("Target CRD %s is cluster-wide scoped. Skipping search of scale objects.", crd.Name) + log.Warn("Target CRD %s is cluster-wide scoped. Skipping search of scale objects.", crd.Name) continue } @@ -37,23 +38,24 @@ func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDef // Filter out non-scalable CRDs. if crdVersion.Subresources == nil || crdVersion.Subresources.Scale == nil { - logrus.Infof("Target CRD %s is not scalable. Skipping search of scalable CRs.", crd.Name) + log.Info("Target CRD %s is not scalable. Skipping search of scalable CRs.", crd.Name) continue } - logrus.Debugf("Looking for Scalable CRs of CRD %s (api version %s, group %s, plural %s) in target namespaces.", + log.Debug("Looking for Scalable CRs of CRD %s (api version %s, group %s, plural %s) in target namespaces.", crd.Name, crdVersion.Name, crd.Spec.Group, crd.Spec.Names.Plural) for _, ns := range namespaces { crs, err := dynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { - logrus.Fatalf("Error getting CRs of CRD %s in namespace %s: %v", crd.Name, ns, err) + log.Error("Error getting CRs of CRD %s in namespace %s: %v", crd.Name, ns, err) + os.Exit(1) } if len(crs.Items) > 0 { scaleObjects = append(scaleObjects, getCrScaleObjects(crs.Items, crd)...) 
} else { - logrus.Warnf("No CRs of CRD %s found in the target namespaces.", crd.Name) + log.Warn("No CRs of CRD %s found in the target namespaces.", crd.Name) } } } @@ -75,7 +77,8 @@ func getCrScaleObjects(crs []unstructured.Unstructured, crd *apiextv1.CustomReso namespace := cr.GetNamespace() crScale, err := clients.ScalingClient.Scales(namespace).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{}) if err != nil { - logrus.Fatalf("Error while getting the scale of CR=%s (CRD=%s) in namespace %s: %v", name, crd.Name, namespace, err) + log.Error("Error while getting the scale of CR=%s (CRD=%s) in namespace %s: %v", name, crd.Name, namespace, err) + os.Exit(1) } scaleObjects = append(scaleObjects, ScaleObject{Scale: crScale, GroupResourceSchema: groupResourceSchema}) diff --git a/pkg/certsuite/certsuite.go b/pkg/certsuite/certsuite.go index 3694232f8..fcd5f417e 100644 --- a/pkg/certsuite/certsuite.go +++ b/pkg/certsuite/certsuite.go @@ -5,7 +5,6 @@ import ( "path/filepath" "time" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/accesscontrol" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/certification" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/lifecycle" @@ -17,6 +16,7 @@ import ( "github.com/test-network-function/cnf-certification-test/cnf-certification-test/platform" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/preflight" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/results" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/claimhelper" "github.com/test-network-function/cnf-certification-test/pkg/collector" @@ -52,25 +52,26 @@ func Run(labelsFilter, outputFolder string, timeout time.Duration) { claimBuilder, err := claimhelper.NewClaimBuilder() if err != nil { - logrus.Fatalf("Failed to get claim builder: %v", err) + log.Error("Failed to get claim builder: %v", err) + os.Exit(1) } claimOutputFile := filepath.Join(outputFolder, results.ClaimFileName) - logrus.Infof("Running checks matching labels expr %q with timeout %v", labelsFilter, timeout) + log.Info("Running checks matching labels expr %q with timeout %v", labelsFilter, timeout) startTime := time.Now() err = checksdb.RunChecks(labelsFilter, timeout) if err != nil { - logrus.Error(err) + log.Error("%v", err) } endTime := time.Now() - logrus.Infof("Finished running checks in %v", endTime.Sub(startTime)) + log.Info("Finished running checks in %v", endTime.Sub(startTime)) // Marshal the claim and output to file claimBuilder.Build(claimOutputFile) if configuration.GetTestParameters().EnableXMLCreation { - logrus.Infof("XML file creation is enabled. Creating JUnit XML file: %s", junitXMLOutputFile) + log.Info("XML file creation is enabled. 
Creating JUnit XML file: %s", junitXMLOutputFile) claimBuilder.ToJUnitXML(junitXMLOutputFile, startTime, endTime) } @@ -78,7 +79,7 @@ func Run(labelsFilter, outputFolder string, timeout time.Duration) { if configuration.GetTestParameters().EnableDataCollection { err = collector.SendClaimFileToCollector(env.CollectorAppEndPoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword) if err != nil { - logrus.Errorf("Failed to send post request to the collector: %v", err) + log.Error("Failed to send post request to the collector: %v", err) } } @@ -86,7 +87,7 @@ func Run(labelsFilter, outputFolder string, timeout time.Duration) { resultsOutputDir := outputFolder webFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir) if err != nil { - logrus.Errorf("Failed to create results web files: %v", err) + log.Error("Failed to create results web files: %v", err) } allArtifactsFilePaths := []string{filepath.Join(outputFolder, results.ClaimFileName)} @@ -98,7 +99,8 @@ func Run(labelsFilter, outputFolder string, timeout time.Duration) { if !configuration.GetTestParameters().OmitArtifactsZipFile { err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths) if err != nil { - logrus.Fatalf("Failed to compress results artifacts: %v", err) + log.Error("Failed to compress results artifacts: %v", err) + os.Exit(1) } } @@ -107,7 +109,8 @@ func Run(labelsFilter, outputFolder string, timeout time.Duration) { for _, file := range webFilePaths { err := os.Remove(file) if err != nil { - logrus.Fatalf("failed to remove web file %s: %v", file, err) + log.Error("failed to remove web file %s: %v", file, err) + os.Exit(1) } } } diff --git a/pkg/checksdb/check.go b/pkg/checksdb/check.go index 47c6a8998..7815a20f6 100644 --- a/pkg/checksdb/check.go +++ b/pkg/checksdb/check.go @@ -7,7 +7,6 @@ import ( "sync" "time" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/cli" "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/testhelper" @@ -185,7 +184,7 @@ func (check *Check) SetResult(compliantObjects, nonCompliantObjects []*testhelpe resultObjectsStr, err := testhelper.ResultObjectsToString(compliantObjects, nonCompliantObjects) if err != nil { - logrus.Errorf("Failed to get result objects string for check %s: %v", check.ID, err) + check.LogError("Failed to get result objects string for check %s: %v", check.ID, err) } check.CapturedOutput = resultObjectsStr @@ -200,7 +199,7 @@ func (check *Check) SetResult(compliantObjects, nonCompliantObjects []*testhelpe check.FailureReason = resultObjectsStr } else if len(compliantObjects) == 0 { // Mark this check as skipped. - logrus.Warnf("Check %s marked as skipped as both compliant and non-compliant objects lists are empty.", check.ID) + check.LogWarn("Check %s marked as skipped as both compliant and non-compliant objects lists are empty.", check.ID) check.FailureReason = "Compliant and non-compliant objects lists are empty." 
check.Result = CheckResultSkipped } @@ -239,7 +238,7 @@ func (check *Check) SetResultError(reason string) { } if check.Result == CheckResultError { - logrus.Warnf("Check %s result was already marked as error.", check.ID) + check.LogWarn("Check %s result was already marked as error.", check.ID) return } check.Result = CheckResultError @@ -270,7 +269,7 @@ func (check *Check) Run() error { check.EndTime = time.Now() }() - logrus.Infof("RUNNING CHECK: %s (labels: %v)", check.ID, check.Labels) + log.Info("RUNNING CHECK: %s (labels: %v)", check.ID, check.Labels) if check.BeforeCheckFn != nil { if err := check.BeforeCheckFn(check); err != nil { return fmt.Errorf("check %s failed in before check function: %v", check.ID, err) diff --git a/pkg/checksdb/checksdb.go b/pkg/checksdb/checksdb.go index 03987bf14..27f55d287 100644 --- a/pkg/checksdb/checksdb.go +++ b/pkg/checksdb/checksdb.go @@ -10,9 +10,9 @@ import ( "time" "unicode/utf8" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/cnf-certification-test/identifiers" "github.com/test-network-function/cnf-certification-test/internal/cli" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/stringhelper" "github.com/test-network-function/test-network-function-claim/pkg/claim" ) @@ -67,23 +67,23 @@ func RunChecks(labelsExpr string, timeout time.Duration) error { select { case <-groupDone: - logrus.Tracef("Group %s finished running checks.", group.name) + log.Debug("Group %s finished running checks.", group.name) case <-abortChan: - logrus.Warnf("Group %s aborted.", group.name) + log.Warn("Group %s aborted.", group.name) stopChan <- true abort = true abortReason = "Test suite aborted due to error" _ = group.OnAbort(labelsExpr, abortReason) case <-timeOutChan: - logrus.Warnf("Running all checks timed-out.") + log.Warn("Running all checks timed-out.") stopChan <- true abort = true abortReason = "global time-out" _ = group.OnAbort(labelsExpr, abortReason) case <-sigIntChan: - logrus.Warnf("SIGINT/SIGTERM received.") + log.Warn("SIGINT/SIGTERM received.") stopChan <- true abort = true @@ -99,7 +99,7 @@ func RunChecks(labelsExpr string, timeout time.Duration) error { printFailedChecksLog() if len(errs) > 0 { - logrus.Errorf("RunChecks errors: %v", errs) + log.Error("RunChecks errors: %v", errs) return fmt.Errorf("%d errors found in checks/groups", len(errs)) } @@ -109,10 +109,11 @@ func RunChecks(labelsExpr string, timeout time.Duration) error { func recordCheckResult(check *Check) { claimID, ok := identifiers.TestIDToClaimID[check.ID] if !ok { - logrus.Fatalf("TestID %s has no corresponding Claim ID", check.ID) + check.LogError("TestID %s has no corresponding Claim ID", check.ID) + os.Exit(1) } - logrus.Infof("Recording result %q of check %s, claimID: %+v", check.Result, check.ID, claimID) + log.Info("Recording result %q of check %s, claimID: %+v", check.Result, check.ID, claimID) resultsDB[check.ID] = claim.Result{ TestID: &claimID, State: check.Result.String(), @@ -190,11 +191,11 @@ func printFailedChecksLog() { fmt.Println(strings.Repeat("-", nbSymbols)) fmt.Println(logHeader) fmt.Println(strings.Repeat("-", nbSymbols)) - log := check.GetLogs() - if log == "" { + checkLogs := check.GetLogs() + if checkLogs == "" { fmt.Println("Empty log output") } else { - fmt.Println(log) + fmt.Println(checkLogs) } } } diff --git a/pkg/checksdb/checksgroup.go b/pkg/checksdb/checksgroup.go index ed8f0f955..2df499bbd 100644 --- 
a/pkg/checksdb/checksgroup.go +++ b/pkg/checksdb/checksgroup.go @@ -6,8 +6,8 @@ import ( "runtime/debug" "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/cli" + "github.com/test-network-function/cnf-certification-test/internal/log" ) type ChecksGroup struct { @@ -75,7 +75,7 @@ func (group *ChecksGroup) Add(check *Check) { } func skipCheck(check *Check, reason string) { - logrus.Infof("Skipping check %s, reason: %s", check.ID, reason) + check.LogInfo("Skipping check %s, reason: %s", check.ID, reason) fmt.Printf("[ "+cli.Yellow+"SKIP"+cli.Reset+" ] %s\n", check.ID) @@ -99,7 +99,7 @@ func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck } func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) { - logrus.Tracef("GROUP %s - Running beforeAll", group.name) + log.Debug("GROUP %s - Running beforeAll", group.name) if group.beforeAllFn == nil { return nil } @@ -108,14 +108,14 @@ func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) { defer func() { if r := recover(); r != nil { stackTrace := fmt.Sprint(r) + "\n" + string(debug.Stack()) - logrus.Errorf("Panic while running beforeAll function:\n%v", stackTrace) + log.Error("Panic while running beforeAll function:\n%v", stackTrace) // Set first check's result as error and skip the remaining ones. err = onFailure("beforeAll function panicked", "\n:"+stackTrace, group, firstCheck, checks) } }() if err := group.beforeAllFn(checks); err != nil { - logrus.Errorf("Unexpected error while running beforeAll function: %v", err) + log.Error("Unexpected error while running beforeAll function: %v", err) // Set first check's result as error and skip the remaining ones. return onFailure("beforeAll function unexpected error", err.Error(), group, firstCheck, checks) } @@ -124,7 +124,7 @@ func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) { } func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) { - logrus.Tracef("GROUP %s - Running afterAll", group.name) + log.Debug("GROUP %s - Running afterAll", group.name) if group.afterAllFn == nil { return nil @@ -135,14 +135,14 @@ func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) { defer func() { if r := recover(); r != nil { stackTrace := fmt.Sprint(r) + "\n" + string(debug.Stack()) - logrus.Errorf("Panic while running afterAll function:\n%v", stackTrace) + log.Error("Panic while running afterAll function:\n%v", stackTrace) // Set last check's result as error, no need to skip anyone. err = onFailure("afterAll function panicked", "\n: "+stackTrace, group, lastCheck, zeroRemainingChecks) } }() if err := group.afterAllFn(group.checks); err != nil { - logrus.Errorf("Unexpected error while running afterAll function: %v", err.Error()) + log.Error("Unexpected error while running afterAll function: %v", err.Error()) // Set last check's result as error, no need to skip anyone. 
return onFailure("afterAll function unexpected error", err.Error(), group, lastCheck, zeroRemainingChecks) } @@ -151,7 +151,7 @@ func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) { } func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) { - logrus.Tracef("GROUP %s - Running beforeEach for check %s", group.name, check.ID) + log.Debug("GROUP %s - Running beforeEach for check %s", group.name, check.ID) if group.beforeEachFn == nil { return nil } @@ -159,14 +159,14 @@ func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) defer func() { if r := recover(); r != nil { stackTrace := fmt.Sprint(r) + "\n" + string(debug.Stack()) - logrus.Errorf("Panic while running beforeEach function:\n%v", stackTrace) + log.Error("Panic while running beforeEach function:\n%v", stackTrace) // Set last check's result as error, no need to skip anyone. err = onFailure("beforeEach function panicked", "\n: "+stackTrace, group, check, remainingChecks) } }() if err := group.beforeEachFn(check); err != nil { - logrus.Errorf("Unexpected error while running beforeEach function:\n%v", err.Error()) + log.Error("Unexpected error while running beforeEach function:\n%v", err.Error()) // Set last check's result as error, no need to skip anyone. return onFailure("beforeEach function unexpected error", err.Error(), group, check, remainingChecks) } @@ -175,7 +175,7 @@ func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) } func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) { - logrus.Tracef("GROUP %s - Running afterEach for check %s", group.name, check.ID) + log.Debug("GROUP %s - Running afterEach for check %s", group.name, check.ID) if group.afterEachFn == nil { return nil @@ -184,14 +184,14 @@ func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) defer func() { if r := recover(); r != nil { stackTrace := fmt.Sprint(r) + "\n" + string(debug.Stack()) - logrus.Errorf("Panic while running afterEach function:\n%v", stackTrace) + log.Error("Panic while running afterEach function:\n%v", stackTrace) // Set last check's result as error, no need to skip anyone. err = onFailure("afterEach function panicked", "\n: "+stackTrace, group, check, remainingChecks) } }() if err := group.afterEachFn(check); err != nil { - logrus.Errorf("Unexpected error while running afterEach function:\n%v", err.Error()) + log.Error("Unexpected error while running afterEach function:\n%v", err.Error()) // Set last check's result as error, no need to skip anyone. 
return onFailure("afterEach function unexpected error", err.Error(), group, check, remainingChecks) } @@ -204,8 +204,6 @@ func shouldSkipCheck(check *Check) (skip bool, reasons []string) { return false, []string{} } - logrus.Tracef("Running check %s skipCheck functions (%d).", check.ID, len(check.SkipCheckFns)) - // Short-circuit if len(check.SkipCheckFns) == 0 { return false, []string{} @@ -217,7 +215,7 @@ func shouldSkipCheck(check *Check) (skip bool, reasons []string) { defer func() { if r := recover(); r != nil { stackTrace := fmt.Sprint(r) + "\n" + string(debug.Stack()) - logrus.Errorf("Skip check function (idx=%d) panic'ed: %s", currentSkipFnIndex, stackTrace) + check.LogError("Skip check function (idx=%d) panic'ed: %s", currentSkipFnIndex, stackTrace) skip = true reasons = []string{fmt.Sprintf("skipCheckFn (idx=%d) panic:\n%s", currentSkipFnIndex, stackTrace)} } @@ -252,18 +250,18 @@ func shouldSkipCheck(check *Check) (skip bool, reasons []string) { } func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err error) { - logrus.Infof("Running check %s", check.ID) + check.LogInfo("Running check") defer func() { if r := recover(); r != nil { stackTrace := fmt.Sprint(r) + "\n" + string(debug.Stack()) - logrus.Errorf("Panic while running check %s function:\n%v", check.ID, stackTrace) + check.LogError("Panic while running check %s function:\n%v", check.ID, stackTrace) err = onFailure(fmt.Sprintf("check %s function panic", check.ID), stackTrace, group, check, remainingChecks) } }() if err := check.Run(); err != nil { - logrus.Errorf("Unexpected error while running check %s function: %v", check.ID, err.Error()) + check.LogError("Unexpected error while running check %s function: %v", check.ID, err.Error()) return onFailure(fmt.Sprintf("check %s function unexpected error", check.ID), err.Error(), group, check, remainingChecks) } @@ -286,7 +284,7 @@ func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err e // //nolint:funlen func (group *ChecksGroup) RunChecks(labelsExpr string, stopChan <-chan bool, abortChan chan bool) (errs []error) { - logrus.Infof("Running group %q checks.", group.name) + log.Info("Running group %q checks.", group.name) fmt.Printf("Running suite %s\n", strings.ToUpper(group.name)) labelsExprEvaluator, err := NewLabelsExprEvaluator(labelsExpr) @@ -322,7 +320,7 @@ func (group *ChecksGroup) RunChecks(labelsExpr string, stopChan <-chan bool, abo return errs } - logrus.Infof("Checks to run: %d (group's total=%d)", len(checks), len(group.checks)) + log.Info("Checks to run: %d (group's total=%d)", len(checks), len(group.checks)) group.currentRunningCheckIdx = 0 for i, check := range checks { // Fast stop in case the stop (abort/timeout) signal received. 
@@ -393,7 +391,7 @@ func (group *ChecksGroup) OnAbort(labelsExpr, abortReason string) error { } func (group *ChecksGroup) RecordChecksResults() { - logrus.Infof("Recording checks results of group %s", group.name) + log.Info("Recording checks results of group %s", group.name) for _, check := range group.checks { recordCheckResult(check) } diff --git a/pkg/checksdb/labels.go b/pkg/checksdb/labels.go index 4b19a6683..8304fa606 100644 --- a/pkg/checksdb/labels.go +++ b/pkg/checksdb/labels.go @@ -7,7 +7,7 @@ import ( "go/token" "strings" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" ) type LabelsExprEvaluator interface { @@ -69,7 +69,7 @@ func (exprParser labelsExprParser) Eval(labels []string) bool { return false } default: - logrus.Errorf("Unexpected/not-implemented expr: %v", v) + log.Error("Unexpected/not-implemented expr: %v", v) return false } return false diff --git a/pkg/claimhelper/claimhelper.go b/pkg/claimhelper/claimhelper.go index 727a40151..d0b67e69a 100644 --- a/pkg/claimhelper/claimhelper.go +++ b/pkg/claimhelper/claimhelper.go @@ -26,7 +26,7 @@ import ( "os" "time" - log "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/checksdb" "github.com/test-network-function/cnf-certification-test/pkg/diagnostics" @@ -144,7 +144,7 @@ func (c *ClaimBuilder) Build(outputFile string) { payload := MarshalClaimOutput(c.claimRoot) WriteClaimOutput(outputFile, payload) - log.Infof("Claim file created at %s", outputFile) + log.Info("Claim file created at %s", outputFile) } func populateXMLFromClaim(c claim.Claim, startTime, endTime time.Time) TestSuitesXML { @@ -216,13 +216,15 @@ func (c *ClaimBuilder) ToJUnitXML(outputFile string, startTime, endTime time.Tim // Write the JUnit XML file. 
payload, err := xml.MarshalIndent(xmlOutput, "", " ") if err != nil { - log.Fatalf("Failed to generate the xml: %v", err) + log.Error("Failed to generate the xml: %v", err) + os.Exit(1) } - log.Infof("Writing JUnit XML file: %s", outputFile) + log.Info("Writing JUnit XML file: %s", outputFile) err = os.WriteFile(outputFile, payload, claimFilePermissions) if err != nil { - log.Fatal("Failed to write the xml file") + log.Error("Failed to write the xml file") + os.Exit(1) } } @@ -236,7 +238,7 @@ func MarshalConfigurations() (configurations []byte, err error) { config := provider.GetTestEnvironment() configurations, err = j.Marshal(config) if err != nil { - log.Errorf("error converting configurations to JSON: %v", err) + log.Error("error converting configurations to JSON: %v", err) return configurations, err } return configurations, nil @@ -247,7 +249,8 @@ func MarshalConfigurations() (configurations []byte, err error) { func UnmarshalConfigurations(configurations []byte, claimConfigurations map[string]interface{}) { err := j.Unmarshal(configurations, &claimConfigurations) if err != nil { - log.Fatalf("error unmarshalling configurations: %v", err) + log.Error("error unmarshalling configurations: %v", err) + os.Exit(1) } } @@ -255,7 +258,8 @@ func UnmarshalConfigurations(configurations []byte, claimConfigurations map[stri func UnmarshalClaim(claimFile []byte, claimRoot *claim.Root) { err := j.Unmarshal(claimFile, &claimRoot) if err != nil { - log.Fatalf("error unmarshalling claim file: %v", err) + log.Error("error unmarshalling claim file: %v", err) + os.Exit(1) } } @@ -263,13 +267,13 @@ func UnmarshalClaim(claimFile []byte, claimRoot *claim.Root) { func ReadClaimFile(claimFileName string) (data []byte, err error) { data, err = os.ReadFile(claimFileName) if err != nil { - log.Errorf("ReadFile failed with err: %v", err) + log.Error("ReadFile failed with err: %v", err) } path, err := os.Getwd() if err != nil { - log.Errorf("Getwd failed with err: %v", err) + log.Error("Getwd failed with err: %v", err) } - log.Infof("Reading claim file at path: %s", path) + log.Info("Reading claim file at path: %s", path) return data, nil } @@ -277,7 +281,7 @@ func ReadClaimFile(claimFileName string) (data []byte, err error) { func GetConfigurationFromClaimFile(claimFileName string) (env *provider.TestEnvironment, err error) { data, err := ReadClaimFile(claimFileName) if err != nil { - log.Errorf("ReadClaimFile failed with err: %v", err) + log.Error("ReadClaimFile failed with err: %v", err) return env, err } var aRoot claim.Root @@ -296,7 +300,8 @@ func GetConfigurationFromClaimFile(claimFileName string) (env *provider.TestEnvi func MarshalClaimOutput(claimRoot *claim.Root) []byte { payload, err := j.MarshalIndent(claimRoot, "", " ") if err != nil { - log.Fatalf("Failed to generate the claim: %v", err) + log.Error("Failed to generate the claim: %v", err) + os.Exit(1) } return payload } @@ -305,7 +310,8 @@ func MarshalClaimOutput(claimRoot *claim.Root) []byte { func WriteClaimOutput(claimOutputFile string, payload []byte) { err := os.WriteFile(claimOutputFile, payload, claimFilePermissions) if err != nil { - log.Fatalf("Error writing claim data:\n%s", string(payload)) + log.Error("Error writing claim data:\n%s", string(payload)) + os.Exit(1) } } @@ -348,7 +354,8 @@ func LoadJUnitXMLIntoMap(result map[string]interface{}, junitFilename, key strin } result[key], err = junit.ExportJUnitAsMap(junitFilename) if err != nil { - log.Fatalf("error reading JUnit XML file into JSON: %v", err) + log.Error("error reading JUnit 
XML file into JSON: %v", err) + os.Exit(1) } } diff --git a/pkg/compatibility/compatibility.go b/pkg/compatibility/compatibility.go index 0f142f69b..2457ddc17 100644 --- a/pkg/compatibility/compatibility.go +++ b/pkg/compatibility/compatibility.go @@ -21,7 +21,7 @@ import ( "time" gv "github.com/hashicorp/go-version" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/stringhelper" ) @@ -258,12 +258,12 @@ func IsRHCOSCompatible(machineVersion, ocpVersion string) bool { // Collect the machine version and the entry version mv, err := gv.NewVersion(machineVersion) if err != nil { - logrus.Errorf("Error parsing machineVersion: %s err: %v", machineVersion, err) + log.Error("Error parsing machineVersion: %s err: %v", machineVersion, err) return false } ev, err := gv.NewVersion(entry.MinRHCOSVersion) if err != nil { - logrus.Errorf("Error parsing MinRHCOSVersion: %s err: %v", entry.MinRHCOSVersion, err) + log.Error("Error parsing MinRHCOSVersion: %s err: %v", entry.MinRHCOSVersion, err) return false } diff --git a/pkg/configuration/utils.go b/pkg/configuration/utils.go index f149a54b6..4f321279d 100644 --- a/pkg/configuration/utils.go +++ b/pkg/configuration/utils.go @@ -21,7 +21,7 @@ import ( "os" "github.com/kelseyhightower/envconfig" - log "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" "gopkg.in/yaml.v3" ) @@ -39,7 +39,7 @@ func LoadConfiguration(filePath string) (TestConfiguration, error) { return configuration, nil } - log.Info("Loading config from file: ", filePath) + log.Info("Loading config from file: %s", filePath) contents, err := os.ReadFile(filePath) if err != nil { return configuration, err @@ -52,10 +52,10 @@ func LoadConfiguration(filePath string) (TestConfiguration, error) { // Set default namespace for the debug daemonset pods, in case it was not set. if configuration.DebugDaemonSetNamespace == "" { - log.Warnf("No namespace configured for the debug DaemonSet. Defaulting to namespace %s", defaultDebugDaemonSetNamespace) + log.Warn("No namespace configured for the debug DaemonSet. 
Defaulting to namespace %s", defaultDebugDaemonSetNamespace) configuration.DebugDaemonSetNamespace = defaultDebugDaemonSetNamespace } else { - log.Infof("Namespace for debug DaemonSet: %s", configuration.DebugDaemonSetNamespace) + log.Info("Namespace for debug DaemonSet: %s", configuration.DebugDaemonSetNamespace) } confLoaded = true diff --git a/pkg/diagnostics/diagnostics.go b/pkg/diagnostics/diagnostics.go index 40b8af855..e9bf0666d 100644 --- a/pkg/diagnostics/diagnostics.go +++ b/pkg/diagnostics/diagnostics.go @@ -24,8 +24,8 @@ import ( "context" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/provider" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -62,13 +62,13 @@ func GetCniPlugins() (out map[string][]interface{}) { ctx := clientsholder.NewContext(debugPod.Namespace, debugPod.Name, debugPod.Spec.Containers[0].Name) outStr, errStr, err := o.ExecCommandContainer(ctx, cniPluginsCommand) if err != nil || errStr != "" { - logrus.Errorf("Failed to execute command %s in debug pod %s", cniPluginsCommand, debugPod.String()) + log.Error("Failed to execute command %s in debug pod %s", cniPluginsCommand, debugPod.String()) continue } decoded := []interface{}{} err = json.Unmarshal([]byte(outStr), &decoded) if err != nil { - logrus.Errorf("could not decode json file because of: %s", err) + log.Error("could not decode json file because of: %s", err) continue } out[debugPod.Spec.NodeName] = decoded @@ -85,25 +85,25 @@ func GetHwInfoAllNodes() (out map[string]NodeHwInfo) { hw := NodeHwInfo{} lscpu, err := getHWJsonOutput(debugPod, o, lscpuCommand) if err != nil { - logrus.Errorf("problem getting lscpu for node %s", debugPod.Spec.NodeName) + log.Error("problem getting lscpu for node %s", debugPod.Spec.NodeName) } var ok bool hw.Lscpu, ok = lscpu.(map[string]interface{})["lscpu"] if !ok { - logrus.Errorf("problem casting lscpu field for node %s, lscpu=%v", debugPod.Spec.NodeName, lscpu) + log.Error("problem casting lscpu field for node %s, lscpu=%v", debugPod.Spec.NodeName, lscpu) } hw.IPconfig, err = getHWJsonOutput(debugPod, o, ipCommand) if err != nil { - logrus.Errorf("problem getting ip config for node %s", debugPod.Spec.NodeName) + log.Error("problem getting ip config for node %s", debugPod.Spec.NodeName) } hw.Lsblk, err = getHWJsonOutput(debugPod, o, lsblkCommand) if err != nil { - logrus.Errorf("problem getting lsblk for node %s", debugPod.Spec.NodeName) + log.Error("problem getting lsblk for node %s", debugPod.Spec.NodeName) } hw.Lspci, err = getHWTextOutput(debugPod, o, lspciCommand) if err != nil { - logrus.Errorf("problem getting lspci for node %s", debugPod.Spec.NodeName) + log.Error("problem getting lspci for node %s", debugPod.Spec.NodeName) } out[debugPod.Spec.NodeName] = hw } @@ -141,12 +141,12 @@ func GetNodeJSON() (out map[string]interface{}) { nodesJSON, err := json.Marshal(env.Nodes) if err != nil { - logrus.Errorf("Could not Marshall env.Nodes, err=%v", err) + log.Error("Could not Marshall env.Nodes, err=%v", err) } err = json.Unmarshal(nodesJSON, &out) if err != nil { - logrus.Errorf("Could not unMarshall env.Nodes, err=%v", err) + log.Error("Could not unMarshall env.Nodes, err=%v", err) } return out @@ -157,25 +157,25 @@ func GetCsiDriver() (out map[string]interface{}) { o := clientsholder.GetClientsHolder() csiDriver, err := 
o.K8sClient.StorageV1().CSIDrivers().List(context.TODO(), apimachineryv1.ListOptions{}) if err != nil { - logrus.Errorf("Fail CSIDrivers.list err:%s", err) + log.Error("Fail CSIDrivers.list err:%s", err) return out } scheme := runtime.NewScheme() err = storagev1.AddToScheme(scheme) if err != nil { - logrus.Errorf("Fail AddToScheme err:%s", err) + log.Error("Fail AddToScheme err:%s", err) return out } codec := serializer.NewCodecFactory(scheme).LegacyCodec(storagev1.SchemeGroupVersion) data, err := runtime.Encode(codec, csiDriver) if err != nil { - logrus.Errorf("Fail to encode Nodes to json, er: %s", err) + log.Error("Fail to encode Nodes to json, er: %s", err) return out } err = json.Unmarshal(data, &out) if err != nil { - logrus.Errorf("failed to marshall nodes json, err: %v", err) + log.Error("failed to marshall nodes json, err: %v", err) return out } return out diff --git a/pkg/loghelper/loghelper.go b/pkg/loghelper/loghelper.go index 6164fb1b6..a8a520529 100644 --- a/pkg/loghelper/loghelper.go +++ b/pkg/loghelper/loghelper.go @@ -18,12 +18,8 @@ package loghelper import ( "fmt" - "path" - "runtime" - "strconv" - "time" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" ) // CuratedLogLines @@ -37,7 +33,7 @@ type CuratedLogLines struct { func (list *CuratedLogLines) AddLogLine(format string, args ...interface{}) { message := fmt.Sprintf(format+"\n", args...) list.lines = append(list.lines, message) - logrus.Debug(message) + log.Debug(message) } // Init checks a slice for a given string. @@ -48,18 +44,3 @@ func (list *CuratedLogLines) Init(lines ...string) { func (list *CuratedLogLines) GetLogLines() []string { return list.lines } - -// SetLogFormat sets the log format for logrus -func SetLogFormat() { - customFormatter := new(logrus.TextFormatter) - customFormatter.TimestampFormat = time.StampMilli - customFormatter.PadLevelText = true - customFormatter.FullTimestamp = true - customFormatter.ForceColors = true - logrus.SetReportCaller(true) - customFormatter.CallerPrettyfier = func(f *runtime.Frame) (string, string) { - _, filename := path.Split(f.File) - return strconv.Itoa(f.Line) + "]", fmt.Sprintf("[%s:", filename) - } - logrus.SetFormatter(customFormatter) -} diff --git a/pkg/loghelper/loghelper_test.go b/pkg/loghelper/loghelper_test.go index 0043f7fb1..fb0923d5a 100644 --- a/pkg/loghelper/loghelper_test.go +++ b/pkg/loghelper/loghelper_test.go @@ -17,13 +17,16 @@ package loghelper import ( + "os" "testing" "github.com/stretchr/testify/assert" + "github.com/test-network-function/cnf-certification-test/internal/log" ) func TestLogLines(t *testing.T) { - SetLogFormat() + logLevel, _ := log.ParseLevel("INFO") + log.SetupLogger(os.Stdout, logLevel) ll := CuratedLogLines{} ll.Init("one", "two", "three") assert.Equal(t, []string{"one", "two", "three"}, ll.GetLogLines()) diff --git a/pkg/provider/containers.go b/pkg/provider/containers.go index 8b0bb3361..2125e640b 100644 --- a/pkg/provider/containers.go +++ b/pkg/provider/containers.go @@ -21,13 +21,12 @@ import ( "context" "errors" "fmt" - "log" - "os" + defaultLog "log" "strings" "github.com/go-logr/logr" "github.com/go-logr/stdr" - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" corev1 "k8s.io/api/core/v1" "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts" @@ -81,19 +80,19 @@ func (c *Container) GetUID() (string, error) { uid = split[len(split)-1] } if uid == "" { - logrus.Debugln(fmt.Sprintf("could not find 
uid of %s/%s/%s\n", c.Namespace, c.Podname, c.Name)) + log.Debug(fmt.Sprintf("could not find uid of %s/%s/%s\n", c.Namespace, c.Podname, c.Name)) return "", errors.New("cannot determine container UID") } - logrus.Debugln(fmt.Sprintf("uid of %s/%s/%s=%s\n", c.Namespace, c.Podname, c.Name, uid)) + log.Debug(fmt.Sprintf("uid of %s/%s/%s=%s\n", c.Namespace, c.Podname, c.Name, uid)) return uid, nil } func (c *Container) SetPreflightResults(preflightImageCache map[string]plibRuntime.Results, env *TestEnvironment) error { - logrus.Infof("Running preflight container test against image: %s with name: %s", c.Image, c.Name) + log.Info("Running preflight container test against image: %s with name: %s", c.Image, c.Name) // Short circuit if the image already exists in the cache if _, exists := preflightImageCache[c.Image]; exists { - logrus.Infof("Container image: %s exists in the cache. Skipping this run.", c.Image) + log.Info("Container image: %s exists in the cache. Skipping this run.", c.Image) c.PreflightResults = preflightImageCache[c.Image] return nil } @@ -101,7 +100,7 @@ func (c *Container) SetPreflightResults(preflightImageCache map[string]plibRunti opts := []plibContainer.Option{} opts = append(opts, plibContainer.WithDockerConfigJSONFromFile(env.GetDockerConfigFile())) if env.IsPreflightInsecureAllowed() { - logrus.Info("Insecure connections are being allowed to preflight") + log.Info("Insecure connections are being allowed to preflight") opts = append(opts, plibContainer.WithInsecureConnection()) } @@ -114,21 +113,20 @@ func (c *Container) SetPreflightResults(preflightImageCache map[string]plibRunti // Add logger output to the context logbytes := bytes.NewBuffer([]byte{}) - checklogger := log.Default() + checklogger := defaultLog.Default() checklogger.SetOutput(logbytes) logger := stdr.New(checklogger) ctx = logr.NewContext(ctx, logger) check := plibContainer.NewCheck(c.Image, opts...) results, runtimeErr := check.Run(ctx) - logrus.StandardLogger().Out = os.Stderr if runtimeErr != nil { - logrus.Error(runtimeErr) + log.Error("%v", runtimeErr) return runtimeErr } - // Take all of the preflight logs and stick them into logrus. - logrus.Info(logbytes.String()) + // Take all of the preflight logs and stick them into our log. + log.Info(logbytes.String()) // Store the result into the cache and store the Results into the container's PreflightResults var. 
preflightImageCache[c.Image] = results diff --git a/pkg/provider/filters.go b/pkg/provider/filters.go index 04e260ca1..43cbc3e11 100644 --- a/pkg/provider/filters.go +++ b/pkg/provider/filters.go @@ -20,8 +20,8 @@ import ( "fmt" "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" ) func (env *TestEnvironment) GetGuaranteedPodsWithExclusiveCPUs() []*Pod { @@ -125,7 +125,7 @@ func filterDPDKRunningPods(pods []*Pod) []*Pod { findCommand := fmt.Sprintf("%s '%s'", findDeviceSubCommand, pod.MultusPCIs[0]) outStr, errStr, err := o.ExecCommandContainer(ctx, findCommand) if err != nil || errStr != "" { - logrus.Errorf("Failed to execute command %s in debug %s, errStr: %s, err: %v", findCommand, pod.String(), errStr, err) + log.Error("Failed to execute command %s in debug %s, errStr: %s, err: %v", findCommand, pod.String(), errStr, err) continue } if strings.Contains(outStr, dpdkDriver) { diff --git a/pkg/provider/isolation.go b/pkg/provider/isolation.go index b47760a00..e3df68bad 100644 --- a/pkg/provider/isolation.go +++ b/pkg/provider/isolation.go @@ -17,7 +17,7 @@ package provider import ( - "github.com/sirupsen/logrus" + "github.com/test-network-function/cnf-certification-test/internal/log" ) func AreResourcesIdentical(p *Pod) bool { @@ -25,7 +25,7 @@ func AreResourcesIdentical(p *Pod) bool { for _, cut := range p.Containers { // At least limits must be specified (requests default to limits if not specified) if len(cut.Resources.Limits) == 0 { - logrus.Debugf("%s has been found with undefined limits.", cut.String()) + log.Debug("%s has been found with undefined limits.", cut.String()) return false } @@ -37,12 +37,12 @@ func AreResourcesIdentical(p *Pod) bool { // Check for mismatches if !cpuRequests.Equal(*cpuLimits) { - logrus.Debugf("%s has CPU requests %f and limits %f that do not match.", cut.String(), cpuRequests.AsApproximateFloat64(), cpuLimits.AsApproximateFloat64()) + log.Debug("%s has CPU requests %f and limits %f that do not match.", cut.String(), cpuRequests.AsApproximateFloat64(), cpuLimits.AsApproximateFloat64()) return false } if !memoryRequests.Equal(*memoryLimits) { - logrus.Debugf("%s has memory requests %f and limits %f that do not match.", cut.String(), memoryRequests.AsApproximateFloat64(), memoryLimits.AsApproximateFloat64()) + log.Debug("%s has memory requests %f and limits %f that do not match.", cut.String(), memoryRequests.AsApproximateFloat64(), memoryLimits.AsApproximateFloat64()) return false } } @@ -62,16 +62,16 @@ func AreCPUResourcesWholeUnits(p *Pod) bool { cpuLimitsMillis := cut.Resources.Limits.Cpu().MilliValue() if cpuRequestsMillis == 0 || cpuLimitsMillis == 0 { - logrus.Debugf("%s has been found with undefined requests or limits.", cut.String()) + log.Debug("%s has been found with undefined requests or limits.", cut.String()) return false } if !isInteger(cpuRequestsMillis) { - logrus.Debugf("%s has CPU requests %d (milli) that has to be a whole unit.", cut.String(), cpuRequestsMillis) + log.Debug("%s has CPU requests %d (milli) that has to be a whole unit.", cut.String(), cpuRequestsMillis) return false } if !isInteger(cpuLimitsMillis) { - logrus.Debugf("%s has CPU limits %d (milli) that has to be a whole unit.", cut.String(), cpuLimitsMillis) + log.Debug("%s has CPU limits %d (milli) that has to be a whole unit.", cut.String(), cpuLimitsMillis) return false } } @@ -91,20 +91,20 @@ func LoadBalancingDisabled(p *Pod) 
bool { if v == disableVar { cpuLoadBalancingDisabled = true } else { - logrus.Debugf("Annotation cpu-load-balancing.crio.io has an invalid value for CPU isolation. Must be 'disable'.") + log.Debug("Annotation cpu-load-balancing.crio.io has an invalid value for CPU isolation. Must be 'disable'.") } } else { - logrus.Debugf("Annotation cpu-load-balancing.crio.io is missing.") + log.Debug("Annotation cpu-load-balancing.crio.io is missing.") } if v, ok := p.ObjectMeta.Annotations["irq-load-balancing.crio.io"]; ok { if v == disableVar { irqLoadBalancingDisabled = true } else { - logrus.Debugf("Annotation irq-load-balancing.crio.io has an invalid value for CPU isolation. Must be 'disable'.") + log.Debug("Annotation irq-load-balancing.crio.io has an invalid value for CPU isolation. Must be 'disable'.") } } else { - logrus.Debugf("Annotation irq-load-balancing.crio.io is missing.") + log.Debug("Annotation irq-load-balancing.crio.io is missing.") } // Both conditions have to be set to 'disable' diff --git a/pkg/provider/operators.go b/pkg/provider/operators.go index f58dabcfa..62dc81446 100644 --- a/pkg/provider/operators.go +++ b/pkg/provider/operators.go @@ -21,7 +21,7 @@ import ( "context" "errors" "fmt" - "log" + defaultLog "log" "os" "sort" "strings" @@ -32,8 +32,8 @@ import ( "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts" plibRuntime "github.com/redhat-openshift-ecosystem/openshift-preflight/certification" plibOperator "github.com/redhat-openshift-ecosystem/openshift-preflight/operator" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -70,7 +70,7 @@ func (op *Operator) String() string { func (op *Operator) SetPreflightResults(env *TestEnvironment) error { if len(op.InstallPlans) == 0 { - logrus.Warnf("%s has no InstallPlans. Skipping setting preflight results", op.String()) + log.Warn("%s has no InstallPlans. Skipping setting preflight results", op.String()) return nil } @@ -87,13 +87,13 @@ func (op *Operator) SetPreflightResults(env *TestEnvironment) error { opts := []plibOperator.Option{} opts = append(opts, plibOperator.WithDockerConfigJSONFromFile(env.GetDockerConfigFile())) if env.IsPreflightInsecureAllowed() { - logrus.Info("Insecure connections are being allowed to preflight") + log.Info("Insecure connections are being allowed to preflight") opts = append(opts, plibOperator.WithInsecureConnection()) } // Add logger output to the context logbytes := bytes.NewBuffer([]byte{}) - checklogger := log.Default() + checklogger := defaultLog.Default() checklogger.SetOutput(logbytes) logger := stdr.New(checklogger) ctx = logr.NewContext(ctx, logger) @@ -101,19 +101,20 @@ func (op *Operator) SetPreflightResults(env *TestEnvironment) error { check := plibOperator.NewCheck(bundleImage, indexImage, oc.KubeConfig, opts...) results, runtimeErr := check.Run(ctx) if runtimeErr != nil { - logrus.Error(runtimeErr) + log.Error("%v", runtimeErr) return runtimeErr } - // Take all of the preflight logs and stick them into logrus. - logrus.Info(logbytes.String()) + // Take all of the preflight logs and stick them into our log. 
+ log.Info(logbytes.String()) e := os.RemoveAll("artifacts/") if e != nil { - logrus.Fatal(e) + log.Error("%v", e) + os.Exit(1) } - logrus.Infof("Storing operator preflight results into object for %s", bundleImage) + log.Info("Storing operator preflight results into object for %s", bundleImage) op.PreflightResults = results return nil } @@ -127,9 +128,9 @@ func getUniqueCsvListByName(csvs []*olmv1Alpha.ClusterServiceVersion) []*olmv1Al } uniqueCsvsList := []*olmv1Alpha.ClusterServiceVersion{} - logrus.Infof("Found %d unique CSVs", len(uniqueCsvsMap)) + log.Info("Found %d unique CSVs", len(uniqueCsvsMap)) for name, csv := range uniqueCsvsMap { - logrus.Infof(" CSV: %s", name) + log.Info(" CSV: %s", name) uniqueCsvsList = append(uniqueCsvsList, csv) } @@ -165,7 +166,7 @@ func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion, op.Phase = csv.Status.Phase packageAndVersion := strings.SplitN(csv.Name, ".", maxSize) if len(packageAndVersion) == 0 { - logrus.Tracef("Empty CSV Name (package.version), cannot extract a package or a version, skipping. Csv: %+v", csv) + log.Debug("Empty CSV Name (package.version), cannot extract a package or a version, skipping. Csv: %+v", csv) continue } op.PackageFromCsvName = packageAndVersion[0] @@ -174,15 +175,15 @@ func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion, if getAtLeastOneSubscription(op, csv, subscriptions) { targetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace) if err != nil { - logrus.Errorf("Failed to get target namespaces for operator %s: %v", csv.Name, err) + log.Error("Failed to get target namespaces for operator %s: %v", csv.Name, err) } else { op.TargetNamespaces = targetNamespaces op.IsClusterWide = len(targetNamespaces) == 0 } } else { - logrus.Warnf("Subscription not found for CSV: %s (ns %s)", csv.Name, csv.Namespace) + log.Warn("Subscription not found for CSV: %s (ns %s)", csv.Name, csv.Namespace) } - logrus.Infof("Getting installplans for op %s (subs %s ns %s)", op.Name, op.SubscriptionName, op.SubscriptionNamespace) + log.Info("Getting installplans for op %s (subs %s ns %s)", op.Name, op.SubscriptionName, op.SubscriptionNamespace) // Get at least one Install Plan and update the Operator object with it. getAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources) operators = append(operators, op) @@ -217,7 +218,7 @@ func getAtLeastOneCsv(csv *olmv1Alpha.ClusterServiceVersion, installPlan *olmv1A } if installPlan.Status.BundleLookups == nil { - logrus.Warnf("InstallPlan %s for csv %s (ns %s) does not have bundle lookups. It will be skipped.", installPlan.Name, csv.Name, csv.Namespace) + log.Warn("InstallPlan %s for csv %s (ns %s) does not have bundle lookups. 
It will be skipped.", installPlan.Name, csv.Name, csv.Namespace) continue } atLeastOneCsv = true @@ -240,7 +241,7 @@ func getAtLeastOneInstallPlan(op *Operator, csv *olmv1Alpha.ClusterServiceVersio indexImage, catalogErr := getCatalogSourceImageIndexFromInstallPlan(installPlan, allCatalogSources) if catalogErr != nil { - logrus.Tracef("failed to get installPlan image index for csv %s (ns %s) installPlan %s, err: %v", + log.Debug("failed to get installPlan image index for csv %s (ns %s) installPlan %s, err: %v", csv.Name, csv.Namespace, installPlan.Name, catalogErr) continue } diff --git a/pkg/provider/pods.go b/pkg/provider/pods.go index 130192440..d1d6ea399 100644 --- a/pkg/provider/pods.go +++ b/pkg/provider/pods.go @@ -23,9 +23,8 @@ import ( "strconv" "strings" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" - "github.com/test-network-function/cnf-certification-test/pkg/tnf" + "github.com/test-network-function/cnf-certification-test/internal/log" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -54,12 +53,12 @@ func NewPod(aPod *corev1.Pod) (out Pod) { out.MultusIPs = make(map[string][]string) out.MultusIPs, err = GetPodIPsPerNet(aPod.GetAnnotations()[CniNetworksStatusKey]) if err != nil { - logrus.Errorf("Could not decode networks-status annotation, error: %v", err) + log.Error("Could not decode networks-status annotation, error: %v", err) } out.MultusPCIs, err = GetPciPerPod(aPod.GetAnnotations()[CniNetworksStatusKey]) if err != nil { - logrus.Errorf("Could not decode networks-status annotation, error: %v", err) + log.Error("Could not decode networks-status annotation, error: %v", err) } if _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok { @@ -92,12 +91,12 @@ func (p *Pod) IsCPUIsolationCompliant() bool { isCPUIsolated := true if !LoadBalancingDisabled(p) { - tnf.Logf(logrus.DebugLevel, "%s has been found to not have annotations set correctly for CPU isolation.", p) + log.Debug("%s has been found to not have annotations set correctly for CPU isolation.", p) isCPUIsolated = false } if !p.IsRuntimeClassNameSpecified() { - tnf.Logf(logrus.DebugLevel, "%s has been found to not have runtimeClassName specified.", p) + log.Debug("%s has been found to not have runtimeClassName specified.", p) isCPUIsolated = false } @@ -115,7 +114,7 @@ func (p *Pod) AffinityRequired() bool { if val, ok := p.Labels[AffinityRequiredKey]; ok { result, err := strconv.ParseBool(val) if err != nil { - logrus.Warnf("failure to parse bool %v", val) + log.Warn("failure to parse bool %v", val) return false } return result @@ -316,7 +315,7 @@ func isNetworkAttachmentDefinitionConfigTypeSRIOV(nadConfig string) (bool, error // If type is found, it's a single plugin CNI config. 
if cniConfig.Type != nil { - logrus.Tracef("Single plugin config type found: %+v, type=%s", cniConfig, *cniConfig.Type) + log.Debug("Single plugin config type found: %+v, type=%s", cniConfig, *cniConfig.Type) return *cniConfig.Type == typeSriov, nil } @@ -324,7 +323,7 @@ func isNetworkAttachmentDefinitionConfigTypeSRIOV(nadConfig string) (bool, error return false, fmt.Errorf("invalid multi-plugins cni config: %s", nadConfig) } - logrus.Tracef("CNI plugins: %+v", *cniConfig.Plugins) + log.Debug("CNI plugins: %+v", *cniConfig.Plugins) for i := range *cniConfig.Plugins { plugin := (*cniConfig.Plugins)[i] if plugin.Type == typeSriov { @@ -357,7 +356,7 @@ func (p *Pod) IsUsingSRIOV() (bool, error) { oc := clientsholder.GetClientsHolder() for _, networkName := range cncfNetworkNames { - logrus.Tracef("%s: Reviewing network-attachment definition %q", p, networkName) + log.Debug("%s: Reviewing network-attachment definition %q", p, networkName) nad, err := oc.CNCFNetworkingClient.NetworkAttachmentDefinitions(p.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get NetworkAttachment %s: %v", networkName, err) @@ -368,7 +367,7 @@ func (p *Pod) IsUsingSRIOV() (bool, error) { return false, fmt.Errorf("failed to know if network-attachment %s is sriov: %v", networkName, err) } - logrus.Tracef("%s: NAD config: %s", p, nad.Spec.Config) + log.Debug("%s: NAD config: %s", p, nad.Spec.Config) if isSRIOV { return true, nil } @@ -381,7 +380,7 @@ func (p *Pod) IsUsingSRIOV() (bool, error) { func (p *Pod) IsUsingClusterRoleBinding(clusterRoleBindings []rbacv1.ClusterRoleBinding) (bool, string, error) { // This function accepts a list of clusterRoleBindings and checks to see if the pod's service account is // tied to any of them. If it is, then it returns true, otherwise it returns false. - logrus.Infof("Pod: %s/%s is using service account: %s", p.Pod.Namespace, p.Pod.Name, p.Pod.Spec.ServiceAccountName) + log.Info("Pod: %s/%s is using service account: %s", p.Pod.Namespace, p.Pod.Name, p.Pod.Spec.ServiceAccountName) // Loop through the service accounts in the namespace, looking for a match between the pod serviceAccountName and // the service account name. 
If there is a match, check to make sure that the SA is not a 'subject' of the cluster @@ -389,7 +388,7 @@ func (p *Pod) IsUsingClusterRoleBinding(clusterRoleBindings []rbacv1.ClusterRole for crbIndex := range clusterRoleBindings { for _, subject := range clusterRoleBindings[crbIndex].Subjects { if subject.Kind == rbacv1.ServiceAccountKind && subject.Name == p.Pod.Spec.ServiceAccountName && subject.Namespace == p.Pod.Namespace { - tnf.ClaimFilePrintf("Pod %s has service account %s that is tied to cluster role binding %s", p.Pod.Name, p.Pod.Spec.ServiceAccountName, clusterRoleBindings[crbIndex].Name) + log.Debug("Pod %s has service account %s that is tied to cluster role binding %s", p.Pod.Name, p.Pod.Spec.ServiceAccountName, clusterRoleBindings[crbIndex].Name) return true, clusterRoleBindings[crbIndex].RoleRef.Name, nil } } diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go index fb1a8e492..0006a4467 100644 --- a/pkg/provider/provider.go +++ b/pkg/provider/provider.go @@ -30,8 +30,8 @@ import ( mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" - "github.com/sirupsen/logrus" "github.com/test-network-function/cnf-certification-test/internal/clientsholder" + "github.com/test-network-function/cnf-certification-test/internal/log" "github.com/test-network-function/cnf-certification-test/pkg/autodiscover" "github.com/test-network-function/cnf-certification-test/pkg/configuration" k8sPrivilegedDs "github.com/test-network-function/privileged-daemonset" @@ -208,12 +208,13 @@ func buildTestEnvironment() { //nolint:funlen env.variables = *configuration.GetTestParameters() config, err := configuration.LoadConfiguration(env.variables.ConfigurationPath) if err != nil { - logrus.Fatalf("Cannot load configuration file: %v", err) + log.Error("Cannot load configuration file: %v", err) + os.Exit(1) } // Wait for the debug pods to be ready before the autodiscovery starts. if err := deployDaemonSet(config.DebugDaemonSetNamespace); err != nil { - logrus.Errorf("The TNF daemonset could not be deployed, err=%v", err) + log.Error("The TNF daemonset could not be deployed, err=%v", err) // Because of this failure, we are only able to run a certain amount of tests that do not rely // on the existence of the daemonset debug pods. 
 		env.DaemonsetFailedToSpawn = true
@@ -296,19 +297,19 @@ func buildTestEnvironment() { //nolint:funlen
 
 	operators := createOperators(data.Csvs, data.Subscriptions, data.AllInstallPlans, data.AllCatalogSources, false, true)
 	env.Operators = operators
-	logrus.Infof("Operators found: %d", len(env.Operators))
+	log.Info("Operators found: %d", len(env.Operators))
 	for _, pod := range env.Pods {
 		isCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()
 		if err != nil {
-			logrus.Warnf("Pod %s: failed to get parent resource: %v", pod.String(), err)
+			log.Warn("Pod %s: failed to get parent resource: %v", pod.String(), err)
 			continue
 		}
 		if isCreatedByDeploymentConfig {
-			logrus.Warnf("Pod %s has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.", pod.String())
+			log.Warn("Pod %s has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.", pod.String())
 		}
 	}
-	logrus.Infof("Completed the test environment build process in %.2f seconds", time.Since(start).Seconds())
+	log.Info("Completed the test environment build process in %.2f seconds", time.Since(start).Seconds())
 }
 
 func updateCrUnderTest(scaleCrUnderTest []autodiscover.ScaleObject) []ScaleObject {
@@ -340,7 +341,7 @@ func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Co
 
 		// Warn if readiness probe did not succeeded yet.
 		if !cutStatus.Ready {
-			logrus.Warnf("%s is not ready yet.", &container)
+			log.Warn("%s is not ready yet.", &container)
 		}
 
 		// Warn if container state is not running.
@@ -356,7 +357,7 @@ func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Co
 				reason = "waiting state reason unknown"
 			}
 
-			logrus.Warnf("%s is not running (reason: %s, restarts %d): some test cases might fail.",
+			log.Warn("%s is not running (reason: %s, restarts %d): some test cases might fail.",
 				&container, reason, cutStatus.RestartCount)
 		}
 
@@ -375,7 +376,7 @@ func isSkipHelmChart(helmName string, skipHelmChartList []configuration.SkipHelm
 	}
 	for _, helm := range skipHelmChartList {
 		if helmName == helm.Name {
-			logrus.Infof("Helm chart with name %s was skipped", helmName)
+			log.Info("Helm chart with name %s was skipped", helmName)
 			return true
 		}
 	}
@@ -421,7 +422,7 @@ func buildContainerImageSource(urlImage, urlImageID string) (source ContainerIma
 		source.Digest = match[3]
 	}
 
-	logrus.Debugf("parsed image, repo: %s, name:%s, tag: %s, digest: %s",
+	log.Debug("parsed image, repo: %s, name:%s, tag: %s, digest: %s",
 		source.Registry,
 		source.Repository,
 		source.Tag,
@@ -557,20 +558,20 @@ func createNodes(nodes []corev1.Node) map[string]Node {
 		if !IsOCPCluster() {
 			// Avoid getting Mc info for non ocp clusters.
 			wrapperNodes[node.Name] = Node{Data: node}
-			logrus.Warnf("Non-OCP cluster detected. MachineConfig retrieval for node %s skipped.", node.Name)
+			log.Warn("Non-OCP cluster detected. MachineConfig retrieval for node %s skipped.", node.Name)
 			continue
 		}
 
 		// Get Node's machineConfig name
 		mcName, exists := node.Annotations["machineconfiguration.openshift.io/currentConfig"]
 		if !exists {
-			logrus.Errorf("Failed to get machineConfig name for node %s", node.Name)
+			log.Error("Failed to get machineConfig name for node %s", node.Name)
 			continue
 		}
-		logrus.Infof("Node %s - mc name: %s", node.Name, mcName)
+		log.Info("Node %s - mc name: %s", node.Name, mcName)
 		mc, err := getMachineConfig(mcName, machineConfigs)
 		if err != nil {
-			logrus.Errorf("Failed to get machineConfig %s, err: %v", mcName, err)
+			log.Error("Failed to get machineConfig %s, err: %v", mcName, err)
 			continue
 		}
 
diff --git a/pkg/provider/scale_object.go b/pkg/provider/scale_object.go
index 6e9fb62f3..059a5f7b5 100644
--- a/pkg/provider/scale_object.go
+++ b/pkg/provider/scale_object.go
@@ -3,7 +3,7 @@ package provider
 import (
 	"fmt"
 
-	"github.com/sirupsen/logrus"
+	"github.com/test-network-function/cnf-certification-test/internal/log"
 	"github.com/test-network-function/cnf-certification-test/pkg/autodiscover"
 
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -18,7 +18,7 @@ type CrScale struct {
 
 func (crScale CrScale) IsScaleObjectReady() bool {
 	replicas := (crScale.Spec.Replicas)
-	logrus.Infof("replicas is %d status replica is %d", replicas, crScale.Status.Replicas)
+	log.Info("replicas is %d status replica is %d", replicas, crScale.Status.Replicas)
 	return crScale.Status.Replicas == replicas
 }
 
diff --git a/pkg/scheduling/scheduling.go b/pkg/scheduling/scheduling.go
index ed022b089..0d1d924f1 100644
--- a/pkg/scheduling/scheduling.go
+++ b/pkg/scheduling/scheduling.go
@@ -21,12 +21,11 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/sirupsen/logrus"
 	"github.com/test-network-function/cnf-certification-test/internal/clientsholder"
 	"github.com/test-network-function/cnf-certification-test/internal/crclient"
+	"github.com/test-network-function/cnf-certification-test/internal/log"
 	"github.com/test-network-function/cnf-certification-test/pkg/provider"
 	"github.com/test-network-function/cnf-certification-test/pkg/testhelper"
-	"github.com/test-network-function/cnf-certification-test/pkg/tnf"
 )
 
 const (
@@ -70,7 +69,7 @@ func parseSchedulingPolicyAndPriority(chrtCommandOutput string) (schedPolicy str
 		case strings.Contains(line, CurrentSchedulingPriority):
 			schedPriority, err = strconv.Atoi(lastToken)
 			if err != nil {
-				logrus.Errorf("Error obtained during strconv %v", err)
+				log.Error("Error obtained during strconv %v", err)
 				return schedPolicy, InvalidPriority, err
 			}
 		default:
@@ -89,7 +88,7 @@ func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *prov
 	for _, process := range processes {
 		schedulePolicy, schedulePriority, err := GetProcessCPUSchedulingFn(process.Pid, testContainer)
 		if err != nil {
-			logrus.Errorf("error getting the scheduling policy and priority : %v", err)
+			log.Error("error getting the scheduling policy and priority : %v", err)
 			return compliantContainerPids, nonCompliantContainerPids
 		}
 
@@ -103,13 +102,13 @@ func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *prov
 		}
 
 		if !hasCPUSchedulingConditionSuccess {
-			tnf.ClaimFilePrintf("pid=%d in %s with cpu scheduling policy=%s, priority=%s did not satisfy cpu scheduling requirements", process.Pid, testContainer, schedulePolicy, schedulePriority)
+			log.Debug("pid=%d in %s with cpu scheduling policy=%s, priority=%d did not satisfy cpu scheduling requirements", process.Pid, testContainer, schedulePolicy, schedulePriority)
 			aPidOut := testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, "process does not satisfy: "+schedulingRequirements[check], false).
 				SetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)
 			nonCompliantContainerPids = append(nonCompliantContainerPids, aPidOut)
 			continue
 		}
-		tnf.ClaimFilePrintf("pid=%d in %s with cpu scheduling policy=%s, priority=%s satisfies cpu scheduling requirements", process.Pid, testContainer, schedulePolicy, schedulePriority)
+		log.Debug("pid=%d in %s with cpu scheduling policy=%s, priority=%d satisfies cpu scheduling requirements", process.Pid, testContainer, schedulePolicy, schedulePriority)
 		aPidOut := testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, "process satisfies: "+schedulingRequirements[check], true).
 			SetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)
 		compliantContainerPids = append(compliantContainerPids, aPidOut)
@@ -118,7 +117,7 @@ func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *prov
 }
 
 func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedulePolicy string, schedulePriority int, err error) {
-	logrus.Infof("Checking the scheduling policy/priority in %v for pid=%d", testContainer, pid)
+	log.Info("Checking the scheduling policy/priority in %v for pid=%d", testContainer, pid)
 
 	command := fmt.Sprintf("chrt -p %d", pid)
 	env := provider.GetTestEnvironment()
@@ -139,7 +138,7 @@ func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedu
 	if err != nil {
 		return schedulePolicy, InvalidPriority, fmt.Errorf("error getting the scheduling policy and priority for %v : %v", testContainer, err)
 	}
-	logrus.Infof("pid %d in %v has the cpu scheduling policy %s, scheduling priority %d", pid, testContainer, schedulePolicy, schedulePriority)
+	log.Info("pid %d in %v has the cpu scheduling policy %s, scheduling priority %d", pid, testContainer, schedulePolicy, schedulePriority)
 
 	return schedulePolicy, schedulePriority, err
 }
diff --git a/pkg/testhelper/testhelper.go b/pkg/testhelper/testhelper.go
index b7df30361..e2ff27cf4 100644
--- a/pkg/testhelper/testhelper.go
+++ b/pkg/testhelper/testhelper.go
@@ -21,8 +21,6 @@ import (
 	"fmt"
 	"reflect"
 
-	"github.com/sirupsen/logrus"
-
 	"github.com/test-network-function/cnf-certification-test/pkg/provider"
 )
 
@@ -638,31 +636,6 @@ func NewSkipObject(object interface{}, name string) (skipObject [2]interface{})
 	return skipObject
 }
 
-func AddTestResultLog(prefix string, object interface{}, log func(string, ...interface{}), fail func(string, ...int)) {
-	s := reflect.ValueOf(object)
-	if s.Kind() != reflect.Slice && s.Kind() != reflect.Map {
-		panic("AddTestResultLog object param is a non slice/map type")
-	}
-	if s.Len() > 0 {
-		log(fmt.Sprintf("%s %s: %v", prefix, reflect.TypeOf(object), object))
-		fail(fmt.Sprintf("Number of %s %s = %d", prefix, reflect.TypeOf(object), s.Len()))
-	}
-}
-
-func AddTestResultReason(compliantObject, nonCompliantObject []*ReportObject, log func(string, ...interface{}), fail func(string, ...int)) {
-	var aReason FailureReasonOut
-	aReason.CompliantObjectsOut = compliantObject
-	aReason.NonCompliantObjectsOut = nonCompliantObject
-	bytes, err := json.Marshal(aReason)
-	if err != nil {
-		logrus.Errorf("Could not Marshall FailureReason object, err=%s", err)
-	}
-	log(string(bytes))
-	if len(aReason.NonCompliantObjectsOut) > 0 {
-		fail(string(bytes))
-	}
-}
-
 func ResultObjectsToString(compliantObject, nonCompliantObject []*ReportObject) (string, error) {
 	reason := FailureReasonOut{
 		CompliantObjectsOut:    compliantObject,
diff --git a/pkg/testhelper/testhelper_test.go b/pkg/testhelper/testhelper_test.go
index 117f19f49..dadbd4629 100644
--- a/pkg/testhelper/testhelper_test.go
+++ b/pkg/testhelper/testhelper_test.go
@@ -107,49 +107,3 @@ func TestSkipIfEmptyFuncs(t *testing.T) {
 		}
 	}
 }
-
-func TestAddTestResultLog(t *testing.T) {
-	logFuncCtr := 0
-	failFuncCtr := 0
-	testCases := []struct {
-		testPrefix          string
-		testObject          interface{}
-		logFunc             func(string, ...interface{})
-		failFunc            func(string, ...int)
-		expectedLogCtr      int
-		expectedFailFuncCtr int
-	}{
-		{
-			testPrefix: "test1",
-			testObject: []string{"fail1", "fail2"},
-			logFunc: func(s string, i ...interface{}) {
-				logFuncCtr++
-			},
-			failFunc: func(s string, i ...int) {
-				failFuncCtr++
-			},
-			expectedLogCtr:      1,
-			expectedFailFuncCtr: 1,
-		},
-		{
-			testPrefix: "test2",
-			testObject: []string{},
-			logFunc: func(s string, i ...interface{}) {
-				logFuncCtr++
-			},
-			failFunc: func(s string, i ...int) {
-				failFuncCtr++
-			},
-			expectedLogCtr:      0,
-			expectedFailFuncCtr: 0,
-		},
-	}
-
-	for _, tc := range testCases {
-		AddTestResultLog(tc.testPrefix, tc.testObject, tc.logFunc, tc.failFunc)
-		assert.Equal(t, tc.expectedFailFuncCtr, failFuncCtr)
-		assert.Equal(t, tc.expectedLogCtr, logFuncCtr)
-		logFuncCtr = 0
-		failFuncCtr = 0
-	}
-}
diff --git a/pkg/tnf/doc.go b/pkg/tnf/doc.go
deleted file mode 100644
index 7d300ecdf..000000000
--- a/pkg/tnf/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (C) 2020-2022 Red Hat, Inc.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along
-// with this program; if not, write to the Free Software Foundation, Inc.,
-// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-/*
-Package tnf contains the core Test runner interfaces.
-*/
-package tnf
diff --git a/pkg/tnf/doc_test.go b/pkg/tnf/doc_test.go
deleted file mode 100644
index 7d300ecdf..000000000
--- a/pkg/tnf/doc_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (C) 2020-2022 Red Hat, Inc.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 2 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along
-// with this program; if not, write to the Free Software Foundation, Inc.,
-// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-/*
-Package tnf contains the core Test runner interfaces.
-*/
-package tnf
diff --git a/pkg/tnf/status.go b/pkg/tnf/status.go
deleted file mode 100644
index d823af49e..000000000
--- a/pkg/tnf/status.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package tnf
-
-import (
-	"fmt"
-
-	"github.com/sirupsen/logrus"
-)
-
-// ClaimFilePrintf prints to stdout.
-// ToDo: Remove?
-func ClaimFilePrintf(format string, args ...interface{}) {
-	Logf(logrus.TraceLevel, format, args...)
-}
-
-// Logf prints to stdout.
-func Logf(level logrus.Level, format string, args ...interface{}) {
-	message := fmt.Sprintf(format+"\n", args...)
-	logrus.StandardLogger().Log(level, message)
-}
diff --git a/pkg/tnf/status_test.go b/pkg/tnf/status_test.go
deleted file mode 100644
index 24566a039..000000000
--- a/pkg/tnf/status_test.go
+++ /dev/null
@@ -1 +0,0 @@
-package tnf
diff --git a/run-cnf-suites.sh b/run-cnf-suites.sh
index 4445ff0ad..1dbbd9eee 100755
--- a/run-cnf-suites.sh
+++ b/run-cnf-suites.sh
@@ -121,9 +121,7 @@ set -o pipefail
 # Do not double quote.
 # SC2086: Double quote to prevent globbing and word splitting.
 # shellcheck disable=SC2086
-./cnf-certification-test \
-	"${LABEL_STRING}" \
-	${EXTRA_ARGS} |& tee $OUTPUT_LOC/tnf-execution.log
+./cnf-certification-test "${LABEL_STRING}" ${EXTRA_ARGS}
 
 # preserving the exit status
 RESULT=$?