Merge branch 'main' into update-rhcos-map
rdavid authored Jan 4, 2024
2 parents 0fc8356 + 9f2ca6b commit 3ee62ea
Showing 6 changed files with 55 additions and 39 deletions.
8 changes: 4 additions & 4 deletions cnf-certification-test/accesscontrol/resources/resources.go
@@ -42,20 +42,20 @@ func HasRequestsAndLimitsSet(cut *provider.Container) bool {
}

// For more info on cpu management policies see https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/.
func HasExclusiveCPUsAssigned(cut *provider.Container) bool {
func HasExclusiveCPUsAssigned(cut *provider.Container, logger *log.Logger) bool {
cpuLimits := cut.Resources.Limits.Cpu()
memLimits := cut.Resources.Limits.Memory()

// if no cpu or memory limits are specified the container will run in the shared cpu pool
if cpuLimits.IsZero() || memLimits.IsZero() {
log.Debug("Container has been found missing cpu/memory resource limits: %s", cut.String())
logger.Debug("Container %q has been found missing cpu/memory resource limits", cut)
return false
}

// if the cpu limits quantity is not an integer the container will run in the shared cpu pool
cpuLimitsVal, isInteger := cpuLimits.AsInt64()
if !isInteger {
log.Debug("Container's cpu resource limit is not an integer: %s", cut.String())
logger.Debug("Container %q cpu resource limit is not an integer", cut)
return false
}

@@ -68,6 +68,6 @@ func HasExclusiveCPUsAssigned(cut *provider.Container) bool {
}

// if the cpu limits and request are different, the container will run in the shared cpu pool
log.Debug("Container's cpu/memory resources and limits are not equal to each other: %s", cut.String())
logger.Debug("Container %q cpu/memory resources and limits are not equal to each other", cut)
return false
}
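The signature change above threads the check's logger into HasExclusiveCPUsAssigned instead of relying on the package-level logger. The rule the function encodes comes from the Kubernetes CPU management policies page linked in the comment: a container only gets exclusive CPUs when it is in the Guaranteed QoS class with an integer CPU quantity. Below is a minimal, self-contained sketch of that rule; the function and variable names are illustrative, not the repository's code, and the requests-equal-limits step (elided in the hunk) is reconstructed from the trailing comment.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// runsInExclusiveCPUPool mirrors the intent of HasExclusiveCPUsAssigned for a
// single container, taking CPU/memory requests and limits as quantity strings.
func runsInExclusiveCPUPool(cpuReq, cpuLim, memReq, memLim string) bool {
	cpuRequests := resource.MustParse(cpuReq)
	cpuLimits := resource.MustParse(cpuLim)
	memRequests := resource.MustParse(memReq)
	memLimits := resource.MustParse(memLim)

	// No CPU or memory limit: the container runs in the shared CPU pool.
	if cpuLimits.IsZero() || memLimits.IsZero() {
		return false
	}
	// A fractional CPU limit (e.g. 500m) also keeps it in the shared pool.
	if _, isInteger := cpuLimits.AsInt64(); !isInteger {
		return false
	}
	// Requests must equal limits (Guaranteed QoS) for exclusive CPUs.
	return cpuRequests.Cmp(cpuLimits) == 0 && memRequests.Cmp(memLimits) == 0
}

func main() {
	fmt.Println(runsInExclusiveCPUPool("2", "2", "1Gi", "1Gi"))       // true
	fmt.Println(runsInExclusiveCPUPool("500m", "500m", "1Gi", "1Gi")) // false: fractional CPU
}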
6 changes: 5 additions & 1 deletion cnf-certification-test/accesscontrol/resources/resources_test.go
@@ -1,9 +1,11 @@
package resources

import (
"strings"
"testing"

"github.com/stretchr/testify/assert"
"github.com/test-network-function/cnf-certification-test/internal/log"
"github.com/test-network-function/cnf-certification-test/pkg/provider"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -192,7 +194,9 @@ func TestHasExclusiveCPUsAssigned(t *testing.T) {
},
}

var logArchive strings.Builder
log.SetupLogger(&logArchive, "INFO")
for _, tc := range testCases {
assert.Equal(t, tc.expectedResult, HasExclusiveCPUsAssigned(tc.testContainer))
assert.Equal(t, tc.expectedResult, HasExclusiveCPUsAssigned(tc.testContainer, log.GetLogger()))
}
}
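The test now builds the repository's logger over a strings.Builder and passes it to the function under test. The internal/log API is not shown here beyond SetupLogger and GetLogger, so the sketch below reproduces the wiring pattern with the standard library's log package as a stand-in; all names are illustrative.

package main

import (
	"fmt"
	"log"
	"strings"
)

// hasExclusiveCPUs stands in for a function that, like HasExclusiveCPUsAssigned
// after this commit, receives its logger as an argument rather than using a
// package-level logger.
func hasExclusiveCPUs(cpuLimit string, logger *log.Logger) bool {
	if cpuLimit == "" {
		logger.Printf("container has no cpu limit, it stays in the shared pool")
		return false
	}
	return true
}

func main() {
	// Route log output into a strings.Builder, as the test above does with
	// log.SetupLogger(&logArchive, "INFO").
	var logArchive strings.Builder
	logger := log.New(&logArchive, "", log.LstdFlags)

	got := hasExclusiveCPUs("", logger)
	fmt.Println("result:", got)
	fmt.Println("captured:", strings.TrimSpace(logArchive.String()))
}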
61 changes: 32 additions & 29 deletions cnf-certification-test/performance/suite.go
@@ -42,7 +42,6 @@ var (
env provider.TestEnvironment

beforeEachFn = func(check *checksdb.Check) error {
check.LogInfo("Check %s: getting test environment.", check.ID)
env = provider.GetTestEnvironment()
return nil
}
@@ -81,7 +80,7 @@
)

func LoadChecks() {
log.Debug("Loading %s checks", common.PerformanceTestKey)
log.Debug("Loading %s suite checks", common.PerformanceTestKey)

checksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).
WithBeforeEachFn(beforeEachFn)
@@ -136,17 +135,18 @@ func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnviron
counter := 0
for _, put := range env.Pods {
for _, cut := range put.Containers {
check.LogInfo("Testing Container %q", cut)
if cut.LivenessProbe != nil && cut.LivenessProbe.Exec != nil {
counter++
if cut.LivenessProbe.PeriodSeconds > minExecProbePeriodSeconds {
check.LogInfo("Container %s has a LivenessProbe with PeriodSeconds greater than %d (%d seconds)",
check.LogInfo("Container %q has a LivenessProbe with PeriodSeconds greater than %d (%d seconds)",
cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)

compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,
cut.Name, fmt.Sprintf("LivenessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)",
cut.LivenessProbe.PeriodSeconds), true))
} else {
check.LogError("Container %s has a LivenessProbe with PeriodSeconds less than %d (%d seconds)",
check.LogError("Container %q has a LivenessProbe with PeriodSeconds less than %d (%d seconds)",
cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)

nonCompliantObjects = append(nonCompliantObjects,
@@ -158,14 +158,14 @@
if cut.StartupProbe != nil && cut.StartupProbe.Exec != nil {
counter++
if cut.StartupProbe.PeriodSeconds > minExecProbePeriodSeconds {
check.LogInfo("Container %s has a StartupProbe with PeriodSeconds greater than %d (%d seconds)",
check.LogInfo("Container %q has a StartupProbe with PeriodSeconds greater than %d (%d seconds)",
cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)

compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,
cut.Name, fmt.Sprintf("StartupProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)",
cut.StartupProbe.PeriodSeconds), true))
} else {
check.LogError("Container %s has a StartupProbe with PeriodSeconds less than %d (%d seconds)",
check.LogError("Container %q has a StartupProbe with PeriodSeconds less than %d (%d seconds)",
cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)

nonCompliantObjects = append(nonCompliantObjects,
@@ -177,14 +177,14 @@
if cut.ReadinessProbe != nil && cut.ReadinessProbe.Exec != nil {
counter++
if cut.ReadinessProbe.PeriodSeconds > minExecProbePeriodSeconds {
check.LogInfo("Container %s has a ReadinessProbe with PeriodSeconds greater than %d (%d seconds)",
check.LogInfo("Container %q has a ReadinessProbe with PeriodSeconds greater than %d (%d seconds)",
cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)

compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,
cut.Name, fmt.Sprintf("ReadinessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)",
cut.ReadinessProbe.PeriodSeconds), true))
} else {
check.LogError("Container %s has a ReadinessProbe with PeriodSeconds less than %d (%d seconds)",
check.LogError("Container %q has a ReadinessProbe with PeriodSeconds less than %d (%d seconds)",
cut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)

nonCompliantObjects = append(nonCompliantObjects,
@@ -198,12 +198,11 @@ func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnviron

// If there >=10 exec probes, mark the entire cluster as a failure
if counter >= maxNumberOfExecProbes {
check.LogDebug(fmt.Sprintf("CNF has %d exec probes", counter))
check.LogError("CNF has 10 or more exec probes (nb-exec-probes=%d)", counter)
nonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf("CNF has 10 or more exec probes (%d exec probes)", counter), testhelper.CnfType, false))
} else {
// Compliant object
check.LogInfo("CNF has less than 10 exec probes (nb-exec-probes=%d)", counter)
compliantObjects = append(compliantObjects, testhelper.NewReportObject(fmt.Sprintf("CNF has less than 10 exec probes (%d exec probes)", counter), testhelper.CnfType, true))
check.LogDebug(fmt.Sprintf("CNF has less than %d exec probes", counter))
}

check.SetResult(compliantObjects, nonCompliantObjects)
@@ -217,7 +216,7 @@ func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment)
nBExclusiveCPUPoolContainers := 0
nBSharedCPUPoolContainers := 0
for _, cut := range put.Containers {
if resources.HasExclusiveCPUsAssigned(cut) {
if resources.HasExclusiveCPUsAssigned(cut, check.GetLoggger()) {
nBExclusiveCPUPoolContainers++
} else {
nBSharedCPUPoolContainers++
@@ -228,12 +227,13 @@ func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment)
exclusiveStr := strconv.Itoa(nBExclusiveCPUPoolContainers)
sharedStr := strconv.Itoa(nBSharedCPUPoolContainers)

check.LogDebug("Pod: %s has containers whose CPUs belong to different pools. Containers in the shared cpu pool: %d "+
"Containers in the exclusive cpu pool: %d", put.String(), nBSharedCPUPoolContainers, nBExclusiveCPUPoolContainers)
check.LogError("Pod %q has containers whose CPUs belong to different pools. Containers in the shared cpu pool: %d "+
"Containers in the exclusive cpu pool: %d", put, nBSharedCPUPoolContainers, nBExclusiveCPUPoolContainers)
nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has containers whose CPUs belong to different pools", false).
AddField("SharedCPUPoolContainers", sharedStr).
AddField("ExclusiveCPUPoolContainers", exclusiveStr))
} else {
check.LogInfo("Pod %q has no containers whose CPUs belong to different pools", put)
compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod has no containers whose CPUs belong to different pools", true))
}
}
@@ -245,34 +245,32 @@ func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvi
podContainers []*provider.Container, schedulingType string) {
var compliantContainersPids []*testhelper.ReportObject
var nonCompliantContainersPids []*testhelper.ReportObject
for _, testContainer := range podContainers {
check.LogInfo("Processing %v", testContainer)
for _, cut := range podContainers {
check.LogInfo("Testing Container %q", cut)

// Get the pid namespace
pidNamespace, err := crclient.GetContainerPidNamespace(testContainer, env)
pidNamespace, err := crclient.GetContainerPidNamespace(cut, env)
if err != nil {
check.LogError("unable to get pid namespace for container %s, err: %v", testContainer, err)
check.LogError("Unable to get pid namespace for Container %q, err: %v", cut, err)
nonCompliantContainersPids = append(nonCompliantContainersPids,
testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, fmt.Sprintf("Internal error, err=%s", err), false))
testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf("Internal error, err=%s", err), false))
continue
}
check.LogDebug("Obtained pidNamespace for %s is %s", testContainer, pidNamespace)
check.LogDebug("PID namespace for Container %q is %q", cut, pidNamespace)

// Get the list of process ids running in the pid namespace
processes, err := crclient.GetPidsFromPidNamespace(pidNamespace, testContainer)

processes, err := crclient.GetPidsFromPidNamespace(pidNamespace, cut)
if err != nil {
check.LogError("Unable to get PIDs from PID namespace %q for Container %q, err: %v", pidNamespace, cut, err)
nonCompliantContainersPids = append(nonCompliantContainersPids,
testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, fmt.Sprintf("Internal error, err=%s", err), false))
testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf("Internal error, err=%s", err), false))
}

compliantPids, nonCompliantPids := scheduling.ProcessPidsCPUScheduling(processes, testContainer, schedulingType)
compliantPids, nonCompliantPids := scheduling.ProcessPidsCPUScheduling(processes, cut, schedulingType, check.GetLoggger())
// Check for the specified priority for each processes running in that pid namespace

compliantContainersPids = append(compliantContainersPids, compliantPids...)
nonCompliantContainersPids = append(nonCompliantContainersPids, nonCompliantPids...)

check.LogDebug("Processed %v", testContainer)
}

check.SetResult(compliantContainersPids, nonCompliantContainersPids)
@@ -309,14 +307,16 @@ func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment
var nonCompliantObjects []*testhelper.ReportObject
cuts := env.GetNonGuaranteedPodContainersWithoutHostPID()
for _, cut := range cuts {
check.LogInfo("Testing Container %q", cut)
if !cut.HasExecProbes() {
check.LogInfo("Container %q does not define exec probes", cut)
compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container does not define exec probes", true))
continue
}

processes, err := crclient.GetContainerProcesses(cut, env)
if err != nil {
check.LogDebug("Could not determine the processes pids for container %s, err: %v", cut, err)
check.LogError("Could not determine the processes pids for container %q, err: %v", cut, err)
nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Could not determine the processes pids for container", false))
break
}
@@ -325,32 +325,35 @@ func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment
compliantObjects = append(compliantObjects, compliantObjectsProbes...)
allProcessesCompliant := true
for _, p := range notExecProbeProcesses {
check.LogInfo("Testing process %q", p)
schedPolicy, _, err := scheduling.GetProcessCPUScheduling(p.Pid, cut)
if err != nil {
// If the process does not exist anymore it means that it has finished since the time the process list
// was retrieved. In this case, just ignore the error and continue processing the rest of the processes.
if strings.Contains(err.Error(), noProcessFoundErrMsg) {
check.LogWarn("Container process %q disappeared", p)
compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container process disappeared", true).
AddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).
AddField(testhelper.ProcessCommandLine, p.Args))
continue
}
check.LogDebug("Could not determine the scheduling policy for container %s (pid=%v), err: %v", cut, p.Pid, err)
check.LogError("Could not determine the scheduling policy for container %q (pid=%d), err: %v", cut, p.Pid, err)
nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Could not determine the scheduling policy for container", false).
AddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).
AddField(testhelper.ProcessCommandLine, p.Args))
allProcessesCompliant = false
continue
}
if scheduling.PolicyIsRT(schedPolicy) {
check.LogDebug("Pod %s/Container %s defines exec probes while having a RT scheduling policy for pid %d", cut.Podname, cut, p.Pid)
check.LogError("Container %q defines exec probes while having a RT scheduling policy for process %q", cut, p)
nonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container defines exec probes while having a RT scheduling policy", false).
AddField(testhelper.ProcessID, strconv.Itoa(p.Pid)))
allProcessesCompliant = false
}
}

if allProcessesCompliant {
check.LogInfo("Container %q defines exec probes but does not have a RT scheduling policy", cut)
compliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, "Container defines exec probes but does not have a RT scheduling policy", true))
}
}
4 changes: 4 additions & 0 deletions internal/crclient/crclient.go
@@ -39,6 +39,10 @@ const (
DockerInspectPID = "chroot /host docker inspect -f '{{.State.Pid}}' "
)

func (p *Process) String() string {
return fmt.Sprintf("cmd: %s, pid: %d, ppid: %d, pidNs: %d", p.Args, p.Pid, p.PPid, p.PidNs)
}

// Helper function to create the clientsholder.Context of the first container of the debug pod
// that runs in the give node. This context is usually needed to run shell commands that get
// information from a node where a pod/container under test is running.
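The new String method makes *Process satisfy fmt.Stringer, which is what the reworked %q log lines in suite.go and scheduling.go rely on. A minimal sketch of that mechanism, using a local stand-in type rather than the repository's crclient.Process:

package main

import "fmt"

// process is a local stand-in for crclient.Process.
type process struct {
	Args  string
	Pid   int
	PPid  int
	PidNs int
}

// String matches the format added in crclient.go and satisfies fmt.Stringer.
func (p *process) String() string {
	return fmt.Sprintf("cmd: %s, pid: %d, ppid: %d, pidNs: %d", p.Args, p.Pid, p.PPid, p.PidNs)
}

func main() {
	p := &process{Args: "sleep 100", Pid: 42, PPid: 1, PidNs: 7}
	// %q invokes the Stringer and quotes the result, e.g.
	// "cmd: sleep 100, pid: 42, ppid: 1, pidNs: 7".
	fmt.Printf("Testing process %q\n", p)
}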
9 changes: 5 additions & 4 deletions pkg/scheduling/scheduling.go
@@ -83,12 +83,13 @@ var schedulingRequirements = map[string]string{SharedCPUScheduling: "SHARED_CPU_
ExclusiveCPUScheduling: "EXCLUSIVE_CPU_SCHEDULING: scheduling priority < 10 and scheduling policy == SCHED_RR or SCHED_FIFO",
IsolatedCPUScheduling: "ISOLATED_CPU_SCHEDULING: scheduling policy == SCHED_RR or SCHED_FIFO"}

func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *provider.Container, check string) (compliantContainerPids, nonCompliantContainerPids []*testhelper.ReportObject) {
func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *provider.Container, check string, logger *log.Logger) (compliantContainerPids, nonCompliantContainerPids []*testhelper.ReportObject) {
hasCPUSchedulingConditionSuccess := false
for _, process := range processes {
logger.Debug("Testing process %q", process)
schedulePolicy, schedulePriority, err := GetProcessCPUSchedulingFn(process.Pid, testContainer)
if err != nil {
log.Error("error getting the scheduling policy and priority : %v", err)
logger.Error("Unable to get the scheduling policy and priority : %v", err)
return compliantContainerPids, nonCompliantContainerPids
}

@@ -102,13 +103,13 @@
}

if !hasCPUSchedulingConditionSuccess {
log.Debug("pid=%d in %s with cpu scheduling policy=%s, priority=%d did not satisfy cpu scheduling requirements", process.Pid, testContainer, schedulePolicy, schedulePriority)
logger.Error("Process %q in Container %q with cpu scheduling policy=%s, priority=%d did not satisfy cpu scheduling requirements", process, testContainer, schedulePolicy, schedulePriority)
aPidOut := testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, "process does not satisfy: "+schedulingRequirements[check], false).
SetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)
nonCompliantContainerPids = append(nonCompliantContainerPids, aPidOut)
continue
}
log.Debug("pid=%d in %s with cpu scheduling policy=%s, priority=%d satisfies cpu scheduling requirements", process.Pid, testContainer, schedulePolicy, schedulePriority)
logger.Info("Process %q in Container %q with cpu scheduling policy=%s, priority=%d satisfies cpu scheduling requirements", process, testContainer, schedulePolicy, schedulePriority)
aPidOut := testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, "process satisfies: "+schedulingRequirements[check], true).
SetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)
compliantContainerPids = append(compliantContainerPids, aPidOut)
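ProcessPidsCPUScheduling now takes the logger as a parameter and logs one line per process, but the compliance rule itself is unchanged and is spelled out in the schedulingRequirements map above. A hedged sketch of that per-process decision follows; the shared-CPU requirement string is truncated in the hunk, so it is omitted, and the names and structure here are illustrative rather than the repository's code.

package main

import "fmt"

func isRealTimePolicy(policy string) bool {
	return policy == "SCHED_FIFO" || policy == "SCHED_RR"
}

// satisfiesCPUSchedulingRequirement mirrors the per-process decision made by
// ProcessPidsCPUScheduling for the two requirements fully visible above.
func satisfiesCPUSchedulingRequirement(check, policy string, priority int) bool {
	switch check {
	case "EXCLUSIVE_CPU_SCHEDULING":
		// scheduling priority < 10 and scheduling policy == SCHED_RR or SCHED_FIFO
		return isRealTimePolicy(policy) && priority < 10
	case "ISOLATED_CPU_SCHEDULING":
		// scheduling policy == SCHED_RR or SCHED_FIFO
		return isRealTimePolicy(policy)
	default:
		return false
	}
}

func main() {
	fmt.Println(satisfiesCPUSchedulingRequirement("EXCLUSIVE_CPU_SCHEDULING", "SCHED_FIFO", 5))  // true
	fmt.Println(satisfiesCPUSchedulingRequirement("EXCLUSIVE_CPU_SCHEDULING", "SCHED_FIFO", 50)) // false
}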
6 changes: 5 additions & 1 deletion pkg/scheduling/scheduling_test.go
@@ -19,10 +19,12 @@ package scheduling
import (
"fmt"
"reflect"
"strings"
"testing"

"github.com/stretchr/testify/assert"
"github.com/test-network-function/cnf-certification-test/internal/crclient"
"github.com/test-network-function/cnf-certification-test/internal/log"
"github.com/test-network-function/cnf-certification-test/pkg/provider"
"github.com/test-network-function/cnf-certification-test/pkg/testhelper"
corev1 "k8s.io/api/core/v1"
@@ -400,9 +402,11 @@ func TestProcessPidsCPUScheduling(t *testing.T) {

compliant: []testhelper.ReportObject{}},
}
var logArchive strings.Builder
log.SetupLogger(&logArchive, "INFO")
for _, tc := range testCases {
GetProcessCPUSchedulingFn = tc.mockGetProcessCPUScheduling
compliant, nonCompliant := ProcessPidsCPUScheduling(testPids, testContainer, tc.check)
compliant, nonCompliant := ProcessPidsCPUScheduling(testPids, testContainer, tc.check, log.GetLogger())

fmt.Printf(
"test=%s Actual compliant=%s,\n",
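The test keeps its existing pattern of swapping the package-level GetProcessCPUSchedulingFn variable for a mock; only the logger setup is new. A rough sketch of that function-variable mocking pattern, under assumed, illustrative names rather than the repository's code:

package main

import "fmt"

// getSchedulingFn stands in for the package-level GetProcessCPUSchedulingFn
// variable in pkg/scheduling; the real lookup queries the process inside the
// container and is not reproduced here.
var getSchedulingFn = func(pid int) (policy string, priority int, err error) {
	return "", 0, fmt.Errorf("no cluster available")
}

func classify(pid int) string {
	policy, prio, err := getSchedulingFn(pid)
	if err != nil {
		return "error: " + err.Error()
	}
	return fmt.Sprintf("policy=%s priority=%d", policy, prio)
}

func main() {
	// A test overrides the variable with a deterministic stub, much as
	// GetProcessCPUSchedulingFn = tc.mockGetProcessCPUScheduling does above.
	getSchedulingFn = func(pid int) (string, int, error) { return "SCHED_FIFO", 5, nil }
	fmt.Println(classify(1234)) // policy=SCHED_FIFO priority=5
}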
