Skip to content

Commit

Permalink
Merge branch 'main' into bug_fix_pod_cluster_role_bindings
Browse files Browse the repository at this point in the history
  • Loading branch information
jmontesi authored Jan 10, 2024
2 parents 9260f8c + 92700ac commit 3fc0142
Show file tree
Hide file tree
Showing 12 changed files with 579 additions and 523 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -44,13 +44,13 @@ func NewOwnerReference(put *corev1.Pod) *OwnerReference {

// RunTest runs the owner-reference checks and stores the
// result in o.result.
func (o *OwnerReference) RunTest() {
func (o *OwnerReference) RunTest(logger *log.Logger) {
for _, k := range o.put.OwnerReferences {
log.Debug("kind is %s", k.Kind)
if k.Kind == statefulSet || k.Kind == replicaSet {
logger.Info("Pod %q owner reference kind is %q", o.put, k.Kind)
o.result = testhelper.SUCCESS
} else {
log.Error("Pod %s has owner of type %s", o.put.Name, k.Kind)
logger.Error("Pod %q has owner of type %q (%q or %q expected)", o.put, k.Kind, replicaSet, statefulSet)
o.result = testhelper.FAILURE
return
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,12 @@
package ownerreference_test

import (
"strings"
"testing"

"github.com/stretchr/testify/assert"
"github.com/test-network-function/cnf-certification-test/cnf-certification-test/lifecycle/ownerreference"
"github.com/test-network-function/cnf-certification-test/internal/log"
"github.com/test-network-function/cnf-certification-test/pkg/testhelper"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -60,7 +62,9 @@ func TestRunTest(t *testing.T) {

ownerRef := ownerreference.NewOwnerReference(testPod)
assert.NotNil(t, ownerRef)
ownerRef.RunTest()
var logArchive strings.Builder
log.SetupLogger(&logArchive, "INFO")
ownerRef.RunTest(log.GetLogger())
assert.Equal(t, tc.expectedResult, ownerRef.GetResults())
}
}
34 changes: 17 additions & 17 deletions cnf-certification-test/lifecycle/podsets/podsets.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,63 +31,63 @@ const (
StatefulsetString = "StatefulSet"
)

var WaitForDeploymentSetReady = func(ns, name string, timeout time.Duration) bool {
log.Debug("check if deployment %s:%s is ready", ns, name)
var WaitForDeploymentSetReady = func(ns, name string, timeout time.Duration, logger *log.Logger) bool {
logger.Info("Check if Deployment %s:%s is ready", ns, name)
clients := clientsholder.GetClientsHolder()
start := time.Now()
for time.Since(start) < timeout {
dp, err := provider.GetUpdatedDeployment(clients.K8sClient.AppsV1(), ns, name)
if err != nil {
log.Error("Error while getting deployment %s (ns: %s), err: %v", name, ns, err)
logger.Error("Error while getting Deployment %q, err: %v", name, err)
} else if !dp.IsDeploymentReady() {
log.Info("%s is not ready yet", dp.ToString())
logger.Warn("Deployment %q is not ready yet", dp.ToString())
} else {
log.Debug("%s is ready!", dp.ToString())
logger.Info("Deployment %q is ready!", dp.ToString())
return true
}

time.Sleep(time.Second)
}
log.Error("deployment %s:%s is not ready", ns, name)
logger.Error("Deployment %s:%s is not ready", ns, name)
return false
}

var WaitForScalingToComplete = func(ns, name string, timeout time.Duration, groupResourceSchema schema.GroupResource) bool {
log.Debug("check if scale object for crs %s:%s is ready", ns, name)
var WaitForScalingToComplete = func(ns, name string, timeout time.Duration, groupResourceSchema schema.GroupResource, logger *log.Logger) bool {
logger.Info("Check if scale object for CRs %s:%s is ready", ns, name)
clients := clientsholder.GetClientsHolder()
start := time.Now()
for time.Since(start) < timeout {
crScale, err := provider.GetUpdatedCrObject(clients.ScalingClient, ns, name, groupResourceSchema)
if err != nil {
log.Error("error while getting the scaling fields %v", err)
logger.Error("Error while getting the scaling fields %v", err)
} else if !crScale.IsScaleObjectReady() {
log.Error("%s is not ready yet", crScale.ToString())
logger.Warn("%s is not ready yet", crScale.ToString())
} else {
log.Debug("%s is ready!", crScale.ToString())
logger.Info("%s is ready!", crScale.ToString())
return true
}

time.Sleep(time.Second)
}
log.Error("timeout waiting for cr %s:%s scaling to be complete", ns, name)
logger.Error("Timeout waiting for CR %s:%s scaling to be complete", ns, name)
return false
}

func WaitForStatefulSetReady(ns, name string, timeout time.Duration) bool {
log.Debug("check if statefulset %s:%s is ready", ns, name)
func WaitForStatefulSetReady(ns, name string, timeout time.Duration, logger *log.Logger) bool {
logger.Debug("Check if statefulset %s:%s is ready", ns, name)
clients := clientsholder.GetClientsHolder()
start := time.Now()
for time.Since(start) < timeout {
ss, err := provider.GetUpdatedStatefulset(clients.K8sClient.AppsV1(), ns, name)
if err != nil {
log.Error("error while getting the %s, err: %v", ss.ToString(), err)
logger.Error("Error while getting the %s, err: %v", ss.ToString(), err)
} else if ss.IsStatefulSetReady() {
log.Debug("%s is ready", ss.ToString())
logger.Info("%s is ready", ss.ToString())
return true
}
time.Sleep(time.Second)
}
log.Error("statefulset %s:%s is not ready", ns, name)
logger.Error("Statefulset %s:%s is not ready", ns, name)
return false
}

Expand Down
70 changes: 35 additions & 35 deletions cnf-certification-test/lifecycle/scaling/crd_scaling.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,9 @@ import (
retry "k8s.io/client-go/util/retry"
)

func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupResource, timeout time.Duration) bool {
func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool {
if crScale == nil {
log.Error("cc object is nill")
logger.Error("CR object is nill")
return false
}
clients := clientsholder.GetClientsHolder()
Expand All @@ -49,38 +49,38 @@ func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupRes
if replicas <= 1 {
// scale up
replicas++
if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout) {
log.Error("Can not scale cr %s in namespace %s", name, namespace)
if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout, logger) {
logger.Error("Cannot scale CR %q in namespace %q", name, namespace)
return false
}
// scale down
replicas--
if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout) {
log.Error("Can not scale cr %s in namespace %s", name, namespace)
if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout, logger) {
logger.Error("Cannot scale CR %q in namespace %q", name, namespace)
return false
}
} else {
// scale down
replicas--
if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout) {
log.Error("Can not scale cr %s in namespace %s", name, namespace)
if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout, logger) {
logger.Error("Cannot scale CR %q in namespace %q", name, namespace)
return false
} // scale up
replicas++
if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout) {
log.Error("Can not scale cr %s in namespace %s", name, namespace)
if !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout, logger) {
logger.Error("Cannot scale CR %q in namespace %q", name, namespace)
return false
}
}

return true
}

func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, autoscalerpram *provider.CrScale, replicas int32, up bool, timeout time.Duration) bool {
func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, autoscalerpram *provider.CrScale, replicas int32, up bool, timeout time.Duration, logger *log.Logger) bool {
if up {
log.Debug("scale UP CRS to %d replicas", replicas)
logger.Debug("Scale UP CRS to %d replicas", replicas)
} else {
log.Debug("scale DOWN CRS to %d replicas", replicas)
logger.Debug("Scale DOWN CRS to %d replicas", replicas)
}

retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
Expand All @@ -94,25 +94,25 @@ func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, aut
scalingObject.Spec.Replicas = replicas
_, err = scalesGetter.Scales(namespace).Update(context.TODO(), rc, scalingObject, metav1.UpdateOptions{})
if err != nil {
log.Error("Can not update DynamicClient ")
logger.Error("Cannot update DynamicClient, err=%v", err)
return err
}
if !podsets.WaitForScalingToComplete(namespace, name, timeout, rc) {
log.Error("can not update cr %s:%s", namespace, name)
if !podsets.WaitForScalingToComplete(namespace, name, timeout, rc, logger) {
logger.Error("Cannot update CR %s:%s", namespace, name)
return errors.New("can not update cr")
}
return nil
})
if retryErr != nil {
log.Error("Can not scale DynamicClient, err=%v", retryErr)
logger.Error("Can notscale DynamicClient, err=%v", retryErr)
return false
}
return true
}

func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscaler, groupResourceSchema schema.GroupResource, timeout time.Duration) bool {
func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscaler, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool {
if cr == nil {
log.Error("cc object is nill")
logger.Error("CR object is nill")
return false
}
clients := clientsholder.GetClientsHolder()
Expand All @@ -131,61 +131,61 @@ func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscale
if replicas <= 1 {
// scale up
replicas++
log.Debug("scale UP HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas)
pass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema)
logger.Debug("Scale UP HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas)
pass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)
if !pass {
return false
}
// scale down
replicas--
log.Debug("scale DOWN HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas)
pass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, max, timeout, groupResourceSchema)
logger.Debug("Scale DOWN HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas)
pass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, max, timeout, groupResourceSchema, logger)
if !pass {
return false
}
} else {
// scale down
replicas--
log.Debug("scale DOWN HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas)
pass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema)
logger.Debug("Scale DOWN HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas)
pass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)
if !pass {
return false
}
// scale up
replicas++
log.Debug("scale UP HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas)
pass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema)
logger.Debug("Scale UP HPA %s:%s to min=%d max=%d", namespace, hpa.Name, replicas, replicas)
pass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)
if !pass {
return false
}
}
// back the min and the max value of the hpa
log.Debug("back HPA %s:%s to min=%d max=%d", namespace, hpa.Name, min, max)
return scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, max, timeout, groupResourceSchema)
logger.Debug("Back HPA %s:%s to min=%d max=%d", namespace, hpa.Name, min, max)
return scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, max, timeout, groupResourceSchema, logger)
}

func scaleHpaCRDHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, crName, namespace string, min, max int32, timeout time.Duration, groupResourceSchema schema.GroupResource) bool {
func scaleHpaCRDHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, crName, namespace string, min, max int32, timeout time.Duration, groupResourceSchema schema.GroupResource, logger *log.Logger) bool {
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
hpa, err := hpscaler.Get(context.TODO(), hpaName, metav1.GetOptions{})
if err != nil {
log.Error("Can not Update autoscaler to scale %s:%s, err=%v", namespace, crName, err)
logger.Error("Cannot update autoscaler to scale %s:%s, err=%v", namespace, crName, err)
return err
}
hpa.Spec.MinReplicas = &min
hpa.Spec.MaxReplicas = max
_, err = hpscaler.Update(context.TODO(), hpa, metav1.UpdateOptions{})
if err != nil {
log.Error("Can not Update autoscaler to scale %s:%s, err=%v", namespace, crName, err)
logger.Error("Cannot update autoscaler to scale %s:%s, err=%v", namespace, crName, err)
return err
}
if !podsets.WaitForScalingToComplete(namespace, crName, timeout, groupResourceSchema) {
log.Error("Can not update cr %s:%s", namespace, crName)
if !podsets.WaitForScalingToComplete(namespace, crName, timeout, groupResourceSchema, logger) {
logger.Error("Cannot update CR %s:%s", namespace, crName)
return errors.New("can not update cr")
}
return nil
})
if retryErr != nil {
log.Error("Can not scale hpa %s:%s, err=%v", namespace, hpaName, retryErr)
logger.Error("Cannot scale hpa %s:%s, err=%v", namespace, hpaName, retryErr)
return false
}
return true
Expand Down
Loading

0 comments on commit 3fc0142

Please sign in to comment.