Commit ee35373

Recursively get top pod owners.
If top owner is a CSV installed cluster-wide, allow cluster role-bindings
edcdavid committed Nov 21, 2023
1 parent 1ee5694 commit ee35373
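
In plain terms: a pod using a cluster role binding is normally flagged as non-compliant, but this commit adds an exception when one of the pod's top owners (found by recursively walking its owner references) is a ClusterServiceVersion (CSV) installed cluster-wide. A minimal sketch of the rule, using a hypothetical helper name rather than the exact code in the diff below:

	// allowClusterRoleBinding condenses the new rule (hypothetical helper):
	// a cluster role binding is only acceptable when the pod's ownership
	// tree tops out at a cluster-wide operator's CSV.
	func allowClusterRoleBinding(usesClusterRoleBinding, ownedByClusterWideCSV bool) bool {
		return !usesClusterRoleBinding || ownedByClusterWideCSV
	}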
Showing 3 changed files with 134 additions and 10 deletions.
68 changes: 58 additions & 10 deletions cnf-certification-test/accesscontrol/suite.go
@@ -22,6 +22,7 @@ import (
"strings"

"github.com/onsi/ginkgo/v2"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/test-network-function/cnf-certification-test/cnf-certification-test/accesscontrol/namespace"
"github.com/test-network-function/cnf-certification-test/cnf-certification-test/accesscontrol/rbac"
@@ -557,31 +558,78 @@ func testPodClusterRoleBindings(env *provider.TestEnvironment) {
logrus.Infof("There were %d cluster role bindings found in the cluster.", len(env.ClusterRoleBindings))

	for _, put := range env.Pods {
		ginkgo.By(fmt.Sprintf("Testing cluster role binding for pod: %s namespace: %s", put.Name, put.Namespace))
		result, roleRefName, err := put.IsUsingClusterRoleBinding(env.ClusterRoleBindings)
		if err != nil {
			logrus.Errorf("failed to determine if pod %s/%s is using a cluster role binding: %v", put.Namespace, put.Name, err)
			nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf("failed to determine if pod is using a cluster role binding: %v", err), false).
				AddField(testhelper.ClusterRoleName, roleRefName))
			continue
		}

		topOwners, err := put.GetTopOwner()
		if err != nil {
			nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf("Error getting top owners of this pod, err=%s", err), false).
				AddField(testhelper.ClusterRoleName, roleRefName))
			continue
		}
		logrus.Debugf("topOwners=%v", topOwners)

		csvNamespace, csvName, isOwnedByClusterWideOperator := OwnedByClusterWideOperator(topOwners, env)
		// Pod is using a cluster role binding but is owned by a cluster-wide operator, so it is allowed.
		if isOwnedByClusterWideOperator && result {
			tnf.Logf(logrus.InfoLevel, "%s is using a cluster role binding but is owned by CSV namespace=%s, name=%s", put.String(), csvNamespace, csvName)
			compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is using a cluster role binding but owned by a cluster-wide operator", true))
			continue
		}
		if result {
			// Pod was found to be using a cluster role binding. This is not allowed.
			// Flag this pod as a failed pod.
			tnf.Logf(logrus.WarnLevel, "%s is using a cluster role binding", put.String())
			nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is using a cluster role binding", false).
				AddField(testhelper.ClusterRoleName, roleRefName))
			continue
		}
		compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, "Pod is not using a cluster role binding", true))
	}
testhelper.AddTestResultReason(compliantObjects, nonCompliantObjects, tnf.ClaimFilePrintf, ginkgo.Fail)
}

// Returns true if the object identified by namespace and name is a CSV created by a cluster-wide operator
func IsCSVAndClusterWide(aNamespace, name string, env *provider.TestEnvironment) bool {
for _, op := range env.Operators {
if op.Csv != nil &&
op.Csv.Namespace == aNamespace &&
op.Csv.Name == name &&
(op.IsClusterWide || IsInstallModeMultiNamespace(op.Csv.Spec.InstallModes)) {
return true
}
}
return false
}

// Returns true if the CSV install modes include MultiNamespace or AllNamespaces
func IsInstallModeMultiNamespace(installModes []v1alpha1.InstallMode) bool {
for i := 0; i < len(installModes); i++ {
if installModes[i].Type == v1alpha1.InstallModeTypeAllNamespaces ||
installModes[i].Type == v1alpha1.InstallModeTypeMultiNamespace {
return true
}
}
return false
}
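
For context, a CSV's spec.installModes typically lists all four OLM install-mode types, each with a Supported flag; note that the helper above keys off the mode Type only. A small illustration with hypothetical values:

	// Hypothetical install-mode list, as it might appear on a CSV.
	modes := []v1alpha1.InstallMode{
		{Type: v1alpha1.InstallModeTypeOwnNamespace, Supported: true},
		{Type: v1alpha1.InstallModeTypeAllNamespaces, Supported: true},
	}
	// IsInstallModeMultiNamespace(modes) returns true here, because the list
	// contains the AllNamespaces type.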

// Returns true if one of the passed topOwners is a CSV installed by a cluster-wide operator
func OwnedByClusterWideOperator(topOwners map[string]provider.TopOwner, env *provider.TestEnvironment) (aNamespace, name string, found bool) {
for _, owner := range topOwners {
if IsCSVAndClusterWide(owner.Namespace, owner.Name, env) {
return owner.Namespace, owner.Name, true
}
}
return "", "", false
}

func testAutomountServiceToken(env *provider.TestEnvironment) {
ginkgo.By("Should have automountServiceAccountToken set to false")

7 changes: 7 additions & 0 deletions internal/clientsholder/clientsholder.go
@@ -43,6 +43,7 @@ import (
policyv1 "k8s.io/api/policy/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextv1fake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sFakeClient "k8s.io/client-go/kubernetes/fake"
networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1"
"k8s.io/client-go/rest"
@@ -63,6 +64,7 @@ type ClientsHolder struct {
MachineCfg ocpMachine.Interface
KubeConfig []byte
ready bool
GroupResources []*metav1.APIResourceList
}

var clientsHolder = ClientsHolder{}
@@ -292,6 +294,11 @@ func newClientsHolder(filenames ...string) (*ClientsHolder, error) { //nolint:fu
if err != nil {
return nil, fmt.Errorf("cannot instantiate discoveryClient: %s", err)
}
clientsHolder.GroupResources, err = discoveryClient.ServerPreferredResources()
if err != nil {
logrus.Errorf("Could not get list of resources in cluster")
}

resolver := scale.NewDiscoveryScaleKindResolver(discoveryClient)
gr, err := restmapper.GetAPIGroupResources(clientsHolder.K8sClient.Discovery())
if err != nil {
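A note on the discovery call above: ServerPreferredResources returns one metav1.APIResourceList per group/version, and it can return partial results together with a non-nil error, which is presumably why the failure is only logged rather than aborting client creation. A sketch of what GroupResources then holds (assuming a populated clientsHolder):

	for _, list := range clientsHolder.GroupResources {
		// list.GroupVersion is e.g. "apps/v1"; list.APIResources contains
		// entries such as {Kind: "Deployment", Name: "deployments"}.
		fmt.Println(list.GroupVersion)
	}
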
69 changes: 69 additions & 0 deletions pkg/provider/pods.go
Expand Up @@ -29,6 +29,7 @@ import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)

const (
@@ -404,3 +405,71 @@ func (p *Pod) IsRunAsUserID(uid int64) bool {
}
return *p.Pod.Spec.SecurityContext.RunAsUser == uid
}

// GetTopOwner returns the list of top owners of the pod
func (p *Pod) GetTopOwner() (topOwners map[string]TopOwner, err error) {
topOwners = make(map[string]TopOwner)
err = followOwnerReferences(topOwners, p.Namespace, p.OwnerReferences)
if err != nil {
return topOwners, fmt.Errorf("could not get top owners, err=%s", err)
}
return topOwners, nil
}
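
A usage sketch, assuming env is a populated *provider.TestEnvironment as in the suite code above:

	for _, put := range env.Pods {
		topOwners, err := put.GetTopOwner()
		if err != nil {
			logrus.Errorf("could not get top owners of %s/%s: %v", put.Namespace, put.Name, err)
			continue
		}
		for _, owner := range topOwners {
			// For a pod created by a Deployment, the top owner is typically the
			// Deployment itself; for a pod managed by an OLM-installed operator,
			// it can be the ClusterServiceVersion.
			logrus.Infof("pod %s top owner: kind=%s name=%s", put.Name, owner.Kind, owner.Name)
		}
	}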

// TopOwner describes a top owner of a pod
type TopOwner struct {
Kind string
Name string
Namespace string
}

// Recursively follows the ownership tree to find the top owners
func followOwnerReferences(topOwners map[string]TopOwner, namespace string, ownerRefs []metav1.OwnerReference) (err error) {
	clients := clientsholder.GetClientsHolder()
	for _, ownerRef := range ownerRefs {
		logrus.Debugf("-> Owner: %s/%s", ownerRef.Kind, ownerRef.Name)
		// Get the group/version/resource of the owner
		gvr := getResourceSchema(ownerRef.APIVersion, ownerRef.Kind)
		// Get the owner resource as an unstructured object
		resource, err := clients.DynamicClient.Resource(gvr).Namespace(namespace).Get(context.Background(), ownerRef.Name, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("could not get object %s/%s indicated by owner references, err=%s", ownerRef.Kind, ownerRef.Name, err)
		}
		// Get the owner references of the unstructured object
		ownerReferences := resource.GetOwnerReferences()
		// If there are no owner references, we have reached the top of this branch: record it
		if len(ownerReferences) == 0 {
			logrus.Debugf("reached the top of this branch: %s/%s", ownerRef.Kind, ownerRef.Name)
			topOwners[ownerRef.Name] = TopOwner{Kind: ownerRef.Kind, Name: ownerRef.Name, Namespace: namespace}
			continue
		}
		// Otherwise, keep following the ownership tree
		if err = followOwnerReferences(topOwners, namespace, ownerReferences); err != nil {
			return fmt.Errorf("error following owners of %s/%s, err=%s", ownerRef.Kind, ownerRef.Name, err)
		}
	}
	return nil
}

// getResourceSchema returns the GroupVersionResource matching the given apiVersion and kind
func getResourceSchema(apiVersion, kind string) (gvr schema.GroupVersionResource) {
	const groupVersionComponentsNumber = 2
	clients := clientsholder.GetClientsHolder()
	for _, gr := range clients.GroupResources {
		for i := 0; i < len(gr.APIResources); i++ {
			if gr.APIResources[i].Kind == kind && gr.GroupVersion == apiVersion {
				groupSplit := strings.Split(gr.GroupVersion, "/")
				if len(groupSplit) == groupVersionComponentsNumber {
					gvr.Group = groupSplit[0]
					gvr.Version = groupSplit[1]
				} else {
					// Core-group resources (e.g. "v1") have no group component.
					gvr.Version = gr.GroupVersion
				}
				gvr.Resource = gr.APIResources[i].Name
				return gvr
			}
		}
	}
	return gvr
}
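
For example, an owner reference with apiVersion "apps/v1" and kind "Deployment" resolves to the GVR {Group: "apps", Version: "v1", Resource: "deployments"}, which is exactly what the dynamic client call in followOwnerReferences needs:

	gvr := getResourceSchema("apps/v1", "Deployment")
	// gvr == schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}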
