From ee00b08927f48733b657d87ae84869239f1f5bf4 Mon Sep 17 00:00:00 2001
From: Enrico Candino
Date: Thu, 2 Jan 2025 22:22:18 +0100
Subject: [PATCH] Add real node resources to virtual node (#169)

* add real nodes capacity to virtual node

* distinguish capacity from allocatable node resources
---
 k3k-kubelet/kubelet.go            |  11 +++-
 k3k-kubelet/provider/configure.go | 104 +++++++++++++++++++++++++++---
 2 files changed, 105 insertions(+), 10 deletions(-)

diff --git a/k3k-kubelet/kubelet.go b/k3k-kubelet/kubelet.go
index a5b0c48..41e95fe 100644
--- a/k3k-kubelet/kubelet.go
+++ b/k3k-kubelet/kubelet.go
@@ -51,6 +51,8 @@ func init() {
 }
 
 type kubelet struct {
+	virtualCluster v1alpha1.Cluster
+
 	name       string
 	port       int
 	hostConfig *rest.Config
@@ -136,7 +138,14 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
 		return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
 	}
 
+	var virtualCluster v1alpha1.Cluster
+	if err := hostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &virtualCluster); err != nil {
+		return nil, errors.New("failed to get virtualCluster spec: " + err.Error())
+	}
+
 	return &kubelet{
+		virtualCluster: virtualCluster,
+
 		name:       c.NodeName,
 		hostConfig: hostConfig,
 		hostClient: hostClient,
@@ -215,7 +224,7 @@ func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP,
 			return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
 		}
 
-		provider.ConfigureNode(pc.Node, hostname, k.port, agentIP)
+		provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster)
 
 		return utilProvider, &provider.Node{}, nil
 	}
diff --git a/k3k-kubelet/provider/configure.go b/k3k-kubelet/provider/configure.go
index 67e19c0..669465e 100644
--- a/k3k-kubelet/provider/configure.go
+++ b/k3k-kubelet/provider/configure.go
@@ -1,12 +1,21 @@
 package provider
 
 import (
+	"context"
+	"time"
+
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	k3klog "github.com/rancher/k3k/pkg/log"
+	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
+	typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-func ConfigureNode(node *v1.Node, hostname string, servicePort int, ip string) {
+func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster) {
 	node.Status.Conditions = nodeConditions()
 	node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
 	node.Status.Addresses = []v1.NodeAddress{
@@ -19,15 +28,20 @@ func ConfigureNode(node *v1.Node, hostname string, servicePort int, ip string) {
 			Address: ip,
 		},
 	}
-	node.Status.Capacity = v1.ResourceList{
-		// TODO: Make this more dynamic based on the sum of existing nodes
-		v1.ResourceCPU:    resource.MustParse("8"),
-		v1.ResourceMemory: resource.MustParse("326350752922"),
-		v1.ResourcePods:   resource.MustParse("110"),
-	}
-	node.Status.Allocatable = node.Status.Capacity
+
 	node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
 	node.Labels["kubernetes.io/os"] = "linux"
+
+	updateNodeCapacityInterval := 10 * time.Second
+	ticker := time.NewTicker(updateNodeCapacityInterval)
+
+	go func() {
+		for range ticker.C {
+			if err := updateNodeCapacity(coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
+				logger.Error("error updating node capacity", err)
+			}
+		}
+	}()
 }
 
 // nodeConditions returns the basic conditions which mark the node as ready
@@ -75,3 +89,75 @@ func nodeConditions() []v1.NodeCondition {
 		},
 	}
 }
+
+// updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resource in the host nodes.
+// If the nodeLabels are specified only the matching nodes will be considered.
+func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
+	ctx := context.Background()
+
+	capacity, allocatable, err := getResourcesFromNodes(ctx, coreClient, nodeLabels)
+	if err != nil {
+		return err
+	}
+
+	var virtualNode corev1.Node
+	if err := virtualClient.Get(ctx, types.NamespacedName{Name: virtualNodeName}, &virtualNode); err != nil {
+		return err
+	}
+
+	virtualNode.Status.Capacity = capacity
+	virtualNode.Status.Allocatable = allocatable
+
+	return virtualClient.Status().Update(ctx, &virtualNode)
+}
+
+// getResourcesFromNodes will return a sum of all the resource capacity of the host nodes, and the allocatable resources.
+// If some node labels are specified only the matching nodes will be considered.
+func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (v1.ResourceList, v1.ResourceList, error) {
+	listOpts := metav1.ListOptions{}
+	if nodeLabels != nil {
+		labelSelector := metav1.LabelSelector{MatchLabels: nodeLabels}
+		listOpts.LabelSelector = labels.Set(labelSelector.MatchLabels).String()
+	}
+
+	nodeList, err := coreClient.Nodes().List(ctx, listOpts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// sum all
+	virtualCapacityResources := corev1.ResourceList{}
+	virtualAvailableResources := corev1.ResourceList{}
+
+	for _, node := range nodeList.Items {
+
+		// check if the node is Ready
+		for _, condition := range node.Status.Conditions {
+			if condition.Type != corev1.NodeReady {
+				continue
+			}
+
+			// if the node is not Ready then we can skip it
+			if condition.Status != corev1.ConditionTrue {
+				break
+			}
+		}
+
+		// add all the available metrics to the virtual node
+		for resourceName, resourceQuantity := range node.Status.Capacity {
+			virtualResource := virtualCapacityResources[resourceName]
+
+			(&virtualResource).Add(resourceQuantity)
+			virtualCapacityResources[resourceName] = virtualResource
+		}
+
+		for resourceName, resourceQuantity := range node.Status.Allocatable {
+			virtualResource := virtualAvailableResources[resourceName]
+
+			(&virtualResource).Add(resourceQuantity)
+			virtualAvailableResources[resourceName] = virtualResource
+		}
+	}
+
+	return virtualCapacityResources, virtualAvailableResources, nil
+}