From 1e5abca4037d90de5e3034a5ecdd25ba7baddd68 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Tue, 15 Oct 2024 18:42:33 +0300 Subject: [PATCH 01/15] Virtual kubelet controller integration Signed-off-by: galal-hussein --- charts/k3k/crds/k3k.io_clusters.yaml | 10 + main.go | 1 + pkg/apis/k3k.io/v1alpha1/types.go | 4 + pkg/controller/cluster/agent/agent.go | 261 +--------------------- pkg/controller/cluster/agent/service.go | 30 --- pkg/controller/cluster/agent/shared.go | 222 +++++++++++++++++++ pkg/controller/cluster/agent/virtual.go | 214 ++++++++++++++++++ pkg/controller/cluster/cluster.go | 60 ++--- pkg/controller/cluster/config/agent.go | 34 --- pkg/controller/cluster/config/server.go | 8 + pkg/controller/util/util.go | 1 + virtual-kubelet/main.go | 265 +++++++--------------- virtual-kubelet/pkg/config/config.go | 72 ++++++ virtual-kubelet/pkg/kubelet/kubelet.go | 278 ++++++++++++++++++++++++ 14 files changed, 918 insertions(+), 542 deletions(-) delete mode 100644 pkg/controller/cluster/agent/service.go create mode 100644 pkg/controller/cluster/agent/shared.go create mode 100644 pkg/controller/cluster/agent/virtual.go delete mode 100644 pkg/controller/cluster/config/agent.go create mode 100644 virtual-kubelet/pkg/config/config.go create mode 100644 virtual-kubelet/pkg/kubelet/kubelet.go diff --git a/charts/k3k/crds/k3k.io_clusters.yaml b/charts/k3k/crds/k3k.io_clusters.yaml index 41402b8..feecf19 100644 --- a/charts/k3k/crds/k3k.io_clusters.yaml +++ b/charts/k3k/crds/k3k.io_clusters.yaml @@ -135,6 +135,15 @@ spec: description: NodeSelector is the node selector that will be applied to all server/agent pods type: object + mode: + description: Mode is the cluster provisioning mode which can be either + "virtual" or "shared". 
Defaults to "shared" + type: string + x-kubernetes-validations: + - message: mode is immutable + rule: self == oldSelf + - message: invalid value for mode + rule: self == "virtual" || self == "shared" persistence: description: |- Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data @@ -191,6 +200,7 @@ spec: type: string required: - agents + - mode - servers - token - version diff --git a/main.go b/main.go index caade45..35e32b8 100644 --- a/main.go +++ b/main.go @@ -16,6 +16,7 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" ) diff --git a/pkg/apis/k3k.io/v1alpha1/types.go b/pkg/apis/k3k.io/v1alpha1/types.go index 665a977..e971ca3 100644 --- a/pkg/apis/k3k.io/v1alpha1/types.go +++ b/pkg/apis/k3k.io/v1alpha1/types.go @@ -52,6 +52,10 @@ type ClusterSpec struct { TLSSANs []string `json:"tlsSANs,omitempty"` // Addons is a list of secrets containing raw YAML which will be deployed in the virtual K3k cluster on startup. Addons []Addon `json:"addons,omitempty"` + // +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf" + // +kubebuilder:validation:XValidation:message="invalid value for mode",rule="self == 'virtual' || self == 'shared'" + // Mode is the cluster provisioning mode which can be either "virtual" or "shared". Defaults to "shared" + Mode string `json:"mode"` // Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data // persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field. 
diff --git a/pkg/controller/cluster/agent/agent.go b/pkg/controller/cluster/agent/agent.go index 4b96f98..e849f6c 100644 --- a/pkg/controller/cluster/agent/agent.go +++ b/pkg/controller/cluster/agent/agent.go @@ -2,260 +2,19 @@ package agent import ( "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/util" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" + "github.com/rancher/k3k/pkg/controller/cluster/config" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -const agentName = "k3k-agent" - -type Agent struct { - cluster *v1alpha1.Cluster -} - -func New(cluster *v1alpha1.Cluster) *Agent { - return &Agent{ - cluster: cluster, - } -} - -func (a *Agent) Deploy() *apps.Deployment { - image := util.K3SImage(a.cluster) - - const name = "k3k-agent" - selector := metav1.LabelSelector{ - MatchLabels: map[string]string{ - "cluster": a.cluster.Name, - "type": "agent", - }, - } - return &apps.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: a.cluster.Name + "-" + name, - Namespace: util.ClusterNamespace(a.cluster), - Labels: selector.MatchLabels, - }, - Spec: apps.DeploymentSpec{ - Replicas: a.cluster.Spec.Agents, - Selector: &selector, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: selector.MatchLabels, - }, - Spec: a.podSpec(image, name, a.cluster.Spec.AgentArgs, false, &selector), - }, - }, - } +type Agent interface { + Config() (ctrlruntimeclient.Object, error) + Resources() ([]ctrlruntimeclient.Object, error) } -func (a *Agent) StatefulAgent(cluster *v1alpha1.Cluster) *apps.StatefulSet { - image := util.K3SImage(cluster) - - selector := metav1.LabelSelector{ - MatchLabels: map[string]string{ - "cluster": cluster.Name, - "type": "agent", - }, +func New(cluster *v1alpha1.Cluster, serviceIP 
string) Agent { + if cluster.Spec.Mode == config.VirtualNodeMode { + return NewVirtualAgent(cluster, serviceIP) + } else { + return NewSharedAgent(cluster, serviceIP) } - return &apps.StatefulSet{ - TypeMeta: metav1.TypeMeta{ - Kind: "Statefulset", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: cluster.Name + "-" + agentName, - Namespace: util.ClusterNamespace(cluster), - Labels: selector.MatchLabels, - }, - Spec: apps.StatefulSetSpec{ - ServiceName: cluster.Name + "-" + agentName + "-headless", - Replicas: cluster.Spec.Agents, - Selector: &selector, - VolumeClaimTemplates: []v1.PersistentVolumeClaim{ - { - TypeMeta: metav1.TypeMeta{ - Kind: "PersistentVolumeClaim", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "varlibrancherk3s", - Namespace: util.ClusterNamespace(cluster), - }, - Spec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - StorageClassName: &cluster.Status.Persistence.StorageClassName, - Resources: v1.VolumeResourceRequirements{ - Requests: v1.ResourceList{ - "storage": resource.MustParse(cluster.Status.Persistence.StorageRequestSize), - }, - }, - }, - }, - { - TypeMeta: metav1.TypeMeta{ - Kind: "PersistentVolumeClaim", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "varlibkubelet", - Namespace: util.ClusterNamespace(cluster), - }, - Spec: v1.PersistentVolumeClaimSpec{ - Resources: v1.VolumeResourceRequirements{ - Requests: v1.ResourceList{ - "storage": resource.MustParse(cluster.Status.Persistence.StorageRequestSize), - }, - }, - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - StorageClassName: &cluster.Status.Persistence.StorageClassName, - }, - }, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: selector.MatchLabels, - }, - Spec: a.podSpec(image, agentName, cluster.Spec.AgentArgs, true, &selector), - }, - }, - } -} - -func (a *Agent) podSpec(image, name string, args []string, statefulSet 
bool, affinitySelector *metav1.LabelSelector) v1.PodSpec { - var limit v1.ResourceList - if a.cluster.Spec.Limit != nil && a.cluster.Spec.Limit.ServerLimit != nil { - limit = a.cluster.Spec.Limit.ServerLimit - } - args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...) - podSpec := v1.PodSpec{ - NodeSelector: a.cluster.Spec.NodeSelector, - Affinity: &v1.Affinity{ - PodAntiAffinity: &v1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ - { - LabelSelector: affinitySelector, - TopologyKey: "kubernetes.io/hostname", - }, - }, - }, - }, - Volumes: []v1.Volume{ - { - Name: "config", - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{ - SecretName: util.AgentConfigName(a.cluster), - Items: []v1.KeyToPath{ - { - Key: "config.yaml", - Path: "config.yaml", - }, - }, - }, - }, - }, - { - Name: "run", - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }, - }, - { - Name: "varrun", - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }, - }, - { - Name: "varlibcni", - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }, - }, - { - Name: "varlog", - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }, - }, - }, - Containers: []v1.Container{ - { - Name: name, - Image: image, - SecurityContext: &v1.SecurityContext{ - Privileged: ptr.To(true), - }, - Args: args, - Command: []string{ - "/bin/k3s", - }, - Resources: v1.ResourceRequirements{ - Limits: limit, - }, - VolumeMounts: []v1.VolumeMount{ - { - Name: "config", - MountPath: "/opt/rancher/k3s/", - ReadOnly: false, - }, - { - Name: "run", - MountPath: "/run", - ReadOnly: false, - }, - { - Name: "varrun", - MountPath: "/var/run", - ReadOnly: false, - }, - { - Name: "varlibcni", - MountPath: "/var/lib/cni", - ReadOnly: false, - }, - { - Name: "varlibkubelet", - MountPath: "/var/lib/kubelet", - ReadOnly: false, - }, - { - Name: "varlibrancherk3s", - 
MountPath: "/var/lib/rancher/k3s", - ReadOnly: false, - }, - { - Name: "varlog", - MountPath: "/var/log", - ReadOnly: false, - }, - }, - }, - }, - } - - if !statefulSet { - podSpec.Volumes = append(podSpec.Volumes, v1.Volume{ - Name: "varlibkubelet", - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }, - }, v1.Volume{ - - Name: "varlibrancherk3s", - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }, - }, - ) - } - - return podSpec } diff --git a/pkg/controller/cluster/agent/service.go b/pkg/controller/cluster/agent/service.go deleted file mode 100644 index b4a1d8c..0000000 --- a/pkg/controller/cluster/agent/service.go +++ /dev/null @@ -1,30 +0,0 @@ -package agent - -import ( - "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/util" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (a *Agent) StatefulAgentService(cluster *v1alpha1.Cluster) *v1.Service { - return &v1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: cluster.Name + "-" + agentName + "-headless", - Namespace: util.ClusterNamespace(cluster), - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeClusterIP, - ClusterIP: v1.ClusterIPNone, - Selector: map[string]string{ - "cluster": cluster.Name, - "role": "agent", - }, - Ports: []v1.ServicePort{}, - }, - } -} diff --git a/pkg/controller/cluster/agent/shared.go b/pkg/controller/cluster/agent/shared.go new file mode 100644 index 0000000..b2b5412 --- /dev/null +++ b/pkg/controller/cluster/agent/shared.go @@ -0,0 +1,222 @@ +package agent + +import ( + "fmt" + + "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + "github.com/rancher/k3k/pkg/controller/util" + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + 
virtualKubeletImage = "husseingalal/k3k:k3k-kubelet" +) + +type SharedAgent struct { + cluster *v1alpha1.Cluster + serviceIP string +} + +func NewSharedAgent(cluster *v1alpha1.Cluster, serviceIP string) Agent { + return &SharedAgent{ + cluster: cluster, + serviceIP: serviceIP, + } +} + +func (s *SharedAgent) Config() (ctrlruntimeclient.Object, error) { + config := sharedAgentData(s.cluster) + + return &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.AgentConfigName(s.cluster), + Namespace: util.ClusterNamespace(s.cluster), + }, + Data: map[string][]byte{ + "config.yaml": []byte(config), + }, + }, nil +} + +func sharedAgentData(cluster *v1alpha1.Cluster) string { + return fmt.Sprintf(`clusterName: %s +clusterNamespace: %s +nodeName: %s +token: %s`, cluster.Name, cluster.Namespace, cluster.Name+"-"+"k3k-kubelet", cluster.Spec.Token) +} + +func (s *SharedAgent) Resources() ([]ctrlruntimeclient.Object, error) { + var objs []ctrlruntimeclient.Object + objs = append(objs, s.serviceAccount(), s.role(), s.roleBinding(), s.deployment()) + return objs, nil +} + +func (s *SharedAgent) deployment() *apps.Deployment { + selector := metav1.LabelSelector{ + MatchLabels: map[string]string{ + "cluster": s.cluster.Name, + "type": "agent", + "mode": "shared", + }, + } + name := s.cluster.Name + "-" + "k3k-kubelet" + return &apps.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: util.ClusterNamespace(s.cluster), + Labels: selector.MatchLabels, + }, + Spec: apps.DeploymentSpec{ + Selector: &selector, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: selector.MatchLabels, + }, + Spec: s.podSpec(virtualKubeletImage, name, &selector), + }, + }, + } +} + +func (s *SharedAgent) podSpec(image, name string, affinitySelector *metav1.LabelSelector) v1.PodSpec { + args := 
[]string{"--config", "/opt/rancher/k3k/config.yaml"} + var limit v1.ResourceList + podSpec := v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: affinitySelector, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + ServiceAccountName: s.cluster.Name + "-" + "k3k-kubelet", + Volumes: []v1.Volume{ + { + Name: "config", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: util.AgentConfigName(s.cluster), + Items: []v1.KeyToPath{ + { + Key: "config.yaml", + Path: "config.yaml", + }, + }, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: name, + Image: image, + ImagePullPolicy: v1.PullAlways, + Resources: v1.ResourceRequirements{ + Limits: limit, + }, + Args: args, + Env: []v1.EnvVar{ + { + Name: "AGENT_POD_IP", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "config", + MountPath: "/opt/rancher/k3k/", + ReadOnly: false, + }, + }, + }, + }} + + return podSpec +} + +func (s *SharedAgent) serviceAccount() *v1.ServiceAccount { + return &v1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + Kind: "ServiceAccount", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: s.cluster.Name + "-" + "k3k-kubelet", + Namespace: util.ClusterNamespace(s.cluster), + }, + } +} + +func (s *SharedAgent) role() *rbacv1.Role { + return &rbacv1.Role{ + TypeMeta: metav1.TypeMeta{ + Kind: "Role", + APIVersion: "rbac.authorization.k8s.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: s.cluster.Name + "-" + "k3k-kubelet", + Namespace: util.ClusterNamespace(s.cluster), + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"*"}, + APIGroups: []string{""}, + Resources: []string{"pods"}, + }, + { + Verbs: []string{"get", "watch", "list"}, + APIGroups: []string{""}, + Resources: []string{"secrets", 
"services"}, + }, + { + Verbs: []string{"get", "watch", "list"}, + APIGroups: []string{"k3k.io"}, + Resources: []string{"clusters"}, + }, + }, + } +} + +func (s *SharedAgent) roleBinding() *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "RoleBinding", + APIVersion: "rbac.authorization.k8s.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: s.cluster.Name + "-" + "k3k-kubelet", + Namespace: util.ClusterNamespace(s.cluster), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: s.cluster.Name + "-" + "k3k-kubelet", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: s.cluster.Name + "-" + "k3k-kubelet", + Namespace: util.ClusterNamespace(s.cluster), + }, + }, + } +} diff --git a/pkg/controller/cluster/agent/virtual.go b/pkg/controller/cluster/agent/virtual.go new file mode 100644 index 0000000..05d12ec --- /dev/null +++ b/pkg/controller/cluster/agent/virtual.go @@ -0,0 +1,214 @@ +package agent + +import ( + "fmt" + + "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + "github.com/rancher/k3k/pkg/controller/util" + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +type VirtualAgent struct { + cluster *v1alpha1.Cluster + serviceIP string +} + +func NewVirtualAgent(cluster *v1alpha1.Cluster, serviceIP string) Agent { + return &VirtualAgent{ + cluster: cluster, + serviceIP: serviceIP, + } +} + +func (v *VirtualAgent) Config() (ctrlruntimeclient.Object, error) { + config := virtualAgentData(v.serviceIP, v.cluster.Spec.Token) + + return &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.AgentConfigName(v.cluster), + Namespace: util.ClusterNamespace(v.cluster), + }, + Data: map[string][]byte{ + "config.yaml": []byte(config), + }, + }, nil +} + 
+func (v *VirtualAgent) Resources() ([]ctrlruntimeclient.Object, error) { + var objs []ctrlruntimeclient.Object + objs = append(objs, v.deployment()) + return objs, nil +} + +func virtualAgentData(serviceIP, token string) string { + return fmt.Sprintf(`server: https://%s:6443 +token: %s +with-node-id: true`, serviceIP, token) +} + +func (v *VirtualAgent) deployment() *apps.Deployment { + image := util.K3SImage(v.cluster) + + const name = "k3k-agent" + selector := metav1.LabelSelector{ + MatchLabels: map[string]string{ + "cluster": v.cluster.Name, + "type": "agent", + "mode": "virtual", + }, + } + return &apps.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: v.cluster.Name + "-" + name, + Namespace: util.ClusterNamespace(v.cluster), + Labels: selector.MatchLabels, + }, + Spec: apps.DeploymentSpec{ + Replicas: v.cluster.Spec.Agents, + Selector: &selector, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: selector.MatchLabels, + }, + Spec: v.podSpec(image, name, v.cluster.Spec.AgentArgs, &selector), + }, + }, + } +} + +func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelector *metav1.LabelSelector) v1.PodSpec { + var limit v1.ResourceList + args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...) 
+ podSpec := v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: affinitySelector, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + Volumes: []v1.Volume{ + { + Name: "config", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: util.AgentConfigName(v.cluster), + Items: []v1.KeyToPath{ + { + Key: "config.yaml", + Path: "config.yaml", + }, + }, + }, + }, + }, + { + Name: "run", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "varrun", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "varlibcni", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "varlog", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "varlibkubelet", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "varlibrancherk3s", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + }, + Containers: []v1.Container{ + { + Name: name, + Image: image, + ImagePullPolicy: v1.PullAlways, + SecurityContext: &v1.SecurityContext{ + Privileged: ptr.To(true), + }, + Args: args, + Command: []string{ + "/bin/k3s", + }, + Resources: v1.ResourceRequirements{ + Limits: limit, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "config", + MountPath: "/opt/rancher/k3s/", + ReadOnly: false, + }, + { + Name: "run", + MountPath: "/run", + ReadOnly: false, + }, + { + Name: "varrun", + MountPath: "/var/run", + ReadOnly: false, + }, + { + Name: "varlibcni", + MountPath: "/var/lib/cni", + ReadOnly: false, + }, + { + Name: "varlibkubelet", + MountPath: "/var/lib/kubelet", + ReadOnly: false, + }, + { + Name: "varlibrancherk3s", + MountPath: "/var/lib/rancher/k3s", + ReadOnly: false, + }, + { + Name: "varlog", + MountPath: 
"/var/log", + ReadOnly: false, + }, + }, + }, + }, + } + + return podSpec +} diff --git a/pkg/controller/cluster/cluster.go b/pkg/controller/cluster/cluster.go index 6c4b03f..77023ed 100644 --- a/pkg/controller/cluster/cluster.go +++ b/pkg/controller/cluster/cluster.go @@ -159,7 +159,7 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1 return util.LogAndReturnErr("failed to create servers", err) } - if err := c.agent(ctx, cluster); err != nil { + if err := c.agent(ctx, cluster, serviceIP); err != nil { return util.LogAndReturnErr("failed to create agents", err) } @@ -223,17 +223,6 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v } } - // create agents configuration - agentsConfig := agentConfig(cluster, serviceIP) - if err := controllerutil.SetControllerReference(cluster, &agentsConfig, c.Scheme); err != nil { - return err - } - if err := c.Client.Create(ctx, &agentsConfig); err != nil { - if !apierrors.IsAlreadyExists(err) { - return err - } - } - return nil } @@ -290,41 +279,22 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste return nil } -func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster) error { - agent := agent.New(cluster) +func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error { + agent := agent.New(cluster, serviceIP) - agentsDeployment := agent.Deploy() - if err := controllerutil.SetControllerReference(cluster, agentsDeployment, c.Scheme); err != nil { + agentsConfig, err := agent.Config() + if err != nil { return err } - if err := c.ensure(ctx, agentsDeployment, false); err != nil { + agentResources, err := agent.Resources() + if err != nil { return err } - return nil -} -func agentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret { - config := agentData(serviceIP, cluster.Spec.Token) - - return v1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - 
APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: util.AgentConfigName(cluster), - Namespace: util.ClusterNamespace(cluster), - }, - Data: map[string][]byte{ - "config.yaml": []byte(config), - }, - } -} + agentResources = append(agentResources, agentsConfig) -func agentData(serviceIP, token string) string { - return fmt.Sprintf(`server: https://%s:6443 -token: %s`, serviceIP, token) + return c.ensureAll(ctx, cluster, agentResources) } func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster) error { @@ -334,6 +304,18 @@ func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster) error { return nil } +func (c *ClusterReconciler) ensureAll(ctx context.Context, cluster *v1alpha1.Cluster, objs []ctrlruntimeclient.Object) error { + for _, obj := range objs { + if err := controllerutil.SetControllerReference(cluster, obj, c.Scheme); err != nil { + return err + } + if err := c.ensure(ctx, obj, false); err != nil { + return err + } + } + return nil +} + func (c *ClusterReconciler) ensure(ctx context.Context, obj ctrlruntimeclient.Object, requiresRecreate bool) error { exists := true existingObject := obj.DeepCopyObject().(ctrlruntimeclient.Object) diff --git a/pkg/controller/cluster/config/agent.go b/pkg/controller/cluster/config/agent.go deleted file mode 100644 index 4f139de..0000000 --- a/pkg/controller/cluster/config/agent.go +++ /dev/null @@ -1,34 +0,0 @@ -package config - -import ( - "fmt" - - "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/util" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func Agent(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret { - config := agentData(serviceIP, cluster.Spec.Token) - - return v1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: util.AgentConfigName(cluster), - Namespace: util.ClusterNamespace(cluster), - }, - Data: map[string][]byte{ - "config.yaml": 
[]byte(config), - }, - } -} - -func agentData(serviceIP, token string) string { - return fmt.Sprintf(`server: https://%s:6443 -token: %s -with-node-id: true`, serviceIP, token) -} diff --git a/pkg/controller/cluster/config/server.go b/pkg/controller/cluster/config/server.go index 18993b8..9c1f1be 100644 --- a/pkg/controller/cluster/config/server.go +++ b/pkg/controller/cluster/config/server.go @@ -9,6 +9,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + VirtualKubeletMode = "shared" + VirtualNodeMode = "virtual" +) + // Server returns the secret for the server's config. Note that this doesn't set the ownerRef on the secret // to tie it back to the cluster. func Server(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret, error) { @@ -72,6 +77,9 @@ func serverOptions(cluster *v1alpha1.Cluster) string { opts = opts + "- " + addr + "\n" } } + if cluster.Spec.Mode == VirtualKubeletMode { + opts = opts + "disable-agent: true\negress-selector-mode: disabled\n" + } // TODO: Add extra args to the options return opts diff --git a/pkg/controller/util/util.go b/pkg/controller/util/util.go index 9352310..4c86651 100644 --- a/pkg/controller/util/util.go +++ b/pkg/controller/util/util.go @@ -13,6 +13,7 @@ import ( const ( namespacePrefix = "k3k-" k3SImageName = "rancher/k3s" + AdminCommonName = "system:admin" ServerPort = 6443 ) diff --git a/virtual-kubelet/main.go b/virtual-kubelet/main.go index 22ae471..2e214e0 100644 --- a/virtual-kubelet/main.go +++ b/virtual-kubelet/main.go @@ -2,213 +2,102 @@ package main import ( "context" - "crypto/tls" - "crypto/x509" - "encoding/json" "fmt" - "net" - "net/http" "os" - "time" - certutil "github.com/rancher/dynamiclistener/cert" - "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" - "github.com/rancher/k3k/pkg/controller/kubeconfig" - "github.com/rancher/k3k/pkg/controller/util" - "github.com/rancher/k3k/virtual-kubelet/pkg/provider" 
- "github.com/virtual-kubelet/virtual-kubelet/log" - "github.com/virtual-kubelet/virtual-kubelet/node" - "github.com/virtual-kubelet/virtual-kubelet/node/nodeutil" - "go.uber.org/zap" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" + "github.com/rancher/k3k/virtual-kubelet/pkg/config" + "github.com/rancher/k3k/virtual-kubelet/pkg/kubelet" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" ) -const ( - clusterNameEnv = "CLUSTER_NAME" - clusterNamespaceEnv = "CLUSTER_NAMESPACE" - hostKubeconfigEnv = "HOST_KUBECONFIG" - virtKubeconfigEnv = "VIRT_KUBECONFIG" - podIPEnv = "VIRT_POD_IP" - srvPort = 9443 - nodeName = "virtual-node" +var ( + configFile string + c config.Type ) func main() { - name, ok := os.LookupEnv(clusterNameEnv) - if !ok { - fmt.Printf("env var %s is required but was not provided \n", clusterNameEnv) - os.Exit(-1) - } - namespace, ok := os.LookupEnv(clusterNamespaceEnv) - if !ok { - fmt.Printf("env var %s is required but was not provided \n", clusterNamespaceEnv) - os.Exit(-1) - } - hostKubeconfigPath, ok := os.LookupEnv(hostKubeconfigEnv) - if !ok { - fmt.Printf("env var %s is required but was not provided \n", hostKubeconfigEnv) - os.Exit(-1) - } - virtKubeconfigPath, ok := os.LookupEnv(virtKubeconfigEnv) - if !ok { - fmt.Printf("env var %s is required but was not provided \n", hostKubeconfigPath) - os.Exit(-1) + app := cli.NewApp() + app.Name = "k3k-kubelet" + app.Usage = "virtual kubelet implementation k3k" + app.Flags = []cli.Flag{ + cli.StringFlag{ + Name: "cluster-name", + Usage: "Name of the k3k cluster", + Destination: &c.ClusterName, + EnvVar: "CLUSTER_NAME", + }, + cli.StringFlag{ + Name: "cluster-namespace", + Usage: "Namespace of the k3k cluster", + Destination: &c.ClusterNamespace, + EnvVar: "CLUSTER_NAMESPACE", + }, + cli.StringFlag{ + Name: "cluster-token", + Usage: 
"K3S token of the k3k cluster", + Destination: &c.Token, + EnvVar: "CLUSTER_TOKEN", + }, + cli.StringFlag{ + Name: "host-config-path", + Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config", + Destination: &c.HostConfigPath, + EnvVar: "HOST_KUBECONFIG", + }, + cli.StringFlag{ + Name: "virtual-config-path", + Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster", + Destination: &c.VirtualConfigPath, + EnvVar: "VIRT_KUBECONFIG", + }, + cli.StringFlag{ + Name: "kubelet-port", + Usage: "kubelet API port number", + Destination: &c.KubeletPort, + EnvVar: "SERVER_PORT", + Value: "9443", + }, + cli.StringFlag{ + Name: "agent-pod-ip", + Usage: "Agent Pod IP used for TLS SAN for the kubelet server", + Destination: &c.AgentPodIP, + EnvVar: "AGENT_POD_IP", + }, + cli.StringFlag{ + Name: "config", + Usage: "Path to k3k-kubelet config file", + Destination: &configFile, + EnvVar: "CONFIG_FILE", + Value: "/etc/rancher/k3k/config.yaml", + }, } - podIP, ok := os.LookupEnv(podIPEnv) - if !ok { - fmt.Printf("env var %s is required but was not provided \n", podIPEnv) - os.Exit(-1) + app.Action = Run + if err := app.Run(os.Args); err != nil { + logrus.Fatal(err) } - hostConfig, err := clientcmd.BuildConfigFromFlags("", hostKubeconfigPath) - if err != nil { - fmt.Printf("unable to load host kubeconfig at path %s, %s \n", hostKubeconfigPath, err) +} + +func Run(clx *cli.Context) { + if err := c.Parse(configFile); err != nil { + fmt.Printf("failed to parse config file %s: %v", configFile, err) os.Exit(-1) } - virtConfig, err := clientcmd.BuildConfigFromFlags("", virtKubeconfigPath) - if err != nil { - fmt.Printf("unable to load virtual kubeconfig at path %s, %s \n", virtKubeconfigPath, err) + + if err := c.Validate(); err != nil { + fmt.Printf("failed to validate config: %v", err) os.Exit(-1) } - virtClientset, err := kubernetes.NewForConfig(virtConfig) + k, err := kubelet.New(&c) if err 
!= nil { - fmt.Printf("unable to load virtual kubeconfig into kubernetes interface %s \n", err) + fmt.Printf("failed to create new virtual kubelet instance: %v", err) os.Exit(-1) } - node, err := nodeutil.NewNode("virtual-node", func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) { - utilProvider, err := provider.New(*hostConfig, namespace, name) - if err != nil { - return nil, nil, fmt.Errorf("unable to make nodeutil provider %w", err) - } - nodeProvider := provider.Node{} - provider.ConfigureNode(pc.Node, podIP, srvPort) - return utilProvider, &nodeProvider, nil - }, - nodeutil.WithClient(virtClientset), - func(c *nodeutil.NodeConfig) error { - c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort) - // set up the routes - mux := http.NewServeMux() - err := nodeutil.AttachProviderRoutes(mux)(c) - if err != nil { - return fmt.Errorf("unable to attach routes: %w", err) - } - c.Handler = mux - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - tlsConfig, err := loadTLSConfig(ctx, hostConfig, name, namespace, nodeName, podIP) - if err != nil { - return fmt.Errorf("unable to get tls config: %w", err) - } - c.TLSConfig = tlsConfig - return nil - }, - ) - if err != nil { - fmt.Printf("unable to start kubelet: %s \n", err.Error()) - os.Exit(-1) - } - // run the node async so that we can wait for it to be ready in another call - go func() { - ctx := context.Background() - logger, err := zap.NewProduction() - if err != nil { - fmt.Printf("unable to create logger: %s", err.Error()) - os.Exit(-1) - } - wrapped := LogWrapper{ - *logger.Sugar(), - } - ctx = log.WithLogger(ctx, &wrapped) - err = node.Run(ctx) - if err != nil { - fmt.Printf("node errored when running: %s \n", err.Error()) - os.Exit(-1) - } - }() - if err := node.WaitReady(context.Background(), time.Minute*1); err != nil { - fmt.Printf("node was not ready within timeout of 1 minute: %s \n", err.Error()) + if err := k.RegisterNode(c.KubeletPort, 
c.ClusterNamespace, c.ClusterName, c.AgentPodIP); err != nil { + fmt.Printf("failed to register new node: %v", err) os.Exit(-1) } - <-node.Done() - if err := node.Err(); err != nil { - fmt.Printf("node stopped with an error: %s \n", err.Error()) - os.Exit(-1) - } - fmt.Printf("node exited without an error") -} - -type LogWrapper struct { - zap.SugaredLogger -} - -func (l *LogWrapper) WithError(err error) log.Logger { - return l -} - -func (l *LogWrapper) WithField(string, interface{}) log.Logger { - return l -} -func (l *LogWrapper) WithFields(field log.Fields) log.Logger { - return l -} - -func loadTLSConfig(ctx context.Context, hostConfig *rest.Config, clusterName, clusterNamespace, nodeName, ipStr string) (*tls.Config, error) { - dynamic, err := dynamic.NewForConfig(hostConfig) - if err != nil { - return nil, fmt.Errorf("unable to get clientset for kubeconfig: %w", err) - } - clusterGVR := schema.GroupVersionResource{ - Group: "k3k.io", - Version: "v1alpha1", - Resource: "clusters", - } - dynCluster, err := dynamic.Resource(clusterGVR).Namespace(clusterNamespace).Get(ctx, clusterName, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("unable to get cluster: %w", err) - } - var cluster v1alpha1.Cluster - bytes, err := json.Marshal(dynCluster) - if err != nil { - return nil, fmt.Errorf("unable to marshall cluster: %w", err) - } - err = json.Unmarshal(bytes, &cluster) - if err != nil { - return nil, fmt.Errorf("unable to unmarshall cluster: %w", err) - } - - endpoint := fmt.Sprintf("%s.%s", util.ServerSvcName(&cluster), util.ClusterNamespace(&cluster)) - b, err := bootstrap.DecodedBootstrap(cluster.Spec.Token, endpoint) - if err != nil { - return nil, fmt.Errorf("unable to decode bootstrap: %w", err) - } - altNames := certutil.AltNames{ - IPs: []net.IP{net.ParseIP(ipStr)}, - } - cert, key, err := kubeconfig.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content) - if 
err != nil { - return nil, fmt.Errorf("unable to get cert and key: %w", err) - } - clientCert, err := tls.X509KeyPair(cert, key) - if err != nil { - return nil, fmt.Errorf("unable to get key pair: %w", err) - } - // create rootCA CertPool - certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content)) - if err != nil { - return nil, fmt.Errorf("unable to create certs: %w", err) - } - pool := x509.NewCertPool() - pool.AddCert(certs[0]) - return &tls.Config{ - RootCAs: pool, - Certificates: []tls.Certificate{clientCert}, - }, nil + k.Start(context.Background()) } diff --git a/virtual-kubelet/pkg/config/config.go b/virtual-kubelet/pkg/config/config.go new file mode 100644 index 0000000..9134d9a --- /dev/null +++ b/virtual-kubelet/pkg/config/config.go @@ -0,0 +1,72 @@ +package config + +import ( + "errors" + "os" + + "gopkg.in/yaml.v2" +) + +// Config has all virtual-kubelet startup options +type Type struct { + ClusterName string `yaml:"clusterName"` + ClusterNamespace string `yaml:"clusterNamespace"` + HostConfigPath string `yaml:"hostConfigPath"` + VirtualConfigPath string `yaml:"virtualConfigPath"` + KubeletPort string `yaml:"kubeletPort"` + NodeName string `yaml:"nodeName"` + AgentPodIP string `yaml:"agentPodIP"` + Token string `yaml:"token"` +} + +func (t *Type) UnmarshalYAML(data []byte) error { + var c Type + if err := yaml.Unmarshal(data, &c); err != nil { + return err + } + if t.ClusterName == "" { + t.ClusterName = c.ClusterName + } + if t.ClusterNamespace == "" { + t.ClusterNamespace = c.ClusterNamespace + } + if t.HostConfigPath == "" { + t.HostConfigPath = c.HostConfigPath + } + if t.VirtualConfigPath == "" { + t.VirtualConfigPath = c.VirtualConfigPath + } + if t.KubeletPort == "" { + t.KubeletPort = c.KubeletPort + } + if t.NodeName == "" { + t.NodeName = c.NodeName + } + + return nil +} + +func (t *Type) Validate() error { + if t.ClusterName == "" { + return errors.New("cluster name is not provided") + } + if t.ClusterNamespace == "" { + return 
errors.New("cluster namespace is not provided") + } + if t.AgentPodIP == "" { + return errors.New("agent POD IP is not provided") + } + return nil +} + +func (t *Type) Parse(path string) error { + if _, err := os.Stat(path); os.IsNotExist(err) { + return nil + } + + configFileBytes, err := os.ReadFile(path) + if err != nil { + return err + } + return t.UnmarshalYAML(configFileBytes) +} diff --git a/virtual-kubelet/pkg/kubelet/kubelet.go b/virtual-kubelet/pkg/kubelet/kubelet.go new file mode 100644 index 0000000..f4d505f --- /dev/null +++ b/virtual-kubelet/pkg/kubelet/kubelet.go @@ -0,0 +1,278 @@ +package kubelet + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/http" + "os" + "time" + + certutil "github.com/rancher/dynamiclistener/cert" + "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" + "github.com/rancher/k3k/pkg/controller/kubeconfig" + "github.com/rancher/k3k/pkg/controller/util" + "github.com/rancher/k3k/virtual-kubelet/pkg/config" + "github.com/rancher/k3k/virtual-kubelet/pkg/provider" + "github.com/virtual-kubelet/virtual-kubelet/log" + "github.com/virtual-kubelet/virtual-kubelet/node" + "github.com/virtual-kubelet/virtual-kubelet/node/nodeutil" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + Scheme = runtime.NewScheme() + backoff = wait.Backoff{ + Steps: 5, + Duration: 20 * time.Second, + Factor: 2, + Jitter: 0.1, + } +) + +func init() { + _ = clientgoscheme.AddToScheme(Scheme) + _ = v1alpha1.AddToScheme(Scheme) +} + +type Kubelet struct { + Name string + 
ServerName string + Port int + TLSConfig *tls.Config + HostConfig *rest.Config + HostClient ctrlruntimeclient.Client + VirtClient kubernetes.Interface + Node *nodeutil.Node +} + +func New(c *config.Type) (*Kubelet, error) { + hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath) + if err != nil { + return nil, err + } + + hostClient, err := ctrlruntimeclient.New(hostConfig, ctrlruntimeclient.Options{ + Scheme: Scheme, + }) + if err != nil { + return nil, err + } + + virtConfig, err := virtRestConfig(context.Background(), c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace) + if err != nil { + return nil, err + } + + virtClient, err := kubernetes.NewForConfig(virtConfig) + if err != nil { + return nil, err + } + return &Kubelet{ + Name: c.NodeName, + HostConfig: hostConfig, + HostClient: hostClient, + VirtClient: virtClient, + }, nil +} + +func (k *Kubelet) RegisterNode(srvPort, namespace, name, podIP string) error { + providerFunc := k.newProviderFunc(namespace, name, podIP) + nodeOpts := k.nodeOpts(srvPort, namespace, name, podIP) + + var err error + k.Node, err = nodeutil.NewNode(k.Name, providerFunc, nodeutil.WithClient(k.VirtClient), nodeOpts) + if err != nil { + return fmt.Errorf("unable to start kubelet: %v", err) + } + return nil +} + +func (k *Kubelet) Start(ctx context.Context) { + go func() { + ctx := context.Background() + logger, err := zap.NewProduction() + if err != nil { + fmt.Printf("unable to create logger: %s", err.Error()) + os.Exit(-1) + } + wrapped := LogWrapper{ + *logger.Sugar(), + } + ctx = log.WithLogger(ctx, &wrapped) + err = k.Node.Run(ctx) + if err != nil { + fmt.Printf("node errored when running: %s \n", err.Error()) + os.Exit(-1) + } + }() + if err := k.Node.WaitReady(context.Background(), time.Minute*1); err != nil { + fmt.Printf("node was not ready within timeout of 1 minute: %s \n", err.Error()) + os.Exit(-1) + } + <-k.Node.Done() + if err := k.Node.Err(); err != nil { + fmt.Printf("node stopped with 
an error: %s \n", err.Error()) + os.Exit(-1) + } + fmt.Printf("node exited without an error") +} + +func (k *Kubelet) newProviderFunc(namespace, name, podIP string) nodeutil.NewProviderFunc { + return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) { + utilProvider, err := provider.New(*k.HostConfig, namespace, name) + if err != nil { + return nil, nil, fmt.Errorf("unable to make nodeutil provider %w", err) + } + nodeProvider := provider.Node{} + + provider.ConfigureNode(pc.Node, podIP, k.Port) + return utilProvider, &nodeProvider, nil + } +} + +func (k *Kubelet) nodeOpts(srvPort, namespace, name, podIP string) nodeutil.NodeOpt { + return func(c *nodeutil.NodeConfig) error { + c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort) + // set up the routes + mux := http.NewServeMux() + err := nodeutil.AttachProviderRoutes(mux)(c) + if err != nil { + return fmt.Errorf("unable to attach routes: %w", err) + } + c.Handler = mux + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + tlsConfig, err := loadTLSConfig(ctx, k.HostClient, name, namespace, k.Name, podIP) + if err != nil { + return fmt.Errorf("unable to get tls config: %w", err) + } + c.TLSConfig = tlsConfig + return nil + } +} + +func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (*rest.Config, error) { + if virtualConfigPath != "" { + return clientcmd.BuildConfigFromFlags("", virtualConfigPath) + } + // virtual kubeconfig file is empty, trying to fetch the k3k cluster kubeconfig + var cluster v1alpha1.Cluster + if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil { + return nil, err + } + endpoint := fmt.Sprintf("%s.%s", util.ServerSvcName(&cluster), util.ClusterNamespace(&cluster)) + b, err := bootstrap.DecodedBootstrap(cluster.Spec.Token, endpoint) + if err != nil { + return nil, 
fmt.Errorf("unable to decode bootstrap: %w", err) + } + adminCert, adminKey, err := kubeconfig.CreateClientCertKey( + util.AdminCommonName, []string{user.SystemPrivilegedGroup}, + nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, time.Hour*24*time.Duration(365), + b.ClientCA.Content, + b.ClientCAKey.Content) + if err != nil { + return nil, err + } + + url := fmt.Sprintf("https://%s:%d", util.ServerSvcName(&cluster), util.ServerPort) + kubeconfigData, err := kubeconfigBytes(url, []byte(b.ServerCA.Content), adminCert, adminKey) + if err != nil { + return nil, err + } + return clientcmd.RESTConfigFromKubeConfig(kubeconfigData) +} + +func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte, error) { + config := clientcmdapi.NewConfig() + + cluster := clientcmdapi.NewCluster() + cluster.CertificateAuthorityData = serverCA + cluster.Server = url + + authInfo := clientcmdapi.NewAuthInfo() + authInfo.ClientCertificateData = clientCert + authInfo.ClientKeyData = clientKey + + context := clientcmdapi.NewContext() + context.AuthInfo = "default" + context.Cluster = "default" + + config.Clusters["default"] = cluster + config.AuthInfos["default"] = authInfo + config.Contexts["default"] = context + config.CurrentContext = "default" + + kubeconfig, err := clientcmd.Write(*config) + if err != nil { + return nil, err + } + + return kubeconfig, nil +} + +func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, ipStr string) (*tls.Config, error) { + var cluster v1alpha1.Cluster + if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil { + return nil, err + } + + endpoint := fmt.Sprintf("%s.%s", util.ServerSvcName(&cluster), util.ClusterNamespace(&cluster)) + b, err := bootstrap.DecodedBootstrap(cluster.Spec.Token, endpoint) + if err != nil { + return nil, fmt.Errorf("unable to decode bootstrap: %w", err) + } + altNames := 
certutil.AltNames{ + IPs: []net.IP{net.ParseIP(ipStr)}, + } + cert, key, err := kubeconfig.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content) + if err != nil { + return nil, fmt.Errorf("unable to get cert and key: %w", err) + } + clientCert, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, fmt.Errorf("unable to get key pair: %w", err) + } + // create rootCA CertPool + certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content)) + if err != nil { + return nil, fmt.Errorf("unable to create certs: %w", err) + } + pool := x509.NewCertPool() + pool.AddCert(certs[0]) + + return &tls.Config{ + RootCAs: pool, + Certificates: []tls.Certificate{clientCert}, + }, nil +} + +type LogWrapper struct { + zap.SugaredLogger +} + +func (l *LogWrapper) WithError(err error) log.Logger { + return l +} + +func (l *LogWrapper) WithField(string, interface{}) log.Logger { + return l +} + +func (l *LogWrapper) WithFields(field log.Fields) log.Logger { + return l +} From d58829c9c7a0dbdaa5035d49eb282761c3a237bd Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Tue, 15 Oct 2024 23:53:07 +0300 Subject: [PATCH 02/15] Add k3k-kubelet image to the release workflow Signed-off-by: galal-hussein --- k3k-kubelet/main.go | 103 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 k3k-kubelet/main.go diff --git a/k3k-kubelet/main.go b/k3k-kubelet/main.go new file mode 100644 index 0000000..b48c71d --- /dev/null +++ b/k3k-kubelet/main.go @@ -0,0 +1,103 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/rancher/k3k/k3k-kubelet/pkg/config" + "github.com/rancher/k3k/k3k-kubelet/pkg/kubelet" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" +) + +var ( + configFile string + c config.Type +) + +func main() { + app := cli.NewApp() + app.Name = "k3k-kubelet" + app.Usage = "virtual kubelet implementation k3k" + app.Flags = 
[]cli.Flag{ + cli.StringFlag{ + Name: "cluster-name", + Usage: "Name of the k3k cluster", + Destination: &c.ClusterName, + EnvVar: "CLUSTER_NAME", + }, + cli.StringFlag{ + Name: "cluster-namespace", + Usage: "Namespace of the k3k cluster", + Destination: &c.ClusterNamespace, + EnvVar: "CLUSTER_NAMESPACE", + }, + cli.StringFlag{ + Name: "cluster-token", + Usage: "K3S token of the k3k cluster", + Destination: &c.Token, + EnvVar: "CLUSTER_Token", + }, + cli.StringFlag{ + Name: "host-config-path", + Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config", + Destination: &c.HostConfigPath, + EnvVar: "HOST_KUBECONFIG", + }, + cli.StringFlag{ + Name: "virtual-config-path", + Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster", + Destination: &c.VirtualConfigPath, + EnvVar: "CLUSTER_NAME", + }, + cli.StringFlag{ + Name: "kubelet-port", + Usage: "kubelet API port number", + Destination: &c.KubeletPort, + EnvVar: "SERVER_PORT", + Value: "9443", + }, + cli.StringFlag{ + Name: "agent-pod-ip", + Usage: "Agent Pod IP used for TLS SAN for the kubelet server", + Destination: &c.AgentPodIP, + EnvVar: "AGENT_POD_IP", + }, + cli.StringFlag{ + Name: "config", + Usage: "Path to k3k-kubelet config file", + Destination: &configFile, + EnvVar: "CONFIG_FILE", + Value: "/etc/rancher/k3k/config.yaml", + }, + } + app.Action = Run + if err := app.Run(os.Args); err != nil { + logrus.Fatal(err) + } +} + +func Run(clx *cli.Context) { + if err := c.Parse(configFile); err != nil { + fmt.Printf("failed to parse config file %s: %v", configFile, err) + os.Exit(-1) + } + + if err := c.Validate(); err != nil { + fmt.Printf("failed to validate config: %v", err) + os.Exit(-1) + } + k, err := kubelet.New(&c) + if err != nil { + fmt.Printf("failed to create new virtual kubelet instance: %v", err) + os.Exit(-1) + } + + if err := k.RegisterNode(c.KubeletPort, c.ClusterNamespace, c.ClusterName, 
c.AgentPodIP); err != nil { + fmt.Printf("failed to register new node: %v", err) + os.Exit(-1) + } + + k.Start(context.Background()) +} From 1dccdb4bf45f04b592f981c669483a2d389300a2 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Tue, 15 Oct 2024 23:53:07 +0300 Subject: [PATCH 03/15] Add k3k-kubelet image to the release workflow Signed-off-by: galal-hussein --- .github/workflows/release.yml | 11 ++++++- {virtual-kubelet => k3k-kubelet}/README.md | 0 .../pkg/config/config.go | 0 .../pkg/kubelet/kubelet.go | 32 +++++++++++++------ .../pkg/provider/configure.go | 0 .../pkg/provider/node.go | 0 .../pkg/provider/provider.go | 0 .../pkg/provider/util.go | 0 pkg/controller/cluster/agent/shared.go | 2 +- virtual-kubelet/main.go | 4 +-- 10 files changed, 36 insertions(+), 13 deletions(-) rename {virtual-kubelet => k3k-kubelet}/README.md (100%) rename {virtual-kubelet => k3k-kubelet}/pkg/config/config.go (100%) rename {virtual-kubelet => k3k-kubelet}/pkg/kubelet/kubelet.go (91%) rename {virtual-kubelet => k3k-kubelet}/pkg/provider/configure.go (100%) rename {virtual-kubelet => k3k-kubelet}/pkg/provider/node.go (100%) rename {virtual-kubelet => k3k-kubelet}/pkg/provider/provider.go (100%) rename {virtual-kubelet => k3k-kubelet}/pkg/provider/util.go (100%) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 44c8c0e..ca3b8e0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -40,7 +40,7 @@ jobs: username: ${{ env.DOCKER_USERNAME }} password: ${{ env.DOCKER_PASSWORD }} - - name: Build container image + - name: Build controller image uses: docker/build-push-action@v5 with: context: . @@ -48,6 +48,15 @@ jobs: tags: rancher/k3k:${{ github.ref_name }} file: package/Dockerfile platforms: linux/amd64 + + - name: Build Virtual Kubelet image + uses: docker/build-push-action@v5 + with: + context: . 
+ push: true + tags: rancher/k3k:k3k-kubelet + file: package/Dockerfile + platforms: linux/amd64 \ No newline at end of file diff --git a/virtual-kubelet/README.md b/k3k-kubelet/README.md similarity index 100% rename from virtual-kubelet/README.md rename to k3k-kubelet/README.md diff --git a/virtual-kubelet/pkg/config/config.go b/k3k-kubelet/pkg/config/config.go similarity index 100% rename from virtual-kubelet/pkg/config/config.go rename to k3k-kubelet/pkg/config/config.go diff --git a/virtual-kubelet/pkg/kubelet/kubelet.go b/k3k-kubelet/pkg/kubelet/kubelet.go similarity index 91% rename from virtual-kubelet/pkg/kubelet/kubelet.go rename to k3k-kubelet/pkg/kubelet/kubelet.go index f4d505f..82684a3 100644 --- a/virtual-kubelet/pkg/kubelet/kubelet.go +++ b/k3k-kubelet/pkg/kubelet/kubelet.go @@ -11,12 +11,12 @@ import ( "time" certutil "github.com/rancher/dynamiclistener/cert" + "github.com/rancher/k3k/k3k-kubelet/pkg/config" + "github.com/rancher/k3k/k3k-kubelet/pkg/provider" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" "github.com/rancher/k3k/pkg/controller/kubeconfig" "github.com/rancher/k3k/pkg/controller/util" - "github.com/rancher/k3k/virtual-kubelet/pkg/config" - "github.com/rancher/k3k/virtual-kubelet/pkg/provider" "github.com/virtual-kubelet/virtual-kubelet/log" "github.com/virtual-kubelet/virtual-kubelet/node" "github.com/virtual-kubelet/virtual-kubelet/node/nodeutil" @@ -30,6 +30,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/util/retry" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -37,7 +38,7 @@ var ( Scheme = runtime.NewScheme() backoff = wait.Backoff{ Steps: 5, - Duration: 20 * time.Second, + Duration: 5 * time.Second, Factor: 2, Jitter: 0.1, } @@ -176,8 +177,14 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct return nil, err } 
endpoint := fmt.Sprintf("%s.%s", util.ServerSvcName(&cluster), util.ClusterNamespace(&cluster)) - b, err := bootstrap.DecodedBootstrap(cluster.Spec.Token, endpoint) - if err != nil { + var b *bootstrap.ControlRuntimeBootstrap + if err := retry.OnError(backoff, func(err error) bool { + return err != nil + }, func() error { + var err error + b, err = bootstrap.DecodedBootstrap(cluster.Spec.Token, endpoint) + return err + }); err != nil { return nil, fmt.Errorf("unable to decode bootstrap: %w", err) } adminCert, adminKey, err := kubeconfig.CreateClientCertKey( @@ -226,14 +233,21 @@ } func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, ipStr string) (*tls.Config, error) { - var cluster v1alpha1.Cluster + var ( + cluster v1alpha1.Cluster + b *bootstrap.ControlRuntimeBootstrap + ) if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil { return nil, err } - endpoint := fmt.Sprintf("%s.%s", util.ServerSvcName(&cluster), util.ClusterNamespace(&cluster)) - b, err := bootstrap.DecodedBootstrap(cluster.Spec.Token, endpoint) - if err != nil { + if err := retry.OnError(backoff, func(err error) bool { + return err != nil + }, func() error { + var err error + b, err = bootstrap.DecodedBootstrap(cluster.Spec.Token, endpoint) + return err + }); err != nil { return nil, fmt.Errorf("unable to decode bootstrap: %w", err) } altNames := certutil.AltNames{ diff --git a/virtual-kubelet/pkg/provider/configure.go b/k3k-kubelet/pkg/provider/configure.go similarity index 100% rename from virtual-kubelet/pkg/provider/configure.go rename to k3k-kubelet/pkg/provider/configure.go diff --git a/virtual-kubelet/pkg/provider/node.go b/k3k-kubelet/pkg/provider/node.go similarity index 100% rename from virtual-kubelet/pkg/provider/node.go rename to k3k-kubelet/pkg/provider/node.go diff --git
a/virtual-kubelet/pkg/provider/provider.go b/k3k-kubelet/pkg/provider/provider.go similarity index 100% rename from virtual-kubelet/pkg/provider/provider.go rename to k3k-kubelet/pkg/provider/provider.go diff --git a/virtual-kubelet/pkg/provider/util.go b/k3k-kubelet/pkg/provider/util.go similarity index 100% rename from virtual-kubelet/pkg/provider/util.go rename to k3k-kubelet/pkg/provider/util.go diff --git a/pkg/controller/cluster/agent/shared.go b/pkg/controller/cluster/agent/shared.go index b2b5412..d693b05 100644 --- a/pkg/controller/cluster/agent/shared.go +++ b/pkg/controller/cluster/agent/shared.go @@ -13,7 +13,7 @@ import ( ) const ( - virtualKubeletImage = "husseingalal/k3k:k3k-kubelet" + virtualKubeletImage = "rancher/k3k:k3k-kubelet" ) type SharedAgent struct { diff --git a/virtual-kubelet/main.go b/virtual-kubelet/main.go index 2e214e0..b48c71d 100644 --- a/virtual-kubelet/main.go +++ b/virtual-kubelet/main.go @@ -5,8 +5,8 @@ import ( "fmt" "os" - "github.com/rancher/k3k/virtual-kubelet/pkg/config" - "github.com/rancher/k3k/virtual-kubelet/pkg/kubelet" + "github.com/rancher/k3k/k3k-kubelet/pkg/config" + "github.com/rancher/k3k/k3k-kubelet/pkg/kubelet" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) From 0f82e3cddf6c531312fa8096e193be39009998a5 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Wed, 16 Oct 2024 00:19:50 +0300 Subject: [PATCH 04/15] Fix build/release workflow Signed-off-by: galal-hussein --- .github/workflows/release.yml | 2 +- ops/build | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ca3b8e0..f36d59a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -55,7 +55,7 @@ jobs: context: . 
push: true tags: rancher/k3k:k3k-kubelet - file: package/Dockerfile + file: package/Dockerfile.kubelet platforms: linux/amd64 diff --git a/ops/build b/ops/build index 1a9d9ca..7bbf7cc 100755 --- a/ops/build +++ b/ops/build @@ -24,7 +24,7 @@ if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then fi # build k3k-kubelet -CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet ./virtual-kubelet +CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet ./k3k-kubelet if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet-s390x CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet-arm64 From be255f7d46119b1cff1f9295bcbdce7f9b2e1d68 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Wed, 16 Oct 2024 01:13:02 +0300 Subject: [PATCH 05/15] Remove pkg directory in k3k-kubelet Signed-off-by: galal-hussein --- k3k-kubelet/{pkg => }/config/config.go | 0 k3k-kubelet/{pkg => }/kubelet/kubelet.go | 4 +- k3k-kubelet/main.go | 4 +- k3k-kubelet/{pkg => }/provider/configure.go | 0 k3k-kubelet/{pkg => }/provider/node.go | 0 k3k-kubelet/{pkg => }/provider/provider.go | 0 k3k-kubelet/{pkg => }/provider/util.go | 0 virtual-kubelet/main.go | 103 -------------------- 8 files changed, 4 insertions(+), 107 deletions(-) rename k3k-kubelet/{pkg => }/config/config.go (100%) rename k3k-kubelet/{pkg => }/kubelet/kubelet.go (98%) rename k3k-kubelet/{pkg => }/provider/configure.go (100%) rename k3k-kubelet/{pkg => }/provider/node.go (100%) rename k3k-kubelet/{pkg => }/provider/provider.go (100%) rename k3k-kubelet/{pkg => }/provider/util.go (100%) delete mode 100644 virtual-kubelet/main.go diff --git a/k3k-kubelet/pkg/config/config.go b/k3k-kubelet/config/config.go similarity index 100% rename from k3k-kubelet/pkg/config/config.go rename to k3k-kubelet/config/config.go diff --git 
a/k3k-kubelet/pkg/kubelet/kubelet.go b/k3k-kubelet/kubelet/kubelet.go similarity index 98% rename from k3k-kubelet/pkg/kubelet/kubelet.go rename to k3k-kubelet/kubelet/kubelet.go index 82684a3..ee54ec9 100644 --- a/k3k-kubelet/pkg/kubelet/kubelet.go +++ b/k3k-kubelet/kubelet/kubelet.go @@ -11,8 +11,8 @@ import ( "time" certutil "github.com/rancher/dynamiclistener/cert" - "github.com/rancher/k3k/k3k-kubelet/pkg/config" - "github.com/rancher/k3k/k3k-kubelet/pkg/provider" + "github.com/rancher/k3k/k3k-kubelet/config" + "github.com/rancher/k3k/k3k-kubelet/provider" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" "github.com/rancher/k3k/pkg/controller/kubeconfig" diff --git a/k3k-kubelet/main.go b/k3k-kubelet/main.go index b48c71d..302529f 100644 --- a/k3k-kubelet/main.go +++ b/k3k-kubelet/main.go @@ -5,8 +5,8 @@ import ( "fmt" "os" - "github.com/rancher/k3k/k3k-kubelet/pkg/config" - "github.com/rancher/k3k/k3k-kubelet/pkg/kubelet" + "github.com/rancher/k3k/k3k-kubelet/config" + "github.com/rancher/k3k/k3k-kubelet/kubelet" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) diff --git a/k3k-kubelet/pkg/provider/configure.go b/k3k-kubelet/provider/configure.go similarity index 100% rename from k3k-kubelet/pkg/provider/configure.go rename to k3k-kubelet/provider/configure.go diff --git a/k3k-kubelet/pkg/provider/node.go b/k3k-kubelet/provider/node.go similarity index 100% rename from k3k-kubelet/pkg/provider/node.go rename to k3k-kubelet/provider/node.go diff --git a/k3k-kubelet/pkg/provider/provider.go b/k3k-kubelet/provider/provider.go similarity index 100% rename from k3k-kubelet/pkg/provider/provider.go rename to k3k-kubelet/provider/provider.go diff --git a/k3k-kubelet/pkg/provider/util.go b/k3k-kubelet/provider/util.go similarity index 100% rename from k3k-kubelet/pkg/provider/util.go rename to k3k-kubelet/provider/util.go diff --git a/virtual-kubelet/main.go b/virtual-kubelet/main.go deleted file 
mode 100644 index b48c71d..0000000 --- a/virtual-kubelet/main.go +++ /dev/null @@ -1,103 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - - "github.com/rancher/k3k/k3k-kubelet/pkg/config" - "github.com/rancher/k3k/k3k-kubelet/pkg/kubelet" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -var ( - configFile string - c config.Type -) - -func main() { - app := cli.NewApp() - app.Name = "k3k-kubelet" - app.Usage = "virtual kubelet implementation k3k" - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "cluster-name", - Usage: "Name of the k3k cluster", - Destination: &c.ClusterName, - EnvVar: "CLUSTER_NAME", - }, - cli.StringFlag{ - Name: "cluster-namespace", - Usage: "Namespace of the k3k cluster", - Destination: &c.ClusterNamespace, - EnvVar: "CLUSTER_NAMESPACE", - }, - cli.StringFlag{ - Name: "cluster-token", - Usage: "K3S token of the k3k cluster", - Destination: &c.Token, - EnvVar: "CLUSTER_Token", - }, - cli.StringFlag{ - Name: "host-config-path", - Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config", - Destination: &c.HostConfigPath, - EnvVar: "HOST_KUBECONFIG", - }, - cli.StringFlag{ - Name: "virtual-config-path", - Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster", - Destination: &c.VirtualConfigPath, - EnvVar: "CLUSTER_NAME", - }, - cli.StringFlag{ - Name: "kubelet-port", - Usage: "kubelet API port number", - Destination: &c.KubeletPort, - EnvVar: "SERVER_PORT", - Value: "9443", - }, - cli.StringFlag{ - Name: "agent-pod-ip", - Usage: "Agent Pod IP used for TLS SAN for the kubelet server", - Destination: &c.AgentPodIP, - EnvVar: "AGENT_POD_IP", - }, - cli.StringFlag{ - Name: "config", - Usage: "Path to k3k-kubelet config file", - Destination: &configFile, - EnvVar: "CONFIG_FILE", - Value: "/etc/rancher/k3k/config.yaml", - }, - } - app.Action = Run - if err := app.Run(os.Args); err != nil { - logrus.Fatal(err) - } -} - -func 
Run(clx *cli.Context) { - if err := c.Parse(configFile); err != nil { - fmt.Printf("failed to parse config file %s: %v", configFile, err) - os.Exit(-1) - } - - if err := c.Validate(); err != nil { - fmt.Printf("failed to validate config: %v", err) - os.Exit(-1) - } - k, err := kubelet.New(&c) - if err != nil { - fmt.Printf("failed to create new virtual kubelet instance: %v", err) - os.Exit(-1) - } - - if err := k.RegisterNode(c.KubeletPort, c.ClusterNamespace, c.ClusterName, c.AgentPodIP); err != nil { - fmt.Printf("failed to register new node: %v", err) - os.Exit(-1) - } - - k.Start(context.Background()) -} From 6b02fd9634b1a0d7d2d23f24e71b7c6c862371a3 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Wed, 16 Oct 2024 01:14:05 +0300 Subject: [PATCH 06/15] rename Type to Config Signed-off-by: galal-hussein --- k3k-kubelet/config/config.go | 10 +++++----- k3k-kubelet/kubelet/kubelet.go | 2 +- k3k-kubelet/main.go | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/k3k-kubelet/config/config.go b/k3k-kubelet/config/config.go index 9134d9a..c5e200c 100644 --- a/k3k-kubelet/config/config.go +++ b/k3k-kubelet/config/config.go @@ -8,7 +8,7 @@ import ( ) // Config has all virtual-kubelet startup options -type Type struct { +type Config struct { ClusterName string `yaml:"clusterName"` ClusterNamespace string `yaml:"clusterNamespace"` HostConfigPath string `yaml:"hostConfigPath"` @@ -19,8 +19,8 @@ type Type struct { Token string `yaml:"token"` } -func (t *Type) UnmarshalYAML(data []byte) error { - var c Type +func (t *Config) UnmarshalYAML(data []byte) error { + var c Config if err := yaml.Unmarshal(data, &c); err != nil { return err } @@ -46,7 +46,7 @@ func (t *Type) UnmarshalYAML(data []byte) error { return nil } -func (t *Type) Validate() error { +func (t *Config) Validate() error { if t.ClusterName == "" { return errors.New("cluster name is not provided") } @@ -59,7 +59,7 @@ func (t *Type) Validate() error { return nil } -func (t *Type) Parse(path 
string) error { +func (t *Config) Parse(path string) error { if _, err := os.Stat(path); os.IsNotExist(err) { return nil } diff --git a/k3k-kubelet/kubelet/kubelet.go b/k3k-kubelet/kubelet/kubelet.go index ee54ec9..80ebf95 100644 --- a/k3k-kubelet/kubelet/kubelet.go +++ b/k3k-kubelet/kubelet/kubelet.go @@ -60,7 +60,7 @@ type Kubelet struct { Node *nodeutil.Node } -func New(c *config.Type) (*Kubelet, error) { +func New(c *config.Config) (*Kubelet, error) { hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath) if err != nil { return nil, err diff --git a/k3k-kubelet/main.go b/k3k-kubelet/main.go index 302529f..dd88a31 100644 --- a/k3k-kubelet/main.go +++ b/k3k-kubelet/main.go @@ -13,7 +13,7 @@ import ( var ( configFile string - c config.Type + c config.Config ) func main() { From 4035566f1ca797822af04c310b6481c7a93ea55c Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Wed, 16 Oct 2024 01:35:13 +0300 Subject: [PATCH 07/15] Move the kubelet and config outside of pkg Signed-off-by: galal-hussein --- k3k-kubelet/config.go | 72 ++++++++++++++++++++++++++++ k3k-kubelet/config/config.go | 72 ---------------------------- k3k-kubelet/{kubelet => }/kubelet.go | 59 +++++++++++------------ k3k-kubelet/main.go | 22 ++++----- 4 files changed, 110 insertions(+), 115 deletions(-) create mode 100644 k3k-kubelet/config.go delete mode 100644 k3k-kubelet/config/config.go rename k3k-kubelet/{kubelet => }/kubelet.go (84%) diff --git a/k3k-kubelet/config.go b/k3k-kubelet/config.go new file mode 100644 index 0000000..e75ae87 --- /dev/null +++ b/k3k-kubelet/config.go @@ -0,0 +1,72 @@ +package main + +import ( + "errors" + "os" + + "gopkg.in/yaml.v2" +) + +// Config has all virtual-kubelet startup options +type config struct { + clusterName string `yaml:"clusterName"` + clusterNamespace string `yaml:"clusterNamespace"` + hostConfigPath string `yaml:"hostConfigPath"` + virtualConfigPath string `yaml:"virtualConfigPath"` + kubeletPort string `yaml:"kubeletPort"` + nodeName 
string `yaml:"nodeName"` + agentPodIP string `yaml:"agentPodIP"` + token string `yaml:"token"` +} + +func (t *config) UnmarshalYAML(data []byte) error { + var c config + if err := yaml.Unmarshal(data, &c); err != nil { + return err + } + if t.clusterName == "" { + t.clusterName = c.clusterName + } + if t.clusterNamespace == "" { + t.clusterNamespace = c.clusterNamespace + } + if t.hostConfigPath == "" { + t.hostConfigPath = c.hostConfigPath + } + if t.virtualConfigPath == "" { + t.virtualConfigPath = c.virtualConfigPath + } + if t.kubeletPort == "" { + t.kubeletPort = c.kubeletPort + } + if t.nodeName == "" { + t.nodeName = c.nodeName + } + + return nil +} + +func (t *config) Validate() error { + if t.clusterName == "" { + return errors.New("cluster name is not provided") + } + if t.clusterNamespace == "" { + return errors.New("cluster namespace is not provided") + } + if t.agentPodIP == "" { + return errors.New("agent POD IP is not provided") + } + return nil +} + +func (t *config) Parse(path string) error { + if _, err := os.Stat(path); os.IsNotExist(err) { + return nil + } + + configFileBytes, err := os.ReadFile(path) + if err != nil { + return err + } + return t.UnmarshalYAML(configFileBytes) +} diff --git a/k3k-kubelet/config/config.go b/k3k-kubelet/config/config.go deleted file mode 100644 index c5e200c..0000000 --- a/k3k-kubelet/config/config.go +++ /dev/null @@ -1,72 +0,0 @@ -package config - -import ( - "errors" - "os" - - "gopkg.in/yaml.v2" -) - -// Config has all virtual-kubelet startup options -type Config struct { - ClusterName string `yaml:"clusterName"` - ClusterNamespace string `yaml:"clusterNamespace"` - HostConfigPath string `yaml:"hostConfigPath"` - VirtualConfigPath string `yaml:"virtualConfigPath"` - KubeletPort string `yaml:"kubeletPort"` - NodeName string `yaml:"nodeName"` - AgentPodIP string `yaml:"agentPodIP"` - Token string `yaml:"token"` -} - -func (t *Config) UnmarshalYAML(data []byte) error { - var c Config - if err := 
yaml.Unmarshal(data, &c); err != nil { - return err - } - if t.ClusterName == "" { - t.ClusterName = c.ClusterName - } - if t.ClusterNamespace == "" { - t.ClusterNamespace = c.ClusterNamespace - } - if t.HostConfigPath == "" { - t.HostConfigPath = c.HostConfigPath - } - if t.VirtualConfigPath == "" { - t.VirtualConfigPath = c.VirtualConfigPath - } - if t.KubeletPort == "" { - t.KubeletPort = c.KubeletPort - } - if t.NodeName == "" { - t.NodeName = c.NodeName - } - - return nil -} - -func (t *Config) Validate() error { - if t.ClusterName == "" { - return errors.New("cluster name is not provided") - } - if t.ClusterNamespace == "" { - return errors.New("cluster namespace is not provided") - } - if t.AgentPodIP == "" { - return errors.New("agent POD IP is not provided") - } - return nil -} - -func (t *Config) Parse(path string) error { - if _, err := os.Stat(path); os.IsNotExist(err) { - return nil - } - - configFileBytes, err := os.ReadFile(path) - if err != nil { - return err - } - return t.UnmarshalYAML(configFileBytes) -} diff --git a/k3k-kubelet/kubelet/kubelet.go b/k3k-kubelet/kubelet.go similarity index 84% rename from k3k-kubelet/kubelet/kubelet.go rename to k3k-kubelet/kubelet.go index 80ebf95..98866c5 100644 --- a/k3k-kubelet/kubelet/kubelet.go +++ b/k3k-kubelet/kubelet.go @@ -1,4 +1,4 @@ -package kubelet +package main import ( "context" @@ -11,7 +11,6 @@ import ( "time" certutil "github.com/rancher/dynamiclistener/cert" - "github.com/rancher/k3k/k3k-kubelet/config" "github.com/rancher/k3k/k3k-kubelet/provider" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" @@ -49,19 +48,17 @@ func init() { _ = v1alpha1.AddToScheme(Scheme) } -type Kubelet struct { - Name string - ServerName string - Port int - TLSConfig *tls.Config - HostConfig *rest.Config - HostClient ctrlruntimeclient.Client - VirtClient kubernetes.Interface - Node *nodeutil.Node +type kubelet struct { + name string + port int + 
hostConfig *rest.Config + hostClient ctrlruntimeclient.Client + virtClient kubernetes.Interface + node *nodeutil.Node } -func New(c *config.Config) (*Kubelet, error) { - hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath) +func newKubelet(c *config) (*kubelet, error) { + hostConfig, err := clientcmd.BuildConfigFromFlags("", c.hostConfigPath) if err != nil { return nil, err } @@ -73,7 +70,7 @@ func New(c *config.Config) (*Kubelet, error) { return nil, err } - virtConfig, err := virtRestConfig(context.Background(), c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace) + virtConfig, err := virtRestConfig(context.Background(), c.virtualConfigPath, hostClient, c.clusterName, c.clusterNamespace) if err != nil { return nil, err } @@ -82,27 +79,27 @@ func New(c *config.Config) (*Kubelet, error) { if err != nil { return nil, err } - return &Kubelet{ - Name: c.NodeName, - HostConfig: hostConfig, - HostClient: hostClient, - VirtClient: virtClient, + return &kubelet{ + name: c.nodeName, + hostConfig: hostConfig, + hostClient: hostClient, + virtClient: virtClient, }, nil } -func (k *Kubelet) RegisterNode(srvPort, namespace, name, podIP string) error { +func (k *kubelet) RegisterNode(srvPort, namespace, name, podIP string) error { providerFunc := k.newProviderFunc(namespace, name, podIP) nodeOpts := k.nodeOpts(srvPort, namespace, name, podIP) var err error - k.Node, err = nodeutil.NewNode(k.Name, providerFunc, nodeutil.WithClient(k.VirtClient), nodeOpts) + k.node, err = nodeutil.NewNode(k.name, providerFunc, nodeutil.WithClient(k.virtClient), nodeOpts) if err != nil { return fmt.Errorf("unable to start kubelet: %v", err) } return nil } -func (k *Kubelet) Start(ctx context.Context) { +func (k *kubelet) Start(ctx context.Context) { go func() { ctx := context.Background() logger, err := zap.NewProduction() @@ -114,38 +111,38 @@ func (k *Kubelet) Start(ctx context.Context) { *logger.Sugar(), } ctx = log.WithLogger(ctx, &wrapped) - err = 
k.Node.Run(ctx) + err = k.node.Run(ctx) if err != nil { fmt.Printf("node errored when running: %s \n", err.Error()) os.Exit(-1) } }() - if err := k.Node.WaitReady(context.Background(), time.Minute*1); err != nil { + if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil { fmt.Printf("node was not ready within timeout of 1 minute: %s \n", err.Error()) os.Exit(-1) } - <-k.Node.Done() - if err := k.Node.Err(); err != nil { + <-k.node.Done() + if err := k.node.Err(); err != nil { fmt.Printf("node stopped with an error: %s \n", err.Error()) os.Exit(-1) } fmt.Printf("node exited without an error") } -func (k *Kubelet) newProviderFunc(namespace, name, podIP string) nodeutil.NewProviderFunc { +func (k *kubelet) newProviderFunc(namespace, name, podIP string) nodeutil.NewProviderFunc { return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) { - utilProvider, err := provider.New(*k.HostConfig, namespace, name) + utilProvider, err := provider.New(*k.hostConfig, namespace, name) if err != nil { return nil, nil, fmt.Errorf("unable to make nodeutil provider %w", err) } nodeProvider := provider.Node{} - provider.ConfigureNode(pc.Node, podIP, k.Port) + provider.ConfigureNode(pc.Node, podIP, k.port) return utilProvider, &nodeProvider, nil } } -func (k *Kubelet) nodeOpts(srvPort, namespace, name, podIP string) nodeutil.NodeOpt { +func (k *kubelet) nodeOpts(srvPort, namespace, name, podIP string) nodeutil.NodeOpt { return func(c *nodeutil.NodeConfig) error { c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort) // set up the routes @@ -158,7 +155,7 @@ func (k *Kubelet) nodeOpts(srvPort, namespace, name, podIP string) nodeutil.Node ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - tlsConfig, err := loadTLSConfig(ctx, k.HostClient, name, namespace, k.Name, podIP) + tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, podIP) if err != nil { return fmt.Errorf("unable to get tls 
config: %w", err) } diff --git a/k3k-kubelet/main.go b/k3k-kubelet/main.go index dd88a31..9138819 100644 --- a/k3k-kubelet/main.go +++ b/k3k-kubelet/main.go @@ -5,15 +5,13 @@ import ( "fmt" "os" - "github.com/rancher/k3k/k3k-kubelet/config" - "github.com/rancher/k3k/k3k-kubelet/kubelet" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) var ( configFile string - c config.Config + c config ) func main() { @@ -24,44 +22,44 @@ func main() { cli.StringFlag{ Name: "cluster-name", Usage: "Name of the k3k cluster", - Destination: &c.ClusterName, + Destination: &c.clusterName, EnvVar: "CLUSTER_NAME", }, cli.StringFlag{ Name: "cluster-namespace", Usage: "Namespace of the k3k cluster", - Destination: &c.ClusterNamespace, + Destination: &c.clusterNamespace, EnvVar: "CLUSTER_NAMESPACE", }, cli.StringFlag{ Name: "cluster-token", Usage: "K3S token of the k3k cluster", - Destination: &c.Token, + Destination: &c.token, EnvVar: "CLUSTER_Token", }, cli.StringFlag{ Name: "host-config-path", Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config", - Destination: &c.HostConfigPath, + Destination: &c.hostConfigPath, EnvVar: "HOST_KUBECONFIG", }, cli.StringFlag{ Name: "virtual-config-path", Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster", - Destination: &c.VirtualConfigPath, + Destination: &c.virtualConfigPath, EnvVar: "CLUSTER_NAME", }, cli.StringFlag{ Name: "kubelet-port", Usage: "kubelet API port number", - Destination: &c.KubeletPort, + Destination: &c.kubeletPort, EnvVar: "SERVER_PORT", Value: "9443", }, cli.StringFlag{ Name: "agent-pod-ip", Usage: "Agent Pod IP used for TLS SAN for the kubelet server", - Destination: &c.AgentPodIP, + Destination: &c.agentPodIP, EnvVar: "AGENT_POD_IP", }, cli.StringFlag{ @@ -88,13 +86,13 @@ func Run(clx *cli.Context) { fmt.Printf("failed to validate config: %v", err) os.Exit(-1) } - k, err := kubelet.New(&c) + k, err := newKubelet(&c) if 
err != nil { fmt.Printf("failed to create new virtual kubelet instance: %v", err) os.Exit(-1) } - if err := k.RegisterNode(c.KubeletPort, c.ClusterNamespace, c.ClusterName, c.AgentPodIP); err != nil { + if err := k.RegisterNode(c.kubeletPort, c.clusterNamespace, c.clusterName, c.agentPodIP); err != nil { fmt.Printf("failed to register new node: %v", err) os.Exit(-1) } From d17ae34cf985736e34281751894f5abfafa9cfa4 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Wed, 16 Oct 2024 22:00:56 +0300 Subject: [PATCH 08/15] fix comments Signed-off-by: galal-hussein --- pkg/controller/cluster/agent/shared.go | 9 ++++----- pkg/controller/cluster/config/server.go | 5 ++--- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/pkg/controller/cluster/agent/shared.go b/pkg/controller/cluster/agent/shared.go index d693b05..39dbcb3 100644 --- a/pkg/controller/cluster/agent/shared.go +++ b/pkg/controller/cluster/agent/shared.go @@ -13,7 +13,8 @@ import ( ) const ( - virtualKubeletImage = "rancher/k3k:k3k-kubelet" + virtualKubeletImage = "rancher/k3k:k3k-kubelet" + virtualKubeletConfigPath = "/opt/rancher/k3k/config.yaml" ) type SharedAgent struct { @@ -91,9 +92,9 @@ func (s *SharedAgent) deployment() *apps.Deployment { } func (s *SharedAgent) podSpec(image, name string, affinitySelector *metav1.LabelSelector) v1.PodSpec { - args := []string{"--config", "/opt/rancher/k3k/config.yaml"} + args := []string{"--config", virtualKubeletConfigPath} var limit v1.ResourceList - podSpec := v1.PodSpec{ + return v1.PodSpec{ Affinity: &v1.Affinity{ PodAntiAffinity: &v1.PodAntiAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ @@ -149,8 +150,6 @@ func (s *SharedAgent) podSpec(image, name string, affinitySelector *metav1.Label }, }, }} - - return podSpec } func (s *SharedAgent) serviceAccount() *v1.ServiceAccount { diff --git a/pkg/controller/cluster/config/server.go b/pkg/controller/cluster/config/server.go index 9c1f1be..3d563c3 100644 --- 
a/pkg/controller/cluster/config/server.go +++ b/pkg/controller/cluster/config/server.go @@ -10,8 +10,7 @@ import ( ) const ( - VirtualKubeletMode = "shared" - VirtualNodeMode = "virtual" + VirtualNodeMode = "virtual" ) // Server returns the secret for the server's config. Note that this doesn't set the ownerRef on the secret @@ -77,7 +76,7 @@ func serverOptions(cluster *v1alpha1.Cluster) string { opts = opts + "- " + addr + "\n" } } - if cluster.Spec.Mode == VirtualKubeletMode { + if cluster.Spec.Mode != VirtualNodeMode { opts = opts + "disable-agent: true\negress-selector-mode: disabled\n" } // TODO: Add extra args to the options From 070389c6c76f6804ae810890b3ad79ff94047edd Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Fri, 18 Oct 2024 02:16:24 +0300 Subject: [PATCH 09/15] Fix naming throughout the package Signed-off-by: galal-hussein --- .github/workflows/release.yml | 2 +- charts/k3k/templates/deployment.yaml | 4 +- charts/k3k/values.yaml | 6 + cli/cmds/cluster/create.go | 22 +--- cli/cmds/kubeconfig/kubeconfig.go | 27 ++--- go.mod | 2 +- k3k-kubelet/config.go | 58 +++++----- k3k-kubelet/kubelet.go | 57 ++++------ k3k-kubelet/main.go | 24 ++-- k3k-kubelet/provider/configure.go | 6 +- k3k-kubelet/provider/provider.go | 24 +--- main.go | 22 ++-- pkg/controller/cluster/agent/agent.go | 18 ++- pkg/controller/cluster/agent/shared.go | 100 +++++++++++------ pkg/controller/cluster/agent/virtual.go | 23 ++-- pkg/controller/cluster/cluster.go | 67 +++++------ pkg/controller/cluster/pod.go | 21 ++-- .../cluster/server/bootstrap/bootstrap.go | 8 +- .../{config/server.go => server/config.go} | 40 ++++--- pkg/controller/cluster/server/ingress.go | 19 ++-- pkg/controller/cluster/server/server.go | 52 ++++----- pkg/controller/cluster/server/service.go | 30 +++-- pkg/controller/clusterset/clusterset.go | 4 +- pkg/controller/clusterset/node.go | 4 +- pkg/controller/controller.go | 106 ++++++++++++++++++ pkg/controller/kubeconfig/kubeconfig.go | 13 ++- 
pkg/controller/util/util.go | 87 -------------- 27 files changed, 453 insertions(+), 393 deletions(-) rename pkg/controller/cluster/{config/server.go => server/config.go} (66%) create mode 100644 pkg/controller/controller.go delete mode 100644 pkg/controller/util/util.go diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f36d59a..91e7bdd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -54,7 +54,7 @@ jobs: with: context: . push: true - tags: rancher/k3k:k3k-kubelet + tags: rancher/k3k:k3k-kubelet-dev file: package/Dockerfile.kubelet platforms: linux/amd64 diff --git a/charts/k3k/templates/deployment.yaml b/charts/k3k/templates/deployment.yaml index fa42807..9e749d9 100644 --- a/charts/k3k/templates/deployment.yaml +++ b/charts/k3k/templates/deployment.yaml @@ -19,9 +19,11 @@ spec: - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} name: {{ .Chart.Name }} - environment: + env: - name: CLUSTER_CIDR value: {{ .Values.host.clusterCIDR }} + - name: SHARED_AGENT_IMAGE + value: "{{ .Values.sharedAgent.image.repository }}:{{ .Values.sharedAgent.image.tag }}" ports: - containerPort: 8080 name: https diff --git a/charts/k3k/values.yaml b/charts/k3k/values.yaml index 224bdf3..4fcb08a 100644 --- a/charts/k3k/values.yaml +++ b/charts/k3k/values.yaml @@ -22,3 +22,9 @@ serviceAccount: # The name of the service account to use. 
# If not set and create is true, a name is generated using the fullname template name: "" + +# configuration related to the shared agent mode in k3k +sharedAgent: + image: + repository: "rancher/k3k" + tag: "k3k-kubelet-dev" \ No newline at end of file diff --git a/cli/cmds/cluster/create.go b/cli/cmds/cluster/create.go index bd2b9b8..6c95374 100644 --- a/cli/cmds/cluster/create.go +++ b/cli/cmds/cluster/create.go @@ -7,20 +7,18 @@ import ( "os" "path/filepath" "strings" - "time" "github.com/rancher/k3k/cli/cmds" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + "github.com/rancher/k3k/pkg/controller" "github.com/rancher/k3k/pkg/controller/cluster" "github.com/rancher/k3k/pkg/controller/cluster/server" "github.com/rancher/k3k/pkg/controller/kubeconfig" - "github.com/rancher/k3k/pkg/controller/util" "github.com/sirupsen/logrus" "github.com/urfave/cli" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/user" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/clientcmd" @@ -28,15 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -var ( - Scheme = runtime.NewScheme() - backoff = wait.Backoff{ - Steps: 5, - Duration: 20 * time.Second, - Factor: 2, - Jitter: 0.1, - } -) +var Scheme = runtime.NewScheme() func init() { _ = clientgoscheme.AddToScheme(Scheme) @@ -120,7 +110,7 @@ var ( func create(clx *cli.Context) error { ctx := context.Background() - if err := validateCreateFlags(clx); err != nil { + if err := validateCreateFlags(); err != nil { return err } @@ -173,13 +163,13 @@ func create(clx *cli.Context) error { logrus.Infof("Extracting Kubeconfig for [%s] cluster", name) cfg := &kubeconfig.KubeConfig{ - CN: util.AdminCommonName, + CN: controller.AdminCommonName, ORG: []string{user.SystemPrivilegedGroup}, ExpiryDate: 0, } logrus.Infof("waiting for cluster to be available..") var 
kubeconfig []byte - if err := retry.OnError(backoff, apierrors.IsNotFound, func() error { + if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error { kubeconfig, err = cfg.Extract(ctx, ctrlClient, cluster, host[0]) if err != nil { return err @@ -203,7 +193,7 @@ func create(clx *cli.Context) error { return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfig, 0644) } -func validateCreateFlags(clx *cli.Context) error { +func validateCreateFlags() error { if persistenceType != server.EphermalNodesType && persistenceType != server.DynamicNodesType { return errors.New("invalid persistence type") diff --git a/cli/cmds/kubeconfig/kubeconfig.go b/cli/cmds/kubeconfig/kubeconfig.go index a78e15d..d527abb 100644 --- a/cli/cmds/kubeconfig/kubeconfig.go +++ b/cli/cmds/kubeconfig/kubeconfig.go @@ -10,14 +10,13 @@ import ( "github.com/rancher/k3k/cli/cmds" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + "github.com/rancher/k3k/pkg/controller" "github.com/rancher/k3k/pkg/controller/kubeconfig" - "github.com/rancher/k3k/pkg/controller/util" "github.com/sirupsen/logrus" "github.com/urfave/cli" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/user" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/clientcmd" @@ -31,19 +30,13 @@ func init() { } var ( - Scheme = runtime.NewScheme() - name string - cn string - org cli.StringSlice - altNames cli.StringSlice - expirationDays int64 - configName string - backoff = wait.Backoff{ - Steps: 5, - Duration: 20 * time.Second, - Factor: 2, - Jitter: 0.1, - } + Scheme = runtime.NewScheme() + name string + cn string + org cli.StringSlice + altNames cli.StringSlice + expirationDays int64 + configName string generateKubeconfigFlags = []cli.Flag{ cli.StringFlag{ Name: "name", @@ -59,7 +52,7 @@ var ( Name: "cn", Usage: "Common name (CN) of the generated 
certificates for the kubeconfig", Destination: &cn, - Value: util.AdminCommonName, + Value: controller.AdminCommonName, }, cli.StringSliceFlag{ Name: "org", @@ -141,7 +134,7 @@ func generate(clx *cli.Context) error { } logrus.Infof("waiting for cluster to be available..") var kubeconfig []byte - if err := retry.OnError(backoff, apierrors.IsNotFound, func() error { + if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error { kubeconfig, err = cfg.Extract(ctx, ctrlClient, &cluster, host[0]) if err != nil { return err diff --git a/go.mod b/go.mod index 5651293..64204ef 100644 --- a/go.mod +++ b/go.mod @@ -25,6 +25,7 @@ require ( go.etcd.io/etcd/api/v3 v3.5.14 go.etcd.io/etcd/client/v3 v3.5.14 go.uber.org/zap v1.26.0 + gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.1 k8s.io/apimachinery v0.31.1 k8s.io/apiserver v0.31.0 @@ -120,7 +121,6 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.29.2 // indirect k8s.io/component-base v0.29.2 // indirect k8s.io/kms v0.29.2 // indirect diff --git a/k3k-kubelet/config.go b/k3k-kubelet/config.go index e75ae87..c4beba1 100644 --- a/k3k-kubelet/config.go +++ b/k3k-kubelet/config.go @@ -9,52 +9,56 @@ import ( // Config has all virtual-kubelet startup options type config struct { - clusterName string `yaml:"clusterName"` - clusterNamespace string `yaml:"clusterNamespace"` - hostConfigPath string `yaml:"hostConfigPath"` - virtualConfigPath string `yaml:"virtualConfigPath"` - kubeletPort string `yaml:"kubeletPort"` - nodeName string `yaml:"nodeName"` - agentPodIP string `yaml:"agentPodIP"` - token string `yaml:"token"` + ClusterName string `yaml:"clusterName,omitempty"` + ClusterNamespace string `yaml:"clusterNamespace,omitempty"` + NodeName string `yaml:"nodeName,omitempty"` + Token string `yaml:"token,omitempty"` + AgentHostname string 
`yaml:"agentHostname,omitempty"` + HostConfigPath string `yaml:"hostConfigPath,omitempty"` + VirtualConfigPath string `yaml:"virtualConfigPath,omitempty"` + KubeletPort string `yaml:"kubeletPort,omitempty"` } -func (t *config) UnmarshalYAML(data []byte) error { +func (t *config) unmarshalYAML(data []byte) error { var c config + if err := yaml.Unmarshal(data, &c); err != nil { return err } - if t.clusterName == "" { - t.clusterName = c.clusterName + + if t.ClusterName == "" { + t.ClusterName = c.ClusterName + } + if t.ClusterNamespace == "" { + t.ClusterNamespace = c.ClusterNamespace } - if t.clusterNamespace == "" { - t.clusterNamespace = c.clusterNamespace + if t.HostConfigPath == "" { + t.HostConfigPath = c.HostConfigPath } - if t.hostConfigPath == "" { - t.hostConfigPath = c.hostConfigPath + if t.VirtualConfigPath == "" { + t.VirtualConfigPath = c.VirtualConfigPath } - if t.virtualConfigPath == "" { - t.virtualConfigPath = c.virtualConfigPath + if t.KubeletPort == "" { + t.KubeletPort = c.KubeletPort } - if t.kubeletPort == "" { - t.kubeletPort = c.kubeletPort + if t.AgentHostname == "" { + t.AgentHostname = c.AgentHostname } - if t.nodeName == "" { - t.nodeName = c.nodeName + if t.NodeName == "" { + t.NodeName = c.NodeName } - return nil } func (t *config) Validate() error { - if t.clusterName == "" { + if t.ClusterName == "" { return errors.New("cluster name is not provided") } - if t.clusterNamespace == "" { + if t.ClusterNamespace == "" { return errors.New("cluster namespace is not provided") } - if t.agentPodIP == "" { - return errors.New("agent POD IP is not provided") + if t.AgentHostname == "" { + return errors.New("agent Hostname is not provided") } return nil } @@ -68,5 +72,5 @@ func (t *config) Parse(path string) error { if err != nil { return err } - return t.UnmarshalYAML(configFileBytes) + return t.unmarshalYAML(configFileBytes) } diff --git a/k3k-kubelet/kubelet.go b/k3k-kubelet/kubelet.go index 98866c5..884d2a9 100644 --- a/k3k-kubelet/kubelet.go 
+++ b/k3k-kubelet/kubelet.go @@ -5,7 +5,6 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "net" "net/http" "os" "time" @@ -13,16 +12,16 @@ import ( certutil "github.com/rancher/dynamiclistener/cert" "github.com/rancher/k3k/k3k-kubelet/provider" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + "github.com/rancher/k3k/pkg/controller" + "github.com/rancher/k3k/pkg/controller/cluster/server" "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" "github.com/rancher/k3k/pkg/controller/kubeconfig" - "github.com/rancher/k3k/pkg/controller/util" "github.com/virtual-kubelet/virtual-kubelet/log" "github.com/virtual-kubelet/virtual-kubelet/node" "github.com/virtual-kubelet/virtual-kubelet/node/nodeutil" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/client-go/kubernetes" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -33,15 +32,7 @@ import ( ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -var ( - Scheme = runtime.NewScheme() - backoff = wait.Backoff{ - Steps: 5, - Duration: 5 * time.Second, - Factor: 2, - Jitter: 0.1, - } -) +var Scheme = runtime.NewScheme() func init() { _ = clientgoscheme.AddToScheme(Scheme) @@ -58,7 +49,7 @@ type kubelet struct { } func newKubelet(c *config) (*kubelet, error) { - hostConfig, err := clientcmd.BuildConfigFromFlags("", c.hostConfigPath) + hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath) if err != nil { return nil, err } @@ -70,7 +61,7 @@ func newKubelet(c *config) (*kubelet, error) { return nil, err } - virtConfig, err := virtRestConfig(context.Background(), c.virtualConfigPath, hostClient, c.clusterName, c.clusterNamespace) + virtConfig, err := virtRestConfig(context.Background(), c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace) if err != nil { return nil, err } @@ -80,16 +71,16 @@ func newKubelet(c *config) (*kubelet, error) { return nil, 
err } return &kubelet{ - name: c.nodeName, + name: c.NodeName, hostConfig: hostConfig, hostClient: hostClient, virtClient: virtClient, }, nil } -func (k *kubelet) RegisterNode(srvPort, namespace, name, podIP string) error { - providerFunc := k.newProviderFunc(namespace, name, podIP) - nodeOpts := k.nodeOpts(srvPort, namespace, name, podIP) +func (k *kubelet) RegisterNode(srvPort, namespace, name, hostname string) error { + providerFunc := k.newProviderFunc(namespace, name, hostname) + nodeOpts := k.nodeOpts(srvPort, namespace, name, hostname) var err error k.node, err = nodeutil.NewNode(k.name, providerFunc, nodeutil.WithClient(k.virtClient), nodeOpts) @@ -129,7 +120,7 @@ func (k *kubelet) Start(ctx context.Context) { fmt.Printf("node exited without an error") } -func (k *kubelet) newProviderFunc(namespace, name, podIP string) nodeutil.NewProviderFunc { +func (k *kubelet) newProviderFunc(namespace, name, hostname string) nodeutil.NewProviderFunc { return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) { utilProvider, err := provider.New(*k.hostConfig, namespace, name) if err != nil { @@ -137,12 +128,12 @@ func (k *kubelet) newProviderFunc(namespace, name, podIP string) nodeutil.NewPro } nodeProvider := provider.Node{} - provider.ConfigureNode(pc.Node, podIP, k.port) + provider.ConfigureNode(pc.Node, hostname, k.port) return utilProvider, &nodeProvider, nil } } -func (k *kubelet) nodeOpts(srvPort, namespace, name, podIP string) nodeutil.NodeOpt { +func (k *kubelet) nodeOpts(srvPort, namespace, name, hostname string) nodeutil.NodeOpt { return func(c *nodeutil.NodeConfig) error { c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort) // set up the routes @@ -155,7 +146,7 @@ func (k *kubelet) nodeOpts(srvPort, namespace, name, podIP string) nodeutil.Node ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, podIP) + tlsConfig, err := 
loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname) if err != nil { return fmt.Errorf("unable to get tls config: %w", err) } @@ -164,18 +155,18 @@ func (k *kubelet) nodeOpts(srvPort, namespace, name, podIP string) nodeutil.Node } } -func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (*rest.Config, error) { - if virtualConfigPath != "" { - return clientcmd.BuildConfigFromFlags("", virtualConfigPath) +func virtRestConfig(ctx context.Context, VirtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (*rest.Config, error) { + if VirtualConfigPath != "" { + return clientcmd.BuildConfigFromFlags("", VirtualConfigPath) } // virtual kubeconfig file is empty, trying to fetch the k3k cluster kubeconfig var cluster v1alpha1.Cluster if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil { return nil, err } - endpoint := fmt.Sprintf("%s.%s", util.ServerSvcName(&cluster), util.ClusterNamespace(&cluster)) + endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace) var b *bootstrap.ControlRuntimeBootstrap - if err := retry.OnError(backoff, func(err error) bool { + if err := retry.OnError(controller.Backoff, func(err error) bool { return err == nil }, func() error { var err error @@ -185,7 +176,7 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct return nil, fmt.Errorf("unable to decode bootstrap: %w", err) } adminCert, adminKey, err := kubeconfig.CreateClientCertKey( - util.AdminCommonName, []string{user.SystemPrivilegedGroup}, + controller.AdminCommonName, []string{user.SystemPrivilegedGroup}, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, time.Hour*24*time.Duration(356), b.ClientCA.Content, b.ClientCAKey.Content) @@ -193,7 +184,7 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, 
hostClient ct return nil, err } - url := fmt.Sprintf("https://%s:%d", util.ServerSvcName(&cluster), util.ServerPort) + url := fmt.Sprintf("https://%s:%d", server.ServiceName(cluster.Name), server.ServerPort) kubeconfigData, err := kubeconfigBytes(url, []byte(b.ServerCA.Content), adminCert, adminKey) if err != nil { return nil, err @@ -229,7 +220,7 @@ func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte return kubeconfig, nil } -func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, ipStr string) (*tls.Config, error) { +func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, hostname string) (*tls.Config, error) { var ( cluster v1alpha1.Cluster b *bootstrap.ControlRuntimeBootstrap @@ -237,8 +228,8 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil { return nil, err } - endpoint := fmt.Sprintf("%s.%s", util.ServerSvcName(&cluster), util.ClusterNamespace(&cluster)) - if err := retry.OnError(backoff, func(err error) bool { + endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace) + if err := retry.OnError(controller.Backoff, func(err error) bool { return err != nil }, func() error { var err error @@ -248,7 +239,7 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu return nil, fmt.Errorf("unable to decode bootstrap: %w", err) } altNames := certutil.AltNames{ - IPs: []net.IP{net.ParseIP(ipStr)}, + DNSNames: []string{hostname}, } cert, key, err := kubeconfig.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content) if err != nil { diff --git a/k3k-kubelet/main.go b/k3k-kubelet/main.go index 9138819..a6ddf84 100644 --- 
a/k3k-kubelet/main.go +++ b/k3k-kubelet/main.go @@ -22,45 +22,45 @@ func main() { cli.StringFlag{ Name: "cluster-name", Usage: "Name of the k3k cluster", - Destination: &c.clusterName, + Destination: &c.ClusterName, EnvVar: "CLUSTER_NAME", }, cli.StringFlag{ Name: "cluster-namespace", Usage: "Namespace of the k3k cluster", - Destination: &c.clusterNamespace, + Destination: &c.ClusterNamespace, EnvVar: "CLUSTER_NAMESPACE", }, cli.StringFlag{ Name: "cluster-token", Usage: "K3S token of the k3k cluster", - Destination: &c.token, - EnvVar: "CLUSTER_Token", + Destination: &c.Token, + EnvVar: "CLUSTER_TOKEN", }, cli.StringFlag{ Name: "host-config-path", Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config", - Destination: &c.hostConfigPath, + Destination: &c.HostConfigPath, EnvVar: "HOST_KUBECONFIG", }, cli.StringFlag{ Name: "virtual-config-path", Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster", - Destination: &c.virtualConfigPath, + Destination: &c.VirtualConfigPath, EnvVar: "CLUSTER_NAME", }, cli.StringFlag{ Name: "kubelet-port", Usage: "kubelet API port number", - Destination: &c.kubeletPort, + Destination: &c.KubeletPort, EnvVar: "SERVER_PORT", Value: "9443", }, cli.StringFlag{ - Name: "agent-pod-ip", - Usage: "Agent Pod IP used for TLS SAN for the kubelet server", - Destination: &c.agentPodIP, - EnvVar: "AGENT_POD_IP", + Name: "agent-hostname", + Usage: "Agent Hostname used for TLS SAN for the kubelet server", + Destination: &c.AgentHostname, + EnvVar: "AGENT_HOSTNAME", }, cli.StringFlag{ Name: "config", @@ -92,7 +92,7 @@ func Run(clx *cli.Context) { os.Exit(-1) } - if err := k.RegisterNode(c.kubeletPort, c.clusterNamespace, c.clusterName, c.agentPodIP); err != nil { + if err := k.RegisterNode(c.KubeletPort, c.ClusterNamespace, c.ClusterName, c.AgentHostname); err != nil { fmt.Printf("failed to register new node: %v", err) os.Exit(-1) } diff --git 
a/k3k-kubelet/provider/configure.go b/k3k-kubelet/provider/configure.go index 44db2f5..45d93a4 100644 --- a/k3k-kubelet/provider/configure.go +++ b/k3k-kubelet/provider/configure.go @@ -6,13 +6,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func ConfigureNode(node *v1.Node, podIP string, servicePort int) { +func ConfigureNode(node *v1.Node, hostname string, servicePort int) { node.Status.Conditions = nodeConditions() node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort) node.Status.Addresses = []v1.NodeAddress{ { - Type: v1.NodeExternalIP, - Address: podIP, + Type: v1.NodeHostName, + Address: hostname, }, } node.Status.Capacity = v1.ResourceList{ diff --git a/k3k-kubelet/provider/provider.go b/k3k-kubelet/provider/provider.go index f836dcc..e8fbf86 100644 --- a/k3k-kubelet/provider/provider.go +++ b/k3k-kubelet/provider/provider.go @@ -2,15 +2,13 @@ package provider import ( "context" - "crypto/sha256" - "encoding/hex" "fmt" "io" "net/http" "strconv" - "strings" dto "github.com/prometheus/client_model/go" + "github.com/rancher/k3k/pkg/controller" "github.com/virtual-kubelet/virtual-kubelet/node/api" "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1" "go.uber.org/zap" @@ -313,23 +311,5 @@ func (p *Provider) translateFrom(hostPod *corev1.Pod) *corev1.Pod { } func (p *Provider) hostName(virtualNamespace string, virtualName string) string { - return safeConcatName(p.ClusterName, p.ClusterNamespace, virtualNamespace, virtualName) -} - -// safeConcatName concatenates the given strings and ensures the returned name is under 64 characters -// by cutting the string off at 57 characters and setting the last 6 with an encoded version of the concatenated string. 
-func safeConcatName(name ...string) string { - fullPath := strings.Join(name, "-") - if len(fullPath) < 64 { - return fullPath - } - digest := sha256.Sum256([]byte(fullPath)) - // since we cut the string in the middle, the last char may not be compatible with what is expected in k8s - // we are checking and if necessary removing the last char - c := fullPath[56] - if 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return fullPath[0:57] + "-" + hex.EncodeToString(digest[0:])[0:5] - } - - return fullPath[0:56] + "-" + hex.EncodeToString(digest[0:])[0:6] + return controller.SafeConcatName(p.ClusterName, p.ClusterNamespace, virtualNamespace, virtualName) } diff --git a/main.go b/main.go index 35e32b8..8388440 100644 --- a/main.go +++ b/main.go @@ -27,10 +27,11 @@ const ( ) var ( - scheme = runtime.NewScheme() - clusterCIDR string - kubeconfig string - flags = []cli.Flag{ + scheme = runtime.NewScheme() + clusterCIDR string + sharedAgentImage string + kubeconfig string + flags = []cli.Flag{ cli.StringFlag{ Name: "kubeconfig", EnvVar: "KUBECONFIG", @@ -42,7 +43,15 @@ var ( EnvVar: "CLUSTER_CIDR", Usage: "Cluster CIDR to be added to the networkpolicy of the clustersets", Destination: &clusterCIDR, - }} + }, + cli.StringFlag{ + Name: "shared-agent-image", + EnvVar: "SHARED_AGENT_IMAGE", + Usage: "K3K Virtual Kubelet image ", + Value: "rancher/k3k:k3k-kubelet-dev", + Destination: &sharedAgentImage, + }, + } ) func init() { @@ -77,8 +86,7 @@ func run(clx *cli.Context) error { if err != nil { return fmt.Errorf("Failed to create new controller runtime manager: %v", err) } - - if err := cluster.Add(ctx, mgr); err != nil { + if err := cluster.Add(ctx, mgr, sharedAgentImage); err != nil { return fmt.Errorf("Failed to add the new cluster controller: %v", err) } diff --git a/pkg/controller/cluster/agent/agent.go b/pkg/controller/cluster/agent/agent.go index e849f6c..95d5771 100644 --- a/pkg/controller/cluster/agent/agent.go +++ b/pkg/controller/cluster/agent/agent.go @@ -2,19 
+2,29 @@ package agent import ( "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/cluster/config" + "github.com/rancher/k3k/pkg/controller" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + configName = "agent-config" +) + type Agent interface { + Name() string Config() (ctrlruntimeclient.Object, error) Resources() ([]ctrlruntimeclient.Object, error) } -func New(cluster *v1alpha1.Cluster, serviceIP string) Agent { - if cluster.Spec.Mode == config.VirtualNodeMode { +func New(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage string) Agent { + if cluster.Spec.Mode == VirtualNodeMode { return NewVirtualAgent(cluster, serviceIP) } else { - return NewSharedAgent(cluster, serviceIP) + return NewSharedAgent(cluster, serviceIP, sharedAgentImage) } } + +func configSecretName(clusterName string) string { + return controller.ObjectName(clusterName, nil, configName) +} diff --git a/pkg/controller/cluster/agent/shared.go b/pkg/controller/cluster/agent/shared.go index 39dbcb3..b6e7251 100644 --- a/pkg/controller/cluster/agent/shared.go +++ b/pkg/controller/cluster/agent/shared.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/util" + "github.com/rancher/k3k/pkg/controller" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -13,19 +13,22 @@ import ( ) const ( - virtualKubeletImage = "rancher/k3k:k3k-kubelet" - virtualKubeletConfigPath = "/opt/rancher/k3k/config.yaml" + sharedKubeletConfigPath = "/opt/rancher/k3k/config.yaml" + sharedNodeAgentName = "kubelet" + SharedNodeMode = "shared" ) type SharedAgent struct { - cluster *v1alpha1.Cluster - serviceIP string + cluster *v1alpha1.Cluster + serviceIP string + sharedAgentImage string } -func NewSharedAgent(cluster *v1alpha1.Cluster, serviceIP string) Agent { +func NewSharedAgent(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage string) Agent { return 
&SharedAgent{ - cluster: cluster, - serviceIP: serviceIP, + cluster: cluster, + serviceIP: serviceIP, + sharedAgentImage: sharedAgentImage, } } @@ -38,8 +41,8 @@ func (s *SharedAgent) Config() (ctrlruntimeclient.Object, error) { APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: util.AgentConfigName(s.cluster), - Namespace: util.ClusterNamespace(s.cluster), + Name: configSecretName(s.cluster.Name), + Namespace: s.cluster.Namespace, }, Data: map[string][]byte{ "config.yaml": []byte(config), @@ -51,12 +54,13 @@ func sharedAgentData(cluster *v1alpha1.Cluster) string { return fmt.Sprintf(`clusterName: %s clusterNamespace: %s nodeName: %s -token: %s`, cluster.Name, cluster.Namespace, cluster.Name+"-"+"k3k-kubelet", cluster.Spec.Token) +agentHostname: %s +token: %s`, cluster.Name, cluster.Namespace, cluster.Name+"-"+"k3k-kubelet", cluster.Name+"-"+"k3k-kubelet", cluster.Spec.Token) } func (s *SharedAgent) Resources() ([]ctrlruntimeclient.Object, error) { var objs []ctrlruntimeclient.Object - objs = append(objs, s.serviceAccount(), s.role(), s.roleBinding(), s.deployment()) + objs = append(objs, s.serviceAccount(), s.role(), s.roleBinding(), s.service(), s.deployment()) return objs, nil } @@ -68,7 +72,7 @@ func (s *SharedAgent) deployment() *apps.Deployment { "mode": "shared", }, } - name := s.cluster.Name + "-" + "k3k-kubelet" + name := s.Name() return &apps.Deployment{ TypeMeta: metav1.TypeMeta{ Kind: "Deployment", @@ -76,7 +80,7 @@ func (s *SharedAgent) deployment() *apps.Deployment { }, ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: util.ClusterNamespace(s.cluster), + Namespace: s.cluster.Namespace, Labels: selector.MatchLabels, }, Spec: apps.DeploymentSpec{ @@ -85,14 +89,14 @@ func (s *SharedAgent) deployment() *apps.Deployment { ObjectMeta: metav1.ObjectMeta{ Labels: selector.MatchLabels, }, - Spec: s.podSpec(virtualKubeletImage, name, &selector), + Spec: s.podSpec(s.sharedAgentImage, name, &selector), }, }, } } func (s *SharedAgent) podSpec(image, 
name string, affinitySelector *metav1.LabelSelector) v1.PodSpec { - args := []string{"--config", virtualKubeletConfigPath} + args := []string{"--config", sharedKubeletConfigPath} var limit v1.ResourceList return v1.PodSpec{ Affinity: &v1.Affinity{ @@ -105,13 +109,13 @@ func (s *SharedAgent) podSpec(image, name string, affinitySelector *metav1.Label }, }, }, - ServiceAccountName: s.cluster.Name + "-" + "k3k-kubelet", + ServiceAccountName: s.Name(), Volumes: []v1.Volume{ { Name: "config", VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ - SecretName: util.AgentConfigName(s.cluster), + SecretName: configSecretName(s.cluster.Name), Items: []v1.KeyToPath{ { Key: "config.yaml", @@ -131,16 +135,6 @@ func (s *SharedAgent) podSpec(image, name string, affinitySelector *metav1.Label Limits: limit, }, Args: args, - Env: []v1.EnvVar{ - { - Name: "AGENT_POD_IP", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{ - FieldPath: "status.podIP", - }, - }, - }, - }, VolumeMounts: []v1.VolumeMount{ { Name: "config", @@ -152,6 +146,34 @@ func (s *SharedAgent) podSpec(image, name string, affinitySelector *metav1.Label }} } +func (s *SharedAgent) service() *v1.Service { + return &v1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: s.Name(), + Namespace: s.cluster.Namespace, + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{ + "cluster": s.cluster.Name, + "type": "agent", + "mode": "shared", + }, + Ports: []v1.ServicePort{ + { + Name: "k3s-kubelet-port", + Protocol: v1.ProtocolTCP, + Port: 9443, + }, + }, + }, + } +} + func (s *SharedAgent) serviceAccount() *v1.ServiceAccount { return &v1.ServiceAccount{ TypeMeta: metav1.TypeMeta{ @@ -159,8 +181,8 @@ func (s *SharedAgent) serviceAccount() *v1.ServiceAccount { APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: s.cluster.Name + "-" + "k3k-kubelet", - Namespace: 
util.ClusterNamespace(s.cluster), + Name: s.Name(), + Namespace: s.cluster.Namespace, }, } } @@ -172,8 +194,8 @@ func (s *SharedAgent) role() *rbacv1.Role { APIVersion: "rbac.authorization.k8s.io/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: s.cluster.Name + "-" + "k3k-kubelet", - Namespace: util.ClusterNamespace(s.cluster), + Name: s.Name(), + Namespace: s.cluster.Namespace, }, Rules: []rbacv1.PolicyRule{ { @@ -202,20 +224,24 @@ func (s *SharedAgent) roleBinding() *rbacv1.RoleBinding { APIVersion: "rbac.authorization.k8s.io/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: s.cluster.Name + "-" + "k3k-kubelet", - Namespace: util.ClusterNamespace(s.cluster), + Name: s.Name(), + Namespace: s.cluster.Namespace, }, RoleRef: rbacv1.RoleRef{ APIGroup: "rbac.authorization.k8s.io", Kind: "Role", - Name: s.cluster.Name + "-" + "k3k-kubelet", + Name: s.Name(), }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: s.cluster.Name + "-" + "k3k-kubelet", - Namespace: util.ClusterNamespace(s.cluster), + Name: s.Name(), + Namespace: s.cluster.Namespace, }, }, } } + +func (s *SharedAgent) Name() string { + return controller.ObjectName(s.cluster.Name, nil, sharedNodeAgentName) +} diff --git a/pkg/controller/cluster/agent/virtual.go b/pkg/controller/cluster/agent/virtual.go index 05d12ec..28dd395 100644 --- a/pkg/controller/cluster/agent/virtual.go +++ b/pkg/controller/cluster/agent/virtual.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/util" + "github.com/rancher/k3k/pkg/controller" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -12,6 +12,11 @@ import ( ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + VirtualNodeMode = "virtual" + virtualNodeAgentName = "kubelet" +) + type VirtualAgent struct { cluster *v1alpha1.Cluster serviceIP string @@ -33,8 +38,8 @@ func (v *VirtualAgent) Config() (ctrlruntimeclient.Object, error) { 
APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: util.AgentConfigName(v.cluster), - Namespace: util.ClusterNamespace(v.cluster), + Name: configSecretName(v.cluster.Name), + Namespace: v.cluster.Namespace, }, Data: map[string][]byte{ "config.yaml": []byte(config), @@ -55,7 +60,7 @@ with-node-id: true`, serviceIP, token) } func (v *VirtualAgent) deployment() *apps.Deployment { - image := util.K3SImage(v.cluster) + image := controller.K3SImage(v.cluster) const name = "k3k-agent" selector := metav1.LabelSelector{ @@ -71,8 +76,8 @@ func (v *VirtualAgent) deployment() *apps.Deployment { APIVersion: "apps/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: v.cluster.Name + "-" + name, - Namespace: util.ClusterNamespace(v.cluster), + Name: v.Name(), + Namespace: v.cluster.Namespace, Labels: selector.MatchLabels, }, Spec: apps.DeploymentSpec{ @@ -107,7 +112,7 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect Name: "config", VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ - SecretName: util.AgentConfigName(v.cluster), + SecretName: configSecretName(v.cluster.Name), Items: []v1.KeyToPath{ { Key: "config.yaml", @@ -212,3 +217,7 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect return podSpec } + +func (v *VirtualAgent) Name() string { + return controller.ObjectName(v.cluster.Name, nil, virtualNodeAgentName) +} diff --git a/pkg/controller/cluster/cluster.go b/pkg/controller/cluster/cluster.go index 77023ed..8df3002 100644 --- a/pkg/controller/cluster/cluster.go +++ b/pkg/controller/cluster/cluster.go @@ -8,11 +8,10 @@ import ( "time" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + k3kcontroller "github.com/rancher/k3k/pkg/controller" "github.com/rancher/k3k/pkg/controller/cluster/agent" - "github.com/rancher/k3k/pkg/controller/cluster/config" "github.com/rancher/k3k/pkg/controller/cluster/server" "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" - 
"github.com/rancher/k3k/pkg/controller/util" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,6 +27,7 @@ import ( ) const ( + namePrefix = "k3k" clusterController = "k3k-cluster-controller" clusterFinalizerName = "cluster.k3k.io/finalizer" etcdPodFinalizerName = "etcdpod.k3k.io/finalizer" @@ -42,16 +42,18 @@ const ( ) type ClusterReconciler struct { - Client ctrlruntimeclient.Client - Scheme *runtime.Scheme + Client ctrlruntimeclient.Client + Scheme *runtime.Scheme + SharedAgentImage string } // Add adds a new controller to the manager -func Add(ctx context.Context, mgr manager.Manager) error { +func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage string) error { // initialize a new Reconciler reconciler := ClusterReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + SharedAgentImage: sharedAgentImage, } return ctrl.NewControllerManagedBy(mgr). For(&v1alpha1.Cluster{}). @@ -76,20 +78,20 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) { controllerutil.AddFinalizer(&cluster, clusterFinalizerName) if err := c.Client.Update(ctx, &cluster); err != nil { - return reconcile.Result{}, util.LogAndReturnErr("failed to add cluster finalizer", err) + return reconcile.Result{}, k3kcontroller.LogAndReturnErr("failed to add cluster finalizer", err) } } klog.Infof("enqueue cluster [%s]", cluster.Name) if err := c.createCluster(ctx, &cluster); err != nil { - return reconcile.Result{}, util.LogAndReturnErr("failed to create cluster", err) + return reconcile.Result{}, k3kcontroller.LogAndReturnErr("failed to create cluster", err) } return reconcile.Result{}, nil } // remove finalizer from the server pods and update them. 
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"}) - listOpts := &ctrlruntimeclient.ListOptions{Namespace: util.ClusterNamespace(&cluster)} + listOpts := &ctrlruntimeclient.ListOptions{Namespace: cluster.Namespace} matchingLabels.ApplyToList(listOpts) if err := c.Client.List(ctx, &podList, listOpts); err != nil { @@ -99,7 +101,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request if controllerutil.ContainsFinalizer(&pod, etcdPodFinalizerName) { controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName) if err := c.Client.Update(ctx, &pod); err != nil { - return reconcile.Result{}, util.LogAndReturnErr("failed to remove etcd finalizer", err) + return reconcile.Result{}, k3kcontroller.LogAndReturnErr("failed to remove etcd finalizer", err) } } } @@ -108,7 +110,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request // remove finalizer from the cluster and update it. controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName) if err := c.Client.Update(ctx, &cluster); err != nil { - return reconcile.Result{}, util.LogAndReturnErr("failed to remove cluster finalizer", err) + return reconcile.Result{}, k3kcontroller.LogAndReturnErr("failed to remove cluster finalizer", err) } } klog.Infof("deleting cluster [%s]", cluster.Name) @@ -131,7 +133,7 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1 } } if err := c.Client.Update(ctx, cluster); err != nil { - return util.LogAndReturnErr("failed to update cluster with persistence type", err) + return k3kcontroller.LogAndReturnErr("failed to update cluster with persistence type", err) } cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR @@ -147,32 +149,32 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1 klog.Infof("creating cluster service") serviceIP, err := c.createClusterService(ctx, cluster, s) if err != nil { - return util.LogAndReturnErr("failed to 
create cluster service", err) + return k3kcontroller.LogAndReturnErr("failed to create cluster service", err) } - if err := c.createClusterConfigs(ctx, cluster, serviceIP); err != nil { - return util.LogAndReturnErr("failed to create cluster configs", err) + if err := c.createClusterConfigs(ctx, cluster, s, serviceIP); err != nil { + return k3kcontroller.LogAndReturnErr("failed to create cluster configs", err) } // creating statefulsets in case the user chose a persistence type other than ephermal if err := c.server(ctx, cluster, s); err != nil { - return util.LogAndReturnErr("failed to create servers", err) + return k3kcontroller.LogAndReturnErr("failed to create servers", err) } if err := c.agent(ctx, cluster, serviceIP); err != nil { - return util.LogAndReturnErr("failed to create agents", err) + return k3kcontroller.LogAndReturnErr("failed to create agents", err) } if cluster.Spec.Expose != nil { if cluster.Spec.Expose.Ingress != nil { serverIngress, err := s.Ingress(ctx, c.Client) if err != nil { - return util.LogAndReturnErr("failed to create ingress object", err) + return k3kcontroller.LogAndReturnErr("failed to create ingress object", err) } if err := c.Client.Create(ctx, serverIngress); err != nil { if !apierrors.IsAlreadyExists(err) { - return util.LogAndReturnErr("failed to create server ingress", err) + return k3kcontroller.LogAndReturnErr("failed to create server ingress", err) } } } @@ -180,21 +182,21 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1 bootstrapSecret, err := bootstrap.Generate(ctx, cluster, serviceIP) if err != nil { - return util.LogAndReturnErr("failed to generate new kubeconfig", err) + return k3kcontroller.LogAndReturnErr("failed to generate new kubeconfig", err) } if err := c.Client.Create(ctx, bootstrapSecret); err != nil { if !apierrors.IsAlreadyExists(err) { - return util.LogAndReturnErr("failed to create kubeconfig secret", err) + return k3kcontroller.LogAndReturnErr("failed to create 
kubeconfig secret", err) } } return c.Client.Update(ctx, cluster) } -func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error { +func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, serviceIP string) error { // create init node config - initServerConfig, err := config.Server(cluster, true, serviceIP) + initServerConfig, err := server.Config(true, serviceIP) if err != nil { return err } @@ -210,7 +212,7 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v } // create servers configuration - serverConfig, err := config.Server(cluster, false, serviceIP) + serverConfig, err := server.Config(false, serviceIP) if err != nil { return err } @@ -226,9 +228,9 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v return nil } -func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) (string, error) { +func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v1alpha1.Cluster, s *server.Server) (string, error) { // create cluster service - clusterService := server.Service(cluster) + clusterService := s.Service(cluster) if err := controllerutil.SetControllerReference(cluster, clusterService, c.Scheme); err != nil { return "", err @@ -242,8 +244,8 @@ func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v var service v1.Service objKey := ctrlruntimeclient.ObjectKey{ - Namespace: util.ClusterNamespace(cluster), - Name: util.ServerSvcName(cluster), + Namespace: cluster.Namespace, + Name: server.ServiceName(cluster.Name), } if err := c.Client.Get(ctx, objKey, &service); err != nil { return "", err @@ -254,7 +256,7 @@ func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluster, server 
*server.Server) error { // create headless service for the statefulset - serverStatefulService := server.StatefulServerService(cluster) + serverStatefulService := server.StatefulServerService() if err := controllerutil.SetControllerReference(cluster, serverStatefulService, c.Scheme); err != nil { return err } @@ -263,7 +265,7 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste return err } } - ServerStatefulSet, err := server.StatefulServer(ctx, cluster) + ServerStatefulSet, err := server.StatefulServer(ctx) if err != nil { return err } @@ -280,13 +282,12 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste } func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error { - agent := agent.New(cluster, serviceIP) + agent := agent.New(cluster, serviceIP, c.SharedAgentImage) agentsConfig, err := agent.Config() if err != nil { return err } - agentResources, err := agent.Resources() if err != nil { return err diff --git a/pkg/controller/cluster/pod.go b/pkg/controller/cluster/pod.go index 77c9c7c..f087869 100644 --- a/pkg/controller/cluster/pod.go +++ b/pkg/controller/cluster/pod.go @@ -11,9 +11,10 @@ import ( certutil "github.com/rancher/dynamiclistener/cert" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + k3kcontroller "github.com/rancher/k3k/pkg/controller" + "github.com/rancher/k3k/pkg/controller/cluster/server" "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" "github.com/rancher/k3k/pkg/controller/kubeconfig" - "github.com/rancher/k3k/pkg/controller/util" "github.com/sirupsen/logrus" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" clientv3 "go.etcd.io/etcd/client/v3" @@ -60,11 +61,13 @@ func AddPodController(ctx context.Context, mgr manager.Manager) error { } func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - s := strings.Split(req.Namespace, "-") - if len(s) <= 1 { - return 
reconcile.Result{}, util.LogAndReturnErr("failed to get cluster namespace", nil) + s := strings.Split(req.Name, "-") + if len(s) < 1 { + return reconcile.Result{}, k3kcontroller.LogAndReturnErr("failed to get cluster namespace", nil) + } + if s[0] != "k3k" { + return reconcile.Result{}, nil } - clusterName := s[1] var cluster v1alpha1.Cluster if err := p.Client.Get(ctx, types.NamespacedName{Name: clusterName}, &cluster); err != nil { @@ -83,7 +86,7 @@ func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r for _, pod := range podList.Items { klog.Infof("Handle etcd server pod [%s/%s]", pod.Namespace, pod.Name) if err := p.handleServerPod(ctx, cluster, &pod); err != nil { - return reconcile.Result{}, util.LogAndReturnErr("failed to handle etcd pod", err) + return reconcile.Result{}, k3kcontroller.LogAndReturnErr("failed to handle etcd pod", err) } } return reconcile.Result{}, nil @@ -116,7 +119,7 @@ func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cl // remove server from etcd client, err := clientv3.New(clientv3.Config{ Endpoints: []string{ - fmt.Sprintf("https://%s.%s:2379", util.ServerSvcName(&cluster), pod.Namespace), + fmt.Sprintf("https://%s.%s:2379", server.ServiceName(cluster.Name), pod.Namespace), }, TLS: tlsConfig, }) @@ -146,9 +149,9 @@ func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cl func (p *PodReconciler) getETCDTLS(cluster *v1alpha1.Cluster) (*tls.Config, error) { klog.Infof("generating etcd TLS client certificate for cluster [%s]", cluster.Name) token := cluster.Spec.Token - endpoint := fmt.Sprintf("%s.%s", util.ServerSvcName(cluster), util.ClusterNamespace(cluster)) + endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace) var b *bootstrap.ControlRuntimeBootstrap - if err := retry.OnError(retry.DefaultBackoff, func(err error) bool { + if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool { return true }, func() error { 
var err error diff --git a/pkg/controller/cluster/server/bootstrap/bootstrap.go b/pkg/controller/cluster/server/bootstrap/bootstrap.go index a8372f4..41f2285 100644 --- a/pkg/controller/cluster/server/bootstrap/bootstrap.go +++ b/pkg/controller/cluster/server/bootstrap/bootstrap.go @@ -9,7 +9,7 @@ import ( "time" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/util" + "github.com/rancher/k3k/pkg/controller" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" @@ -36,7 +36,7 @@ func Generate(ctx context.Context, cluster *v1alpha1.Cluster, ip string) (*v1.Se token := cluster.Spec.Token var bootstrap *ControlRuntimeBootstrap - if err := retry.OnError(retry.DefaultBackoff, func(err error) bool { + if err := retry.OnError(controller.Backoff, func(err error) bool { return true }, func() error { var err error @@ -60,8 +60,8 @@ func Generate(ctx context.Context, cluster *v1alpha1.Cluster, ip string) (*v1.Se APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: cluster.Name + "-bootstrap", - Namespace: util.ClusterNamespace(cluster), + Name: controller.ObjectName(cluster.Name, nil, "bootstrap"), + Namespace: cluster.Namespace, OwnerReferences: []metav1.OwnerReference{ { APIVersion: cluster.APIVersion, diff --git a/pkg/controller/cluster/config/server.go b/pkg/controller/cluster/server/config.go similarity index 66% rename from pkg/controller/cluster/config/server.go rename to pkg/controller/cluster/server/config.go index 3d563c3..5e0bf87 100644 --- a/pkg/controller/cluster/config/server.go +++ b/pkg/controller/cluster/server/config.go @@ -1,35 +1,26 @@ -package config +package server import ( "fmt" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/util" + "github.com/rancher/k3k/pkg/controller" + "github.com/rancher/k3k/pkg/controller/cluster/agent" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const ( - 
VirtualNodeMode = "virtual" -) - -// Server returns the secret for the server's config. Note that this doesn't set the ownerRef on the secret -// to tie it back to the cluster. -func Server(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret, error) { - name := util.ServerConfigName(cluster) - if init { - name = util.ServerInitConfigName(cluster) - } - - cluster.Status.TLSSANs = append(cluster.Spec.TLSSANs, +func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) { + name := configSecretName(s.cluster.Name, init) + s.cluster.Status.TLSSANs = append(s.cluster.Spec.TLSSANs, serviceIP, - util.ServerSvcName(cluster), - fmt.Sprintf("%s.%s", util.ServerSvcName(cluster), util.ClusterNamespace(cluster)), + ServiceName(s.cluster.Name), + fmt.Sprintf("%s.%s", ServiceName(s.cluster.Name), s.cluster.Namespace), ) - config := serverConfigData(serviceIP, cluster) + config := serverConfigData(serviceIP, s.cluster) if init { - config = initConfigData(cluster) + config = initConfigData(s.cluster) } return &v1.Secret{ TypeMeta: metav1.TypeMeta{ @@ -38,7 +29,7 @@ func Server(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret, }, ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: util.ClusterNamespace(cluster), + Namespace: s.cluster.Namespace, }, Data: map[string][]byte{ "config.yaml": []byte(config), @@ -76,10 +67,17 @@ func serverOptions(cluster *v1alpha1.Cluster) string { opts = opts + "- " + addr + "\n" } } - if cluster.Spec.Mode != VirtualNodeMode { + if cluster.Spec.Mode != agent.VirtualNodeMode { opts = opts + "disable-agent: true\negress-selector-mode: disabled\n" } // TODO: Add extra args to the options return opts } + +func configSecretName(clusterName string, init bool) string { + if !init { + return controller.ObjectName(clusterName, nil, configName) + } + return controller.ObjectName(clusterName, nil, initConfigName) +} diff --git a/pkg/controller/cluster/server/ingress.go b/pkg/controller/cluster/server/ingress.go 
index 154cb5f..4c7b9fb 100644 --- a/pkg/controller/cluster/server/ingress.go +++ b/pkg/controller/cluster/server/ingress.go @@ -3,7 +3,7 @@ package server import ( "context" - "github.com/rancher/k3k/pkg/controller/util" + "github.com/rancher/k3k/pkg/controller" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -15,12 +15,13 @@ const ( nginxSSLPassthroughAnnotation = "nginx.ingress.kubernetes.io/ssl-passthrough" nginxBackendProtocolAnnotation = "nginx.ingress.kubernetes.io/backend-protocol" nginxSSLRedirectAnnotation = "nginx.ingress.kubernetes.io/ssl-redirect" - serverPort = 6443 - etcdPort = 2379 + + serverPort = 6443 + etcdPort = 2379 ) func (s *Server) Ingress(ctx context.Context, client client.Client) (*networkingv1.Ingress, error) { - addresses, err := util.Addresses(ctx, client) + addresses, err := controller.Addresses(ctx, client) if err != nil { return nil, err } @@ -31,8 +32,12 @@ func (s *Server) Ingress(ctx context.Context, client client.Client) (*networking APIVersion: "networking.k8s.io/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: s.cluster.Name + "-server-ingress", - Namespace: util.ClusterNamespace(s.cluster), + Name: controller.ObjectName(s.cluster.Name, &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{ + Kind: "Ingress", + }, + }), + Namespace: s.cluster.Namespace, }, Spec: networkingv1.IngressSpec{ IngressClassName: &s.cluster.Spec.Expose.Ingress.IngressClassName, @@ -59,7 +64,7 @@ func (s *Server) ingressRules(addresses []string) []networkingv1.IngressRule { PathType: &pathTypePrefix, Backend: networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ - Name: util.ServerSvcName(s.cluster), + Name: ServiceName(s.cluster.Name), Port: networkingv1.ServiceBackendPort{ Number: serverPort, }, diff --git a/pkg/controller/cluster/server/server.go b/pkg/controller/cluster/server/server.go index c8e382b..0e9c2ac 100644 --- 
a/pkg/controller/cluster/server/server.go +++ b/pkg/controller/cluster/server/server.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/util" + "github.com/rancher/k3k/pkg/controller" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -17,12 +17,12 @@ import ( ) const ( - serverName = "k3k-" - k3kSystemNamespace = serverName + "system" - initServerName = serverName + "init-server" - initContainerName = serverName + "server-check" - initContainerImage = "alpine/curl" + k3kSystemNamespace = "k3k-system" + serverName = "server" + configName = "server-config" + initConfigName = "init-server-config" + ServerPort = 6443 EphermalNodesType = "ephermal" DynamicNodesType = "dynamic" ) @@ -40,7 +40,7 @@ func New(cluster *v1alpha1.Cluster, client client.Client) *Server { } } -func (s *Server) podSpec(ctx context.Context, image, name string, persistent bool, affinitySelector *metav1.LabelSelector) v1.PodSpec { +func (s *Server) podSpec(image, name string, persistent bool, affinitySelector *metav1.LabelSelector) v1.PodSpec { var limit v1.ResourceList if s.cluster.Spec.Limit != nil && s.cluster.Spec.Limit.ServerLimit != nil { limit = s.cluster.Spec.Limit.ServerLimit @@ -62,7 +62,7 @@ func (s *Server) podSpec(ctx context.Context, image, name string, persistent boo Name: "initconfig", VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ - SecretName: util.ServerInitConfigName(s.cluster), + SecretName: configSecretName(s.cluster.Name, true), Items: []v1.KeyToPath{ { Key: "config.yaml", @@ -76,7 +76,7 @@ func (s *Server) podSpec(ctx context.Context, image, name string, persistent boo Name: "config", VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ - SecretName: util.ServerConfigName(s.cluster), + SecretName: configSecretName(s.cluster.Name, false), Items: []v1.KeyToPath{ { Key: "config.yaml", @@ -220,18 +220,18 @@ func (s *Server) podSpec(ctx 
context.Context, image, name string, persistent boo return podSpec } -func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster) (*apps.StatefulSet, error) { +func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error) { var ( replicas int32 pvClaims []v1.PersistentVolumeClaim persistent bool ) - image := util.K3SImage(cluster) - name := serverName + "server" + image := controller.K3SImage(s.cluster) + name := controller.ObjectName(s.cluster.Name, nil, serverName) - replicas = *cluster.Spec.Servers + replicas = *s.cluster.Spec.Servers - if cluster.Spec.Persistence != nil && cluster.Spec.Persistence.Type != EphermalNodesType { + if s.cluster.Spec.Persistence != nil && s.cluster.Spec.Persistence.Type != EphermalNodesType { persistent = true pvClaims = []v1.PersistentVolumeClaim{ { @@ -241,14 +241,14 @@ func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster) }, ObjectMeta: metav1.ObjectMeta{ Name: "varlibrancherk3s", - Namespace: util.ClusterNamespace(cluster), + Namespace: s.cluster.Namespace, }, Spec: v1.PersistentVolumeClaimSpec{ AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - StorageClassName: &cluster.Spec.Persistence.StorageClassName, + StorageClassName: &s.cluster.Spec.Persistence.StorageClassName, Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ - "storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize), + "storage": resource.MustParse(s.cluster.Spec.Persistence.StorageRequestSize), }, }, }, @@ -260,16 +260,16 @@ func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster) }, ObjectMeta: metav1.ObjectMeta{ Name: "varlibkubelet", - Namespace: util.ClusterNamespace(cluster), + Namespace: s.cluster.Namespace, }, Spec: v1.PersistentVolumeClaimSpec{ Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ - "storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize), + "storage": 
resource.MustParse(s.cluster.Spec.Persistence.StorageRequestSize), }, }, AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - StorageClassName: &cluster.Spec.Persistence.StorageClassName, + StorageClassName: &s.cluster.Spec.Persistence.StorageClassName, }, }, } @@ -301,7 +301,7 @@ func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster) }, ObjectMeta: metav1.ObjectMeta{ Name: addons.Name, - Namespace: util.ClusterNamespace(s.cluster), + Namespace: s.cluster.Namespace, }, Data: make(map[string][]byte, len(addons.Data)), } @@ -335,12 +335,12 @@ func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster) selector := metav1.LabelSelector{ MatchLabels: map[string]string{ - "cluster": cluster.Name, + "cluster": s.cluster.Name, "role": "server", }, } - podSpec := s.podSpec(ctx, image, name, persistent, &selector) + podSpec := s.podSpec(image, name, persistent, &selector) podSpec.Volumes = append(podSpec.Volumes, volumes...) podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...) 
@@ -350,13 +350,13 @@ func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster) APIVersion: "apps/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: cluster.Name + "-" + name, - Namespace: util.ClusterNamespace(cluster), + Name: name, + Namespace: s.cluster.Namespace, Labels: selector.MatchLabels, }, Spec: apps.StatefulSetSpec{ Replicas: &replicas, - ServiceName: cluster.Name + "-" + name + "-headless", + ServiceName: headlessServiceName(s.cluster.Name), Selector: &selector, VolumeClaimTemplates: pvClaims, Template: v1.PodTemplateSpec{ diff --git a/pkg/controller/cluster/server/service.go b/pkg/controller/cluster/server/service.go index 1ecbf6c..5fe253e 100644 --- a/pkg/controller/cluster/server/service.go +++ b/pkg/controller/cluster/server/service.go @@ -2,7 +2,7 @@ package server import ( "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/util" + "github.com/rancher/k3k/pkg/controller" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -23,8 +23,8 @@ func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service { APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: util.ServerSvcName(cluster), - Namespace: util.ClusterNamespace(cluster), + Name: ServiceName(s.cluster.Name), + Namespace: cluster.Namespace, }, Spec: v1.ServiceSpec{ Type: serviceType, @@ -48,22 +48,21 @@ func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service { } } -func (s *Server) StatefulServerService(cluster *v1alpha1.Cluster) *v1.Service { - name := serverName +func (s *Server) StatefulServerService() *v1.Service { return &v1.Service{ TypeMeta: metav1.TypeMeta{ Kind: "Service", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: cluster.Name + "-" + name + "headless", - Namespace: util.ClusterNamespace(cluster), + Name: headlessServiceName(s.cluster.Name), + Namespace: s.cluster.Namespace, }, Spec: v1.ServiceSpec{ Type: v1.ServiceTypeClusterIP, ClusterIP: v1.ClusterIPNone, Selector: 
map[string]string{ - "cluster": cluster.Name, + "cluster": s.cluster.Name, "role": "server", }, Ports: []v1.ServicePort{ @@ -81,3 +80,18 @@ func (s *Server) StatefulServerService(cluster *v1alpha1.Cluster) *v1.Service { }, } } + +func ServiceName(clusterName string) string { + return controller.ObjectName(clusterName, &v1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + }, + }) +} + +func headlessServiceName(clusterName string) string { + return controller.ObjectName(clusterName, &v1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + }}, "-headless") +} diff --git a/pkg/controller/clusterset/clusterset.go b/pkg/controller/clusterset/clusterset.go index a1827b0..1e62239 100644 --- a/pkg/controller/clusterset/clusterset.go +++ b/pkg/controller/clusterset/clusterset.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + k3kcontroller "github.com/rancher/k3k/pkg/controller" v1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -20,7 +21,6 @@ import ( const ( clusterSetController = "k3k-clusterset-controller" - networkPolicyName = "k3k-cluster-netpol" allTrafficCIDR = "0.0.0.0/0" maxConcurrentReconciles = 1 ) @@ -108,7 +108,7 @@ func netpol(ctx context.Context, clusterCIDR string, clusterSet *v1alpha1.Cluste } return &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ - Name: networkPolicyName, + Name: k3kcontroller.ObjectName(clusterSet.Name, nil), Namespace: clusterSet.Namespace, }, TypeMeta: metav1.TypeMeta{ diff --git a/pkg/controller/clusterset/node.go b/pkg/controller/clusterset/node.go index 9cd23d7..fe2a7c9 100644 --- a/pkg/controller/clusterset/node.go +++ b/pkg/controller/clusterset/node.go @@ -4,7 +4,7 @@ import ( "context" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - "github.com/rancher/k3k/pkg/controller/util" + k3kcontroller "github.com/rancher/k3k/pkg/controller" v1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" 
"k8s.io/apimachinery/pkg/runtime" @@ -45,7 +45,7 @@ func AddNodeController(ctx context.Context, mgr manager.Manager) error { func (n *NodeReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { var clusterSetList v1alpha1.ClusterSetList if err := n.Client.List(ctx, &clusterSetList); err != nil { - return reconcile.Result{}, util.LogAndReturnErr("failed to list clusterSets", err) + return reconcile.Result{}, k3kcontroller.LogAndReturnErr("failed to list clusterSets", err) } if len(clusterSetList.Items) <= 0 { diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go new file mode 100644 index 0000000..8c5d537 --- /dev/null +++ b/pkg/controller/controller.go @@ -0,0 +1,106 @@ +package controller + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "strings" + "time" + + "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + namePrefix = "k3k" + k3SImageName = "rancher/k3s" + AdminCommonName = "system:admin" +) + +// Backoff is the cluster creation duration backoff +var Backoff = wait.Backoff{ + Steps: 5, + Duration: 5 * time.Second, + Factor: 2, + Jitter: 0.1, +} + +func K3SImage(cluster *v1alpha1.Cluster) string { + return k3SImageName + ":" + cluster.Spec.Version +} + +func LogAndReturnErr(errString string, err error) error { + klog.Errorf("%s: %v", errString, err) + return err +} + +func nodeAddress(node *v1.Node) string { + var externalIP string + var internalIP string + + for _, ip := range node.Status.Addresses { + if ip.Type == "ExternalIP" && ip.Address != "" { + externalIP = ip.Address + break + } + if ip.Type == "InternalIP" && ip.Address != "" { + internalIP = ip.Address + } + } + if externalIP != "" { + return externalIP + } + + return internalIP +} + +// return all the nodes external addresses, if not found then return internal addresses 
+func Addresses(ctx context.Context, client ctrlruntimeclient.Client) ([]string, error) { + var nodeList v1.NodeList + if err := client.List(ctx, &nodeList); err != nil { + return nil, err + } + + var addresses []string + for _, node := range nodeList.Items { + addresses = append(addresses, nodeAddress(&node)) + } + + return addresses, nil +} + +// ObjectName will create a concatenated name based on the object's kind name that is being sent +// along with a prefix and the cluster name as well. +func ObjectName(clusterName string, object ctrlruntimeclient.Object, any ...string) string { + names := []string{namePrefix} + if clusterName != "" { + names = append(names, clusterName) + } + var objectKind string + if object != nil { + objectKind = strings.ToLower(object.GetObjectKind().GroupVersionKind().Kind) + names = append(names, objectKind) + } + return SafeConcatName(append(names, any...)...) +} + +// safeConcatName concatenates the given strings and ensures the returned name is under 64 characters +// by cutting the string off at 57 characters and setting the last 6 with an encoded version of the concatenated string. 
+func SafeConcatName(name ...string) string { + fullPath := strings.Join(name, "-") + if len(fullPath) < 64 { + return fullPath + } + digest := sha256.Sum256([]byte(fullPath)) + // since we cut the string in the middle, the last char may not be compatible with what is expected in k8s + // we are checking and if necessary removing the last char + c := fullPath[56] + if 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { + return fullPath[0:57] + "-" + hex.EncodeToString(digest[0:])[0:5] + } + + return fullPath[0:56] + "-" + hex.EncodeToString(digest[0:])[0:6] +} diff --git a/pkg/controller/kubeconfig/kubeconfig.go b/pkg/controller/kubeconfig/kubeconfig.go index 2b8de59..0347cdd 100644 --- a/pkg/controller/kubeconfig/kubeconfig.go +++ b/pkg/controller/kubeconfig/kubeconfig.go @@ -10,8 +10,9 @@ import ( certutil "github.com/rancher/dynamiclistener/cert" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + "github.com/rancher/k3k/pkg/controller" + "github.com/rancher/k3k/pkg/controller/cluster/server" "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" - "github.com/rancher/k3k/pkg/controller/util" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/clientcmd" @@ -28,8 +29,8 @@ type KubeConfig struct { func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string) ([]byte, error) { nn := types.NamespacedName{ - Name: cluster.Name + "-bootstrap", - Namespace: util.ClusterNamespace(cluster), + Name: controller.ObjectName(cluster.Name, nil, "bootstrap"), + Namespace: cluster.Namespace, } var bootstrapSecret v1.Secret @@ -57,8 +58,8 @@ func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster } // get the server service to extract the right IP nn = types.NamespacedName{ - Name: util.ServerSvcName(cluster), - Namespace: util.ClusterNamespace(cluster), + Name: server.ServiceName(cluster.Name), + Namespace: cluster.Namespace, } var k3kService v1.Service @@ -66,7 
+67,7 @@ func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster return nil, err } - url := fmt.Sprintf("https://%s:%d", k3kService.Spec.ClusterIP, util.ServerPort) + url := fmt.Sprintf("https://%s:%d", k3kService.Spec.ClusterIP, server.ServerPort) if k3kService.Spec.Type == v1.ServiceTypeNodePort { nodePort := k3kService.Spec.Ports[0].NodePort url = fmt.Sprintf("https://%s:%d", hostServerIP, nodePort) diff --git a/pkg/controller/util/util.go b/pkg/controller/util/util.go deleted file mode 100644 index 4c86651..0000000 --- a/pkg/controller/util/util.go +++ /dev/null @@ -1,87 +0,0 @@ -package util - -import ( - "context" - "fmt" - - "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" - v1 "k8s.io/api/core/v1" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - namespacePrefix = "k3k-" - k3SImageName = "rancher/k3s" - - AdminCommonName = "system:admin" - ServerPort = 6443 -) - -const ( - K3kSystemNamespace = namespacePrefix + "system" -) - -func ClusterNamespace(cluster *v1alpha1.Cluster) string { - return cluster.Namespace -} - -func ServerSvcName(cluster *v1alpha1.Cluster) string { - return fmt.Sprintf("k3k-%s-service", cluster.Name) -} - -func ServerConfigName(cluster *v1alpha1.Cluster) string { - return fmt.Sprintf("k3k-%s-server-config", cluster.Name) -} - -func ServerInitConfigName(cluster *v1alpha1.Cluster) string { - return fmt.Sprintf("k3k-init-%s-server-config", cluster.Name) -} - -func AgentConfigName(cluster *v1alpha1.Cluster) string { - return fmt.Sprintf("k3k-%s-agent-config", cluster.Name) -} - -func K3SImage(cluster *v1alpha1.Cluster) string { - return k3SImageName + ":" + cluster.Spec.Version -} - -func LogAndReturnErr(errString string, err error) error { - klog.Errorf("%s: %v", errString, err) - return err -} - -func nodeAddress(node *v1.Node) string { - var externalIP string - var internalIP string - - for _, ip := range node.Status.Addresses { - if ip.Type == "ExternalIP" && ip.Address != "" { - 
externalIP = ip.Address - break - } - if ip.Type == "InternalIP" && ip.Address != "" { - internalIP = ip.Address - } - } - if externalIP != "" { - return externalIP - } - - return internalIP -} - -// return all the nodes external addresses, if not found then return internal addresses -func Addresses(ctx context.Context, client client.Client) ([]string, error) { - var nodeList v1.NodeList - if err := client.List(ctx, &nodeList); err != nil { - return nil, err - } - - var addresses []string - for _, node := range nodeList.Items { - addresses = append(addresses, nodeAddress(&node)) - } - - return addresses, nil -} From 7fc047777b2fac5f298fefec8d4a654662ca4d0b Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Fri, 18 Oct 2024 21:50:41 +0300 Subject: [PATCH 10/15] Fix comments Signed-off-by: galal-hussein --- go.mod | 4 ++-- k3k-kubelet/kubelet.go | 20 ++++++---------- k3k-kubelet/main.go | 32 +++++++++++++------------- main.go | 1 - pkg/controller/cluster/agent/agent.go | 3 +-- pkg/controller/cluster/agent/shared.go | 4 +++- 6 files changed, 29 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index 64204ef..b0ebaf5 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( go.etcd.io/etcd/api/v3 v3.5.14 go.etcd.io/etcd/client/v3 v3.5.14 go.uber.org/zap v1.26.0 - gopkg.in/yaml.v3 v3.0.1 + gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.31.1 k8s.io/apimachinery v0.31.1 k8s.io/apiserver v0.31.0 @@ -120,7 +120,7 @@ require ( google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.29.2 // indirect k8s.io/component-base v0.29.2 // indirect k8s.io/kms v0.29.2 // indirect diff --git a/k3k-kubelet/kubelet.go b/k3k-kubelet/kubelet.go index 884d2a9..100e71b 100644 --- a/k3k-kubelet/kubelet.go +++ b/k3k-kubelet/kubelet.go @@ -96,7 +96,7 @@ func (k *kubelet) Start(ctx context.Context) { 
logger, err := zap.NewProduction() if err != nil { fmt.Printf("unable to create logger: %s", err.Error()) - os.Exit(-1) + os.Exit(1) } wrapped := LogWrapper{ *logger.Sugar(), @@ -105,19 +105,19 @@ func (k *kubelet) Start(ctx context.Context) { err = k.node.Run(ctx) if err != nil { fmt.Printf("node errored when running: %s \n", err.Error()) - os.Exit(-1) + os.Exit(1) } }() if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil { fmt.Printf("node was not ready within timeout of 1 minute: %s \n", err.Error()) - os.Exit(-1) + os.Exit(1) } <-k.node.Done() if err := k.node.Err(); err != nil { fmt.Printf("node stopped with an error: %s \n", err.Error()) - os.Exit(-1) + os.Exit(1) } - fmt.Printf("node exited without an error") + fmt.Print("node exited without an error") } func (k *kubelet) newProviderFunc(namespace, name, hostname string) nodeutil.NewProviderFunc { @@ -138,8 +138,7 @@ func (k *kubelet) nodeOpts(srvPort, namespace, name, hostname string) nodeutil.N c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort) // set up the routes mux := http.NewServeMux() - err := nodeutil.AttachProviderRoutes(mux)(c) - if err != nil { + if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil { return fmt.Errorf("unable to attach routes: %w", err) } c.Handler = mux @@ -212,12 +211,7 @@ func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte config.Contexts["default"] = context config.CurrentContext = "default" - kubeconfig, err := clientcmd.Write(*config) - if err != nil { - return nil, err - } - - return kubeconfig, nil + return clientcmd.Write(*config) } func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, hostname string) (*tls.Config, error) { diff --git a/k3k-kubelet/main.go b/k3k-kubelet/main.go index a6ddf84..d03a31b 100644 --- a/k3k-kubelet/main.go +++ b/k3k-kubelet/main.go @@ -11,7 +11,7 @@ import ( var ( configFile string - c config + cfg config ) func main() { @@ 
-22,44 +22,44 @@ func main() { cli.StringFlag{ Name: "cluster-name", Usage: "Name of the k3k cluster", - Destination: &c.ClusterName, + Destination: &cfg.ClusterName, EnvVar: "CLUSTER_NAME", }, cli.StringFlag{ Name: "cluster-namespace", Usage: "Namespace of the k3k cluster", - Destination: &c.ClusterNamespace, + Destination: &cfg.ClusterNamespace, EnvVar: "CLUSTER_NAMESPACE", }, cli.StringFlag{ Name: "cluster-token", Usage: "K3S token of the k3k cluster", - Destination: &c.Token, + Destination: &cfg.Token, EnvVar: "CLUSTER_TOKEN", }, cli.StringFlag{ Name: "host-config-path", Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config", - Destination: &c.HostConfigPath, + Destination: &cfg.HostConfigPath, EnvVar: "HOST_KUBECONFIG", }, cli.StringFlag{ Name: "virtual-config-path", Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster", - Destination: &c.VirtualConfigPath, + Destination: &cfg.VirtualConfigPath, EnvVar: "CLUSTER_NAME", }, cli.StringFlag{ Name: "kubelet-port", Usage: "kubelet API port number", - Destination: &c.KubeletPort, + Destination: &cfg.KubeletPort, EnvVar: "SERVER_PORT", Value: "9443", }, cli.StringFlag{ Name: "agent-hostname", Usage: "Agent Hostname used for TLS SAN for the kubelet server", - Destination: &c.AgentHostname, + Destination: &cfg.AgentHostname, EnvVar: "AGENT_HOSTNAME", }, cli.StringFlag{ @@ -77,24 +77,24 @@ func main() { } func Run(clx *cli.Context) { - if err := c.Parse(configFile); err != nil { + if err := cfg.Parse(configFile); err != nil { fmt.Printf("failed to parse config file %s: %v", configFile, err) - os.Exit(-1) + os.Exit(1) } - if err := c.Validate(); err != nil { + if err := cfg.Validate(); err != nil { fmt.Printf("failed to validate config: %v", err) - os.Exit(-1) + os.Exit(1) } - k, err := newKubelet(&c) + k, err := newKubelet(&cfg) if err != nil { fmt.Printf("failed to create new virtual kubelet instance: %v", err) - 
os.Exit(-1) + os.Exit(1) } - if err := k.RegisterNode(c.KubeletPort, c.ClusterNamespace, c.ClusterName, c.AgentHostname); err != nil { + if err := k.RegisterNode(cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname); err != nil { fmt.Printf("failed to register new node: %v", err) - os.Exit(-1) + os.Exit(1) } k.Start(context.Background()) diff --git a/main.go b/main.go index 8388440..6b23ec0 100644 --- a/main.go +++ b/main.go @@ -16,7 +16,6 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/manager" ) diff --git a/pkg/controller/cluster/agent/agent.go b/pkg/controller/cluster/agent/agent.go index 95d5771..676c372 100644 --- a/pkg/controller/cluster/agent/agent.go +++ b/pkg/controller/cluster/agent/agent.go @@ -20,9 +20,8 @@ type Agent interface { func New(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage string) Agent { if cluster.Spec.Mode == VirtualNodeMode { return NewVirtualAgent(cluster, serviceIP) - } else { - return NewSharedAgent(cluster, serviceIP, sharedAgentImage) } + return NewSharedAgent(cluster, serviceIP, sharedAgentImage) } func configSecretName(clusterName string) string { diff --git a/pkg/controller/cluster/agent/shared.go b/pkg/controller/cluster/agent/shared.go index b6e7251..c068856 100644 --- a/pkg/controller/cluster/agent/shared.go +++ b/pkg/controller/cluster/agent/shared.go @@ -51,11 +51,13 @@ func (s *SharedAgent) Config() (ctrlruntimeclient.Object, error) { } func sharedAgentData(cluster *v1alpha1.Cluster) string { + nodeName := cluster.Name + "-" + "k3k-kubelet" return fmt.Sprintf(`clusterName: %s clusterNamespace: %s nodeName: %s agentHostname: %s -token: %s`, cluster.Name, cluster.Namespace, cluster.Name+"-"+"k3k-kubelet", cluster.Name+"-"+"k3k-kubelet", cluster.Spec.Token) +token: %s`, + cluster.Name, cluster.Namespace, nodeName, nodeName, cluster.Spec.Token) } func (s *SharedAgent) Resources() ([]ctrlruntimeclient.Object, 
error) { From 1750c5273cee2faf68b94363940b9f53b59a133c Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Sat, 19 Oct 2024 00:44:37 +0300 Subject: [PATCH 11/15] more fixes to naming Signed-off-by: galal-hussein --- k3k-kubelet/kubelet.go | 2 +- pkg/controller/cluster/agent/agent.go | 2 +- pkg/controller/cluster/agent/shared.go | 2 +- pkg/controller/cluster/agent/virtual.go | 2 +- .../cluster/server/bootstrap/bootstrap.go | 2 +- pkg/controller/cluster/server/config.go | 4 ++-- pkg/controller/cluster/server/ingress.go | 6 +----- pkg/controller/cluster/server/server.go | 2 +- pkg/controller/cluster/server/service.go | 11 ++--------- pkg/controller/clusterset/clusterset.go | 2 +- pkg/controller/controller.go | 18 ++++-------------- pkg/controller/kubeconfig/kubeconfig.go | 2 +- 12 files changed, 17 insertions(+), 38 deletions(-) diff --git a/k3k-kubelet/kubelet.go b/k3k-kubelet/kubelet.go index 100e71b..3b98846 100644 --- a/k3k-kubelet/kubelet.go +++ b/k3k-kubelet/kubelet.go @@ -166,7 +166,7 @@ func virtRestConfig(ctx context.Context, VirtualConfigPath string, hostClient ct endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace) var b *bootstrap.ControlRuntimeBootstrap if err := retry.OnError(controller.Backoff, func(err error) bool { - return err == nil + return err != nil }, func() error { var err error b, err = bootstrap.DecodedBootstrap(cluster.Spec.Token, endpoint) diff --git a/pkg/controller/cluster/agent/agent.go b/pkg/controller/cluster/agent/agent.go index 676c372..dfc90e3 100644 --- a/pkg/controller/cluster/agent/agent.go +++ b/pkg/controller/cluster/agent/agent.go @@ -25,5 +25,5 @@ func New(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage string) Agent { } func configSecretName(clusterName string) string { - return controller.ObjectName(clusterName, nil, configName) + return controller.SafeConcatNameWithPrefix(clusterName, configName) } diff --git a/pkg/controller/cluster/agent/shared.go 
b/pkg/controller/cluster/agent/shared.go index c068856..f3f9d01 100644 --- a/pkg/controller/cluster/agent/shared.go +++ b/pkg/controller/cluster/agent/shared.go @@ -245,5 +245,5 @@ func (s *SharedAgent) roleBinding() *rbacv1.RoleBinding { } func (s *SharedAgent) Name() string { - return controller.ObjectName(s.cluster.Name, nil, sharedNodeAgentName) + return controller.SafeConcatNameWithPrefix(s.cluster.Name, sharedNodeAgentName) } diff --git a/pkg/controller/cluster/agent/virtual.go b/pkg/controller/cluster/agent/virtual.go index 28dd395..994ce63 100644 --- a/pkg/controller/cluster/agent/virtual.go +++ b/pkg/controller/cluster/agent/virtual.go @@ -219,5 +219,5 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect } func (v *VirtualAgent) Name() string { - return controller.ObjectName(v.cluster.Name, nil, virtualNodeAgentName) + return controller.SafeConcatNameWithPrefix(v.cluster.Name, virtualNodeAgentName) } diff --git a/pkg/controller/cluster/server/bootstrap/bootstrap.go b/pkg/controller/cluster/server/bootstrap/bootstrap.go index 41f2285..9e94dc0 100644 --- a/pkg/controller/cluster/server/bootstrap/bootstrap.go +++ b/pkg/controller/cluster/server/bootstrap/bootstrap.go @@ -60,7 +60,7 @@ func Generate(ctx context.Context, cluster *v1alpha1.Cluster, ip string) (*v1.Se APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: controller.ObjectName(cluster.Name, nil, "bootstrap"), + Name: controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"), Namespace: cluster.Namespace, OwnerReferences: []metav1.OwnerReference{ { diff --git a/pkg/controller/cluster/server/config.go b/pkg/controller/cluster/server/config.go index 5e0bf87..b5203c4 100644 --- a/pkg/controller/cluster/server/config.go +++ b/pkg/controller/cluster/server/config.go @@ -77,7 +77,7 @@ func serverOptions(cluster *v1alpha1.Cluster) string { func configSecretName(clusterName string, init bool) string { if !init { - return controller.ObjectName(clusterName, nil, 
configName) + return controller.SafeConcatNameWithPrefix(clusterName, configName) } - return controller.ObjectName(clusterName, nil, initConfigName) + return controller.SafeConcatNameWithPrefix(clusterName, initConfigName) } diff --git a/pkg/controller/cluster/server/ingress.go b/pkg/controller/cluster/server/ingress.go index 4c7b9fb..c29de26 100644 --- a/pkg/controller/cluster/server/ingress.go +++ b/pkg/controller/cluster/server/ingress.go @@ -32,11 +32,7 @@ func (s *Server) Ingress(ctx context.Context, client client.Client) (*networking APIVersion: "networking.k8s.io/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: controller.ObjectName(s.cluster.Name, &networkingv1.Ingress{ - TypeMeta: metav1.TypeMeta{ - Kind: "Ingress", - }, - }), + Name: controller.SafeConcatNameWithPrefix(s.cluster.Name, "ingress"), Namespace: s.cluster.Namespace, }, Spec: networkingv1.IngressSpec{ diff --git a/pkg/controller/cluster/server/server.go b/pkg/controller/cluster/server/server.go index 0e9c2ac..72470bd 100644 --- a/pkg/controller/cluster/server/server.go +++ b/pkg/controller/cluster/server/server.go @@ -227,7 +227,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error) persistent bool ) image := controller.K3SImage(s.cluster) - name := controller.ObjectName(s.cluster.Name, nil, serverName) + name := controller.SafeConcatNameWithPrefix(s.cluster.Name, serverName) replicas = *s.cluster.Spec.Servers diff --git a/pkg/controller/cluster/server/service.go b/pkg/controller/cluster/server/service.go index 5fe253e..98a0560 100644 --- a/pkg/controller/cluster/server/service.go +++ b/pkg/controller/cluster/server/service.go @@ -82,16 +82,9 @@ func (s *Server) StatefulServerService() *v1.Service { } func ServiceName(clusterName string) string { - return controller.ObjectName(clusterName, &v1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - }, - }) + return controller.SafeConcatNameWithPrefix(clusterName, "service") } func headlessServiceName(clusterName 
string) string { - return controller.ObjectName(clusterName, &v1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - }}, "-headless") + return controller.SafeConcatNameWithPrefix(clusterName, "service", "headless") } diff --git a/pkg/controller/clusterset/clusterset.go b/pkg/controller/clusterset/clusterset.go index 1e62239..ea81e2a 100644 --- a/pkg/controller/clusterset/clusterset.go +++ b/pkg/controller/clusterset/clusterset.go @@ -108,7 +108,7 @@ func netpol(ctx context.Context, clusterCIDR string, clusterSet *v1alpha1.Cluste } return &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ - Name: k3kcontroller.ObjectName(clusterSet.Name, nil), + Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name), Namespace: clusterSet.Namespace, }, TypeMeta: metav1.TypeMeta{ diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 8c5d537..e95d4bc 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -72,22 +72,12 @@ func Addresses(ctx context.Context, client ctrlruntimeclient.Client) ([]string, return addresses, nil } -// ObjectName will create a concatenated name based on the object's kind name that is being sent -// along with a prefix and the cluster name as well. -func ObjectName(clusterName string, object ctrlruntimeclient.Object, any ...string) string { - names := []string{namePrefix} - if clusterName != "" { - names = append(names, clusterName) - } - var objectKind string - if object != nil { - objectKind = strings.ToLower(object.GetObjectKind().GroupVersionKind().Kind) - names = append(names, objectKind) - } - return SafeConcatName(append(names, any...)...) +// SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix. +func SafeConcatNameWithPrefix(name ...string) string { + return SafeConcatName(append([]string{namePrefix}, name...)...) 
} -// safeConcatName concatenates the given strings and ensures the returned name is under 64 characters +// SafeConcatName concatenates the given strings and ensures the returned name is under 64 characters // by cutting the string off at 57 characters and setting the last 6 with an encoded version of the concatenated string. func SafeConcatName(name ...string) string { fullPath := strings.Join(name, "-") diff --git a/pkg/controller/kubeconfig/kubeconfig.go b/pkg/controller/kubeconfig/kubeconfig.go index 0347cdd..787c797 100644 --- a/pkg/controller/kubeconfig/kubeconfig.go +++ b/pkg/controller/kubeconfig/kubeconfig.go @@ -29,7 +29,7 @@ type KubeConfig struct { func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string) ([]byte, error) { nn := types.NamespacedName{ - Name: controller.ObjectName(cluster.Name, nil, "bootstrap"), + Name: controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"), Namespace: cluster.Namespace, } From 629c0c5e6193785342f87e5d23618d7c5394bf98 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Sat, 19 Oct 2024 05:10:17 +0300 Subject: [PATCH 12/15] fixes Signed-off-by: galal-hussein --- k3k-kubelet/config.go | 50 +++++++++++++-------------- k3k-kubelet/kubelet.go | 47 ++++++++++++------------- k3k-kubelet/main.go | 15 ++++---- pkg/controller/cluster/agent/agent.go | 1 - pkg/controller/controller.go | 2 +- 5 files changed, 57 insertions(+), 58 deletions(-) diff --git a/k3k-kubelet/config.go b/k3k-kubelet/config.go index c4beba1..7dd9a53 100644 --- a/k3k-kubelet/config.go +++ b/k3k-kubelet/config.go @@ -7,7 +7,7 @@ import ( "gopkg.in/yaml.v2" ) -// Config has all virtual-kubelet startup options +// config has all virtual-kubelet startup options type config struct { ClusterName string `yaml:"clusterName,omitempty"` ClusterNamespace string `yaml:"clusterNamespace,omitempty"` @@ -19,58 +19,58 @@ type config struct { KubeletPort string `yaml:"kubeletPort,omitempty"` } -func (t 
*config) unmarshalYAML(data []byte) error { - var c config +func (c *config) unmarshalYAML(data []byte) error { + var conf config - if err := yaml.Unmarshal(data, &c); err != nil { + if err := yaml.Unmarshal(data, &conf); err != nil { return err } - if t.ClusterName == "" { - t.ClusterName = c.ClusterName + if c.ClusterName == "" { + c.ClusterName = conf.ClusterName } - if t.ClusterNamespace == "" { - t.ClusterNamespace = c.ClusterNamespace + if c.ClusterNamespace == "" { + c.ClusterNamespace = conf.ClusterNamespace } - if t.HostConfigPath == "" { - t.HostConfigPath = c.HostConfigPath + if c.HostConfigPath == "" { + c.HostConfigPath = conf.HostConfigPath } - if t.VirtualConfigPath == "" { - t.VirtualConfigPath = c.VirtualConfigPath + if c.VirtualConfigPath == "" { + c.VirtualConfigPath = conf.VirtualConfigPath } - if t.KubeletPort == "" { - t.KubeletPort = c.KubeletPort + if c.KubeletPort == "" { + c.KubeletPort = conf.KubeletPort } - if t.AgentHostname == "" { - t.AgentHostname = c.AgentHostname + if c.AgentHostname == "" { + c.AgentHostname = conf.AgentHostname } - if t.NodeName == "" { - t.NodeName = c.NodeName + if c.NodeName == "" { + c.NodeName = conf.NodeName } return nil } -func (t *config) Validate() error { - if t.ClusterName == "" { +func (c *config) validate() error { + if c.ClusterName == "" { return errors.New("cluster name is not provided") } - if t.ClusterNamespace == "" { + if c.ClusterNamespace == "" { return errors.New("cluster namespace is not provided") } - if t.AgentHostname == "" { + if c.AgentHostname == "" { return errors.New("agent Hostname is not provided") } return nil } -func (t *config) Parse(path string) error { +func (c *config) parse(path string) error { if _, err := os.Stat(path); os.IsNotExist(err) { return nil } - configFileBytes, err := os.ReadFile(path) + b, err := os.ReadFile(path) if err != nil { return err } - return t.unmarshalYAML(configFileBytes) + return c.unmarshalYAML(b) } diff --git a/k3k-kubelet/kubelet.go 
b/k3k-kubelet/kubelet.go index 3b98846..8cebf3c 100644 --- a/k3k-kubelet/kubelet.go +++ b/k3k-kubelet/kubelet.go @@ -32,11 +32,11 @@ import ( ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -var Scheme = runtime.NewScheme() +var scheme = runtime.NewScheme() func init() { - _ = clientgoscheme.AddToScheme(Scheme) - _ = v1alpha1.AddToScheme(Scheme) + _ = clientgoscheme.AddToScheme(scheme) + _ = v1alpha1.AddToScheme(scheme) } type kubelet struct { @@ -48,20 +48,20 @@ type kubelet struct { node *nodeutil.Node } -func newKubelet(c *config) (*kubelet, error) { +func newKubelet(ctx context.Context, c *config) (*kubelet, error) { hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath) if err != nil { return nil, err } hostClient, err := ctrlruntimeclient.New(hostConfig, ctrlruntimeclient.Options{ - Scheme: Scheme, + Scheme: scheme, }) if err != nil { return nil, err } - virtConfig, err := virtRestConfig(context.Background(), c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace) + virtConfig, err := virtRestConfig(ctx, c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace) if err != nil { return nil, err } @@ -78,9 +78,9 @@ func newKubelet(c *config) (*kubelet, error) { }, nil } -func (k *kubelet) RegisterNode(srvPort, namespace, name, hostname string) error { +func (k *kubelet) registerNode(ctx context.Context, srvPort, namespace, name, hostname string) error { providerFunc := k.newProviderFunc(namespace, name, hostname) - nodeOpts := k.nodeOpts(srvPort, namespace, name, hostname) + nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname) var err error k.node, err = nodeutil.NewNode(k.name, providerFunc, nodeutil.WithClient(k.virtClient), nodeOpts) @@ -90,34 +90,32 @@ func (k *kubelet) RegisterNode(srvPort, namespace, name, hostname string) error return nil } -func (k *kubelet) Start(ctx context.Context) { +func (k *kubelet) start(ctx context.Context) { go func() { - ctx := context.Background() logger, err := 
zap.NewProduction() if err != nil { - fmt.Printf("unable to create logger: %s", err.Error()) + fmt.Println("unable to create logger:", err.Error()) os.Exit(1) } wrapped := LogWrapper{ *logger.Sugar(), } ctx = log.WithLogger(ctx, &wrapped) - err = k.node.Run(ctx) - if err != nil { + if err := k.node.Run(ctx); err != nil { fmt.Printf("node errored when running: %s \n", err.Error()) os.Exit(1) } }() if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil { - fmt.Printf("node was not ready within timeout of 1 minute: %s \n", err.Error()) + fmt.Println("node was not ready within timeout of 1 minute:", err.Error()) os.Exit(1) } <-k.node.Done() if err := k.node.Err(); err != nil { - fmt.Printf("node stopped with an error: %s \n", err.Error()) + fmt.Println("node stopped with an error:", err.Error()) os.Exit(1) } - fmt.Print("node exited without an error") + fmt.Println("node exited without an error") } func (k *kubelet) newProviderFunc(namespace, name, hostname string) nodeutil.NewProviderFunc { @@ -133,7 +131,7 @@ func (k *kubelet) newProviderFunc(namespace, name, hostname string) nodeutil.New } } -func (k *kubelet) nodeOpts(srvPort, namespace, name, hostname string) nodeutil.NodeOpt { +func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostname string) nodeutil.NodeOpt { return func(c *nodeutil.NodeConfig) error { c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort) // set up the routes @@ -143,8 +141,6 @@ func (k *kubelet) nodeOpts(srvPort, namespace, name, hostname string) nodeutil.N } c.Handler = mux - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname) if err != nil { return fmt.Errorf("unable to get tls config: %w", err) @@ -154,16 +150,16 @@ func (k *kubelet) nodeOpts(srvPort, namespace, name, hostname string) nodeutil.N } } -func virtRestConfig(ctx context.Context, VirtualConfigPath string, hostClient 
ctrlruntimeclient.Client, clusterName, clusterNamespace string) (*rest.Config, error) { - if VirtualConfigPath != "" { - return clientcmd.BuildConfigFromFlags("", VirtualConfigPath) +func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (*rest.Config, error) { + if virtualConfigPath != "" { + return clientcmd.BuildConfigFromFlags("", virtualConfigPath) } // virtual kubeconfig file is empty, trying to fetch the k3k cluster kubeconfig var cluster v1alpha1.Cluster if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil { return nil, err } - endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace) + endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace var b *bootstrap.ControlRuntimeBootstrap if err := retry.OnError(controller.Backoff, func(err error) bool { return err != nil @@ -246,7 +242,10 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu // create rootCA CertPool certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content)) if err != nil { - return nil, fmt.Errorf("unable to create certs: %w", err) + return nil, fmt.Errorf("unable to create ca certs: %w", err) + } + if len(certs) < 1 { + return nil, fmt.Errorf("ca cert is not parsed correctly") } pool := x509.NewCertPool() pool.AddCert(certs[0]) diff --git a/k3k-kubelet/main.go b/k3k-kubelet/main.go index d03a31b..f653344 100644 --- a/k3k-kubelet/main.go +++ b/k3k-kubelet/main.go @@ -70,32 +70,33 @@ func main() { Value: "/etc/rancher/k3k/config.yaml", }, } - app.Action = Run + app.Action = run if err := app.Run(os.Args); err != nil { logrus.Fatal(err) } } -func Run(clx *cli.Context) { - if err := cfg.Parse(configFile); err != nil { +func run(clx *cli.Context) { + if err := cfg.parse(configFile); err != nil { fmt.Printf("failed to parse config file %s: %v", configFile, err) 
os.Exit(1) } - if err := cfg.Validate(); err != nil { + if err := cfg.validate(); err != nil { fmt.Printf("failed to validate config: %v", err) os.Exit(1) } - k, err := newKubelet(&cfg) + ctx := context.Background() + k, err := newKubelet(ctx, &cfg) if err != nil { fmt.Printf("failed to create new virtual kubelet instance: %v", err) os.Exit(1) } - if err := k.RegisterNode(cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname); err != nil { + if err := k.registerNode(ctx, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname); err != nil { fmt.Printf("failed to register new node: %v", err) os.Exit(1) } - k.Start(context.Background()) + k.start(ctx) } diff --git a/pkg/controller/cluster/agent/agent.go b/pkg/controller/cluster/agent/agent.go index dfc90e3..262ce18 100644 --- a/pkg/controller/cluster/agent/agent.go +++ b/pkg/controller/cluster/agent/agent.go @@ -3,7 +3,6 @@ package agent import ( "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" "github.com/rancher/k3k/pkg/controller" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index e95d4bc..29a6d26 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -64,7 +64,7 @@ func Addresses(ctx context.Context, client ctrlruntimeclient.Client) ([]string, return nil, err } - var addresses []string + addresses := make([]string, 0, len(nodeList.Items)) for _, node := range nodeList.Items { addresses = append(addresses, nodeAddress(&node)) } From 36b12ddcf035552c66b0bae06c42b1990c6742a1 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Sat, 19 Oct 2024 05:13:34 +0300 Subject: [PATCH 13/15] fixes Signed-off-by: galal-hussein --- k3k-kubelet/provider/provider.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k3k-kubelet/provider/provider.go b/k3k-kubelet/provider/provider.go index e8fbf86..3ee22bd 100644 --- a/k3k-kubelet/provider/provider.go +++ 
b/k3k-kubelet/provider/provider.go @@ -310,6 +310,6 @@ func (p *Provider) translateFrom(hostPod *corev1.Pod) *corev1.Pod { return virtualPod } -func (p *Provider) hostName(virtualNamespace string, virtualName string) string { +func (p *Provider) hostName(virtualNamespace, virtualName string) string { return controller.SafeConcatName(p.ClusterName, p.ClusterNamespace, virtualNamespace, virtualName) } From 567be344b1133bd065aa64838206b3f1c3f2be9b Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Sat, 19 Oct 2024 05:14:27 +0300 Subject: [PATCH 14/15] fixes Signed-off-by: galal-hussein --- k3k-kubelet/kubelet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k3k-kubelet/kubelet.go b/k3k-kubelet/kubelet.go index 8cebf3c..6a04b3e 100644 --- a/k3k-kubelet/kubelet.go +++ b/k3k-kubelet/kubelet.go @@ -102,7 +102,7 @@ func (k *kubelet) start(ctx context.Context) { } ctx = log.WithLogger(ctx, &wrapped) if err := k.node.Run(ctx); err != nil { - fmt.Printf("node errored when running: %s \n", err.Error()) + fmt.Println("node errored when running:", err.Error()) os.Exit(1) } }() From 35126170ab488d9199c35b2e143145ac9655c002 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Sat, 19 Oct 2024 06:18:18 +0300 Subject: [PATCH 15/15] fixes Signed-off-by: galal-hussein --- pkg/controller/cluster/agent/agent.go | 2 +- pkg/controller/cluster/agent/shared.go | 6 ++---- pkg/controller/cluster/agent/virtual.go | 6 ++---- pkg/controller/cluster/cluster.go | 7 +------ 4 files changed, 6 insertions(+), 15 deletions(-) diff --git a/pkg/controller/cluster/agent/agent.go b/pkg/controller/cluster/agent/agent.go index 262ce18..6d6ad6b 100644 --- a/pkg/controller/cluster/agent/agent.go +++ b/pkg/controller/cluster/agent/agent.go @@ -13,7 +13,7 @@ const ( type Agent interface { Name() string Config() (ctrlruntimeclient.Object, error) - Resources() ([]ctrlruntimeclient.Object, error) + Resources() []ctrlruntimeclient.Object } func New(cluster *v1alpha1.Cluster, serviceIP, 
sharedAgentImage string) Agent { diff --git a/pkg/controller/cluster/agent/shared.go b/pkg/controller/cluster/agent/shared.go index f3f9d01..22393c4 100644 --- a/pkg/controller/cluster/agent/shared.go +++ b/pkg/controller/cluster/agent/shared.go @@ -60,10 +60,8 @@ token: %s`, cluster.Name, cluster.Namespace, nodeName, nodeName, cluster.Spec.Token) } -func (s *SharedAgent) Resources() ([]ctrlruntimeclient.Object, error) { - var objs []ctrlruntimeclient.Object - objs = append(objs, s.serviceAccount(), s.role(), s.roleBinding(), s.service(), s.deployment()) - return objs, nil +func (s *SharedAgent) Resources() []ctrlruntimeclient.Object { + return []ctrlruntimeclient.Object{s.serviceAccount(), s.role(), s.roleBinding(), s.service(), s.deployment()} } func (s *SharedAgent) deployment() *apps.Deployment { diff --git a/pkg/controller/cluster/agent/virtual.go b/pkg/controller/cluster/agent/virtual.go index 994ce63..65fb3eb 100644 --- a/pkg/controller/cluster/agent/virtual.go +++ b/pkg/controller/cluster/agent/virtual.go @@ -47,10 +47,8 @@ func (v *VirtualAgent) Config() (ctrlruntimeclient.Object, error) { }, nil } -func (v *VirtualAgent) Resources() ([]ctrlruntimeclient.Object, error) { - var objs []ctrlruntimeclient.Object - objs = append(objs, v.deployment()) - return objs, nil +func (v *VirtualAgent) Resources() []ctrlruntimeclient.Object { + return []ctrlruntimeclient.Object{v.deployment()} } func virtualAgentData(serviceIP, token string) string { diff --git a/pkg/controller/cluster/cluster.go b/pkg/controller/cluster/cluster.go index 8df3002..f1657c6 100644 --- a/pkg/controller/cluster/cluster.go +++ b/pkg/controller/cluster/cluster.go @@ -283,16 +283,11 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error { agent := agent.New(cluster, serviceIP, c.SharedAgentImage) - agentsConfig, err := agent.Config() if err != nil { return err } 
- agentResources, err := agent.Resources() - if err != nil { - return err - } - + agentResources := agent.Resources() agentResources = append(agentResources, agentsConfig) return c.ensureAll(ctx, cluster, agentResources)