From f2ca5d71f6df2b58f577fe69c870412373820cd7 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Fri, 10 Jan 2025 02:03:18 +0200 Subject: [PATCH] Add pvc syncing support Signed-off-by: galal-hussein --- cli/cmds/kubeconfig/kubeconfig.go | 3 +- .../controller/persistentvolumeclaims.go | 125 ++++++++++++++++++ k3k-kubelet/controller/webhook/pod.go | 120 +++++++++++++++++ k3k-kubelet/kubelet.go | 23 +++- k3k-kubelet/provider/provider.go | 2 + pkg/controller/{kubeconfig => certs}/certs.go | 2 +- pkg/controller/cluster/agent/agent.go | 4 +- pkg/controller/cluster/agent/shared.go | 114 +++++++++++++++- pkg/controller/cluster/agent/virtual.go | 8 +- pkg/controller/cluster/cluster.go | 4 +- pkg/controller/cluster/pod.go | 4 +- pkg/controller/cluster/server/config.go | 2 +- pkg/controller/cluster/server/server.go | 2 - pkg/controller/kubeconfig/kubeconfig.go | 3 +- 14 files changed, 391 insertions(+), 25 deletions(-) create mode 100644 k3k-kubelet/controller/persistentvolumeclaims.go create mode 100644 k3k-kubelet/controller/webhook/pod.go rename pkg/controller/{kubeconfig => certs}/certs.go (98%) diff --git a/cli/cmds/kubeconfig/kubeconfig.go b/cli/cmds/kubeconfig/kubeconfig.go index d527abb..ad2041f 100644 --- a/cli/cmds/kubeconfig/kubeconfig.go +++ b/cli/cmds/kubeconfig/kubeconfig.go @@ -11,6 +11,7 @@ import ( "github.com/rancher/k3k/cli/cmds" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" "github.com/rancher/k3k/pkg/controller" + "github.com/rancher/k3k/pkg/controller/certs" "github.com/rancher/k3k/pkg/controller/kubeconfig" "github.com/sirupsen/logrus" "github.com/urfave/cli" @@ -122,7 +123,7 @@ func generate(clx *cli.Context) error { } host := strings.Split(url.Host, ":") - certAltNames := kubeconfig.AddSANs(altNames) + certAltNames := certs.AddSANs(altNames) if org == nil { org = cli.StringSlice{user.SystemPrivilegedGroup} } diff --git a/k3k-kubelet/controller/persistentvolumeclaims.go b/k3k-kubelet/controller/persistentvolumeclaims.go new file mode 100644 index 
0000000..86c1dcf --- /dev/null +++ b/k3k-kubelet/controller/persistentvolumeclaims.go @@ -0,0 +1,125 @@ +package controller + +import ( + "context" + + "github.com/rancher/k3k/k3k-kubelet/translate" + "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" + "github.com/rancher/k3k/pkg/log" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + pvcController = "pvc-syncer-controller" + pvcFinalizerName = "pv.k3k.io/finalizer" +) + +type PVCReconciler struct { + virtualClient ctrlruntimeclient.Client + hostClient ctrlruntimeclient.Client + clusterName string + clusterNamespace string + Scheme *runtime.Scheme + HostScheme *runtime.Scheme + logger *log.Logger + Translater translate.ToHostTranslater + //objs sets.Set[types.NamespacedName] +} + +// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet +func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error { + translater := translate.ToHostTranslater{ + ClusterName: clusterName, + ClusterNamespace: clusterNamespace, + } + // initialize a new Reconciler + reconciler := PVCReconciler{ + virtualClient: virtMgr.GetClient(), + hostClient: hostMgr.GetClient(), + Scheme: virtMgr.GetScheme(), + HostScheme: hostMgr.GetScheme(), + logger: logger.Named(pvcController), + Translater: translater, + clusterName: clusterName, + clusterNamespace: clusterNamespace, + } + return ctrl.NewControllerManagedBy(virtMgr). + For(&v1.PersistentVolumeClaim{}). 
+ WithOptions(controller.Options{ + MaxConcurrentReconciles: maxConcurrentReconciles, + }). + Complete(&reconciler) +} + +func (v *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + log := v.logger.With("Cluster", v.clusterName, "PersistentVolumeClaim", req.NamespacedName) + var ( + virtPVC v1.PersistentVolumeClaim + hostPVC v1.PersistentVolumeClaim + cluster v1alpha1.Cluster + ) + if err := v.hostClient.Get(ctx, types.NamespacedName{Name: v.clusterName, Namespace: v.clusterNamespace}, &cluster); err != nil { + return reconcile.Result{}, err + } + + // handling persistent volume sync + if err := v.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil { + return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err) + } + syncedPVC := v.pvc(&virtPVC) + if err := controllerutil.SetControllerReference(&cluster, syncedPVC, v.HostScheme); err != nil { + return reconcile.Result{}, err + } + // handle deletion + if !virtPVC.DeletionTimestamp.IsZero() { + // deleting the synced service if exists + if err := v.hostClient.Delete(ctx, syncedPVC); err != nil { + return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err) + } + // remove the finalizer after cleaning up the synced service + if controllerutil.ContainsFinalizer(&virtPVC, pvcFinalizerName) { + controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) + if err := v.virtualClient.Update(ctx, &virtPVC); err != nil { + return reconcile.Result{}, err + } + } + return reconcile.Result{}, nil + } + + // getting the cluster for setting the controller reference + + // Add finalizer if it does not exist + if !controllerutil.ContainsFinalizer(&virtPVC, pvcFinalizerName) { + controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) + if err := v.virtualClient.Update(ctx, &virtPVC); err != nil { + return reconcile.Result{}, err + } + } + // create or update the pv on host + if err := v.hostClient.Get(ctx, types.NamespacedName{Name: syncedPVC.Name, Namespace: 
v.clusterNamespace}, &hostPVC); err != nil { + if apierrors.IsNotFound(err) { + log.Info("creating the persistent volume for the first time on the host cluster") + return reconcile.Result{}, v.hostClient.Create(ctx, syncedPVC) + } + return reconcile.Result{}, err + } + log.Info("updating pvc on the host cluster") + return reconcile.Result{}, v.hostClient.Update(ctx, syncedPVC) + +} + +func (v *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim { + hostPVC := obj.DeepCopy() + v.Translater.TranslateTo(hostPVC) + // don't sync finalizers to the host + return hostPVC +} diff --git a/k3k-kubelet/controller/webhook/pod.go b/k3k-kubelet/controller/webhook/pod.go new file mode 100644 index 0000000..65bef7a --- /dev/null +++ b/k3k-kubelet/controller/webhook/pod.go @@ -0,0 +1,120 @@ +package webhook + +import ( + "context" + "errors" + "fmt" + + "github.com/rancher/k3k/pkg/controller/cluster/agent" + "github.com/rancher/k3k/pkg/log" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +const ( + webhookName = "nodename.podmutator.k3k.io" + webhookTimeout = int32(10) + webhookPort = "9443" + webhookPath = "/mutate--v1-pod" +) + +type webhookHandler struct { + client ctrlruntimeclient.Client + scheme *runtime.Scheme + nodeName string + clusterName string + clusterNamespace string + logger *log.Logger +} + +// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to +// modify the nodeName of the created pods with the name of the virtual kubelet node name +func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName string, 
logger *log.Logger) error { + handler := webhookHandler{ + client: mgr.GetClient(), + scheme: mgr.GetScheme(), + logger: logger, + clusterName: clusterName, + clusterNamespace: clusterNamespace, + nodeName: nodeName, + } + + // create mutator webhook configuration to the cluster + config, err := handler.configuration(ctx, hostClient) + if err != nil { + return err + } + if err := handler.client.Create(ctx, config); err != nil { + return err + } + // register webhook with the manager + return ctrl.NewWebhookManagedBy(mgr).For(&v1.Pod{}).WithDefaulter(&handler).Complete() +} + +func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error { + pod, ok := obj.(*v1.Pod) + if !ok { + return fmt.Errorf("invalid request: object was type %T not pod", obj) + } + w.logger.Infow("received request", "Pod", pod.Name, "Namespace", pod.Namespace) + if pod.Spec.NodeName == "" { + pod.Spec.NodeName = w.nodeName + } + return nil +} + +func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) { + w.logger.Infow("extracting webhook tls from host cluster") + var ( + webhookTLSSecret v1.Secret + ) + if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil { + return nil, err + } + caBundle, ok := webhookTLSSecret.Data["ca.crt"] + if !ok { + return nil, errors.New("webhook CABundle does not exist in secret") + } + webhookURL := "https://" + w.nodeName + ":" + webhookPort + webhookPath + return &admissionregistrationv1.MutatingWebhookConfiguration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "admissionregistration.k8s.io/v1", + Kind: "MutatingWebhookConfiguration", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: webhookName + "-configuration", + }, + Webhooks: []admissionregistrationv1.MutatingWebhook{ + { + Name: webhookName, + AdmissionReviewVersions: []string{"v1"}, 
+ SideEffects: ptr.To(admissionregistrationv1.SideEffectClassNone), + TimeoutSeconds: ptr.To(webhookTimeout), + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + URL: ptr.To(webhookURL), + CABundle: caBundle, + }, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Operations: []admissionregistrationv1.OperationType{ + "CREATE", + }, + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"pods"}, + Scope: ptr.To(admissionregistrationv1.NamespacedScope), + }, + }, + }, + }, + }, + }, nil +} diff --git a/k3k-kubelet/kubelet.go b/k3k-kubelet/kubelet.go index 41e95fe..157c4ec 100644 --- a/k3k-kubelet/kubelet.go +++ b/k3k-kubelet/kubelet.go @@ -12,12 +12,13 @@ import ( certutil "github.com/rancher/dynamiclistener/cert" k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller" + k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook" "github.com/rancher/k3k/k3k-kubelet/provider" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" "github.com/rancher/k3k/pkg/controller" + "github.com/rancher/k3k/pkg/controller/certs" "github.com/rancher/k3k/pkg/controller/cluster/server" "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" - "github.com/rancher/k3k/pkg/controller/kubeconfig" k3klog "github.com/rancher/k3k/pkg/log" "github.com/virtual-kubelet/virtual-kubelet/log" "github.com/virtual-kubelet/virtual-kubelet/node" @@ -38,6 +39,7 @@ import ( ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" ) var ( @@ -111,8 +113,12 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet if err != nil { return nil, errors.New("unable to add client go types to virtual cluster scheme: " + err.Error()) } + webhookServer := webhook.NewServer(webhook.Options{ + CertDir: 
"/opt/rancher/k3k-webhook", + }) virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{ - Scheme: virtualScheme, + Scheme: virtualScheme, + WebhookServer: webhookServer, Metrics: ctrlserver.Options{ BindAddress: ":8084", }, @@ -120,12 +126,21 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet if err != nil { return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error()) } + logger.Info("adding pod mutator webhook") + if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.NodeName, logger); err != nil { + return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error()) + } logger.Info("adding service syncer controller") if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil { return nil, errors.New("failed to add service syncer controller: " + err.Error()) } + logger.Info("adding pvc syncer controller") + if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil { + return nil, errors.New("failed to add pvc syncer controller: " + err.Error()) + } + clusterIP, err := clusterIP(ctx, c.AgentHostname, c.ClusterNamespace, hostClient) if err != nil { return nil, errors.New("failed to extract the clusterIP for the server service: " + err.Error()) @@ -270,7 +285,7 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct }); err != nil { return nil, errors.New("unable to decode bootstrap: " + err.Error()) } - adminCert, adminKey, err := kubeconfig.CreateClientCertKey( + adminCert, adminKey, err := certs.CreateClientCertKey( controller.AdminCommonName, []string{user.SystemPrivilegedGroup}, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, time.Hour*24*time.Duration(356), b.ClientCA.Content, @@ -333,7 +348,7 @@ func 
loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu DNSNames: []string{hostname}, IPs: []net.IP{ip}, } - cert, key, err := kubeconfig.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content) + cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content) if err != nil { return nil, errors.New("unable to get cert and key: " + err.Error()) } diff --git a/k3k-kubelet/provider/provider.go b/k3k-kubelet/provider/provider.go index 982afc1..9576585 100644 --- a/k3k-kubelet/provider/provider.go +++ b/k3k-kubelet/provider/provider.go @@ -393,6 +393,8 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo } } } + } else if volume.PersistentVolumeClaim != nil { + volume.PersistentVolumeClaim.ClaimName = p.Translater.TranslateName(podNamespace, volume.PersistentVolumeClaim.ClaimName) } } return nil diff --git a/pkg/controller/kubeconfig/certs.go b/pkg/controller/certs/certs.go similarity index 98% rename from pkg/controller/kubeconfig/certs.go rename to pkg/controller/certs/certs.go index 276e455..0e3cdc0 100644 --- a/pkg/controller/kubeconfig/certs.go +++ b/pkg/controller/certs/certs.go @@ -1,4 +1,4 @@ -package kubeconfig +package certs import ( "crypto" diff --git a/pkg/controller/cluster/agent/agent.go b/pkg/controller/cluster/agent/agent.go index ee3a11c..b81df88 100644 --- a/pkg/controller/cluster/agent/agent.go +++ b/pkg/controller/cluster/agent/agent.go @@ -12,8 +12,8 @@ const ( type Agent interface { Name() string - Config() (ctrlruntimeclient.Object, error) - Resources() []ctrlruntimeclient.Object + Config() ctrlruntimeclient.Object + Resources() ([]ctrlruntimeclient.Object, error) } func New(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage, token string) Agent { diff --git a/pkg/controller/cluster/agent/shared.go 
b/pkg/controller/cluster/agent/shared.go index 366dc64..17a2449 100644 --- a/pkg/controller/cluster/agent/shared.go +++ b/pkg/controller/cluster/agent/shared.go @@ -1,11 +1,16 @@ package agent import ( + "crypto" + "crypto/x509" "fmt" + "time" + certutil "github.com/rancher/dynamiclistener/cert" "github.com/rancher/k3k/k3k-kubelet/translate" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" "github.com/rancher/k3k/pkg/controller" + "github.com/rancher/k3k/pkg/controller/certs" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -36,7 +41,7 @@ func NewSharedAgent(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage, toke } } -func (s *SharedAgent) Config() (ctrlruntimeclient.Object, error) { +func (s *SharedAgent) Config() ctrlruntimeclient.Object { config := sharedAgentData(s.cluster, s.token, s.Name(), s.serviceIP) return &v1.Secret{ @@ -51,7 +56,7 @@ func (s *SharedAgent) Config() (ctrlruntimeclient.Object, error) { Data: map[string][]byte{ "config.yaml": []byte(config), }, - }, nil + } } func sharedAgentData(cluster *v1alpha1.Cluster, token, nodeName, ip string) string { @@ -64,7 +69,12 @@ token: %s`, cluster.Name, cluster.Namespace, nodeName, nodeName, ip, token) } -func (s *SharedAgent) Resources() []ctrlruntimeclient.Object { +func (s *SharedAgent) Resources() ([]ctrlruntimeclient.Object, error) { + // generate certs for webhook + certSecret, err := s.webhookTLS() + if err != nil { + return nil, err + } return []ctrlruntimeclient.Object{ s.serviceAccount(), s.role(), @@ -72,7 +82,7 @@ func (s *SharedAgent) Resources() []ctrlruntimeclient.Object { s.service(), s.deployment(), s.dnsService(), - } + certSecret}, nil } func (s *SharedAgent) deployment() *apps.Deployment { @@ -136,6 +146,28 @@ func (s *SharedAgent) podSpec(affinitySelector *metav1.LabelSelector) v1.PodSpec }, }, }, + { + Name: "webhook-certs", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: WebhookSecretName(s.cluster.Name), + Items: 
[]v1.KeyToPath{ + { + Key: "tls.crt", + Path: "tls.crt", + }, + { + Key: "tls.key", + Path: "tls.key", + }, + { + Key: "ca.crt", + Path: "ca.crt", + }, + }, + }, + }, + }, }, Containers: []v1.Container{ { @@ -155,6 +187,18 @@ func (s *SharedAgent) podSpec(affinitySelector *metav1.LabelSelector) v1.PodSpec MountPath: "/opt/rancher/k3k/", ReadOnly: false, }, + { + Name: "webhook-certs", + MountPath: "/opt/rancher/k3k-webhook", + ReadOnly: false, + }, + }, + Ports: []v1.ContainerPort{ + { + Name: "webhook-port", + Protocol: v1.ProtocolTCP, + ContainerPort: 9443, + }, }, }, }} @@ -183,6 +227,12 @@ func (s *SharedAgent) service() *v1.Service { Protocol: v1.ProtocolTCP, Port: 10250, }, + { + Name: "webhook-server", + Protocol: v1.ProtocolTCP, + Port: 9443, + TargetPort: intstr.FromInt32(9443), + }, }, }, } @@ -254,7 +304,7 @@ func (s *SharedAgent) role() *rbacv1.Role { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{""}, - Resources: []string{"pods", "pods/log", "pods/exec", "secrets", "configmaps", "services"}, + Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/exec", "secrets", "configmaps", "services"}, Verbs: []string{"*"}, }, { @@ -298,3 +348,57 @@ func (s *SharedAgent) Name() string { func (s *SharedAgent) DNSName() string { return controller.SafeConcatNameWithPrefix(s.cluster.Name, "kube-dns") } + +func (s *SharedAgent) webhookTLS() (*v1.Secret, error) { + // generate CA CERT/KEY + caKeyBytes, err := certutil.MakeEllipticPrivateKeyPEM() + if err != nil { + return nil, err + } + + caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes) + if err != nil { + return nil, err + } + + cfg := certutil.Config{ + CommonName: fmt.Sprintf("k3k-webhook-ca@%d", time.Now().Unix()), + } + + caCert, err := certutil.NewSelfSignedCACert(cfg, caKey.(crypto.Signer)) + if err != nil { + return nil, err + } + + caCertBytes := certutil.EncodeCertPEM(caCert) + // generate webhook cert bundle + altNames := certs.AddSANs([]string{s.Name(), s.cluster.Name}) + 
webhookCert, webhookKey, err := certs.CreateClientCertKey( + s.Name(), nil, + &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, time.Hour*24*time.Duration(356), + string(caCertBytes), + string(caKeyBytes)) + if err != nil { + return nil, err + } + return &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: WebhookSecretName(s.cluster.Name), + Namespace: s.cluster.Namespace, + }, + Data: map[string][]byte{ + "tls.crt": webhookCert, + "tls.key": webhookKey, + "ca.crt": caCertBytes, + "ca.key": caKeyBytes, + }, + }, nil +} + +func WebhookSecretName(clusterName string) string { + return controller.SafeConcatNameWithPrefix(clusterName, "webhook") +} diff --git a/pkg/controller/cluster/agent/virtual.go b/pkg/controller/cluster/agent/virtual.go index c0a97b0..eabc6b9 100644 --- a/pkg/controller/cluster/agent/virtual.go +++ b/pkg/controller/cluster/agent/virtual.go @@ -31,7 +31,7 @@ func NewVirtualAgent(cluster *v1alpha1.Cluster, serviceIP, token string) Agent { } } -func (v *VirtualAgent) Config() (ctrlruntimeclient.Object, error) { +func (v *VirtualAgent) Config() ctrlruntimeclient.Object { config := virtualAgentData(v.serviceIP, v.token) return &v1.Secret{ @@ -46,11 +46,11 @@ func (v *VirtualAgent) Config() (ctrlruntimeclient.Object, error) { Data: map[string][]byte{ "config.yaml": []byte(config), }, - }, nil + } } -func (v *VirtualAgent) Resources() []ctrlruntimeclient.Object { - return []ctrlruntimeclient.Object{v.deployment()} +func (v *VirtualAgent) Resources() ([]ctrlruntimeclient.Object, error) { + return []ctrlruntimeclient.Object{v.deployment()}, nil } func virtualAgentData(serviceIP, token string) string { diff --git a/pkg/controller/cluster/cluster.go b/pkg/controller/cluster/cluster.go index 1d3acbc..f7936da 100644 --- a/pkg/controller/cluster/cluster.go +++ b/pkg/controller/cluster/cluster.go @@ -339,11 +339,11 @@ func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx 
context.Context, clus func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error { agent := agent.New(cluster, serviceIP, c.SharedAgentImage, token) - agentsConfig, err := agent.Config() + agentsConfig := agent.Config() + agentResources, err := agent.Resources() if err != nil { return err } - agentResources := agent.Resources() agentResources = append(agentResources, agentsConfig) return c.ensureAll(ctx, cluster, agentResources) diff --git a/pkg/controller/cluster/pod.go b/pkg/controller/cluster/pod.go index f3a9c0e..c997845 100644 --- a/pkg/controller/cluster/pod.go +++ b/pkg/controller/cluster/pod.go @@ -12,9 +12,9 @@ import ( certutil "github.com/rancher/dynamiclistener/cert" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" k3kcontroller "github.com/rancher/k3k/pkg/controller" + "github.com/rancher/k3k/pkg/controller/certs" "github.com/rancher/k3k/pkg/controller/cluster/server" "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" - "github.com/rancher/k3k/pkg/controller/kubeconfig" "github.com/rancher/k3k/pkg/log" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" clientv3 "go.etcd.io/etcd/client/v3" @@ -168,7 +168,7 @@ func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluste return nil, err } - etcdCert, etcdKey, err := kubeconfig.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content) + etcdCert, etcdKey, err := certs.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content) if err != nil { return nil, err } diff --git a/pkg/controller/cluster/server/config.go b/pkg/controller/cluster/server/config.go index bf23001..a80f0bb 100644 --- a/pkg/controller/cluster/server/config.go +++ b/pkg/controller/cluster/server/config.go @@ -68,7 +68,7 @@ func serverOptions(cluster *v1alpha1.Cluster, token string) string 
{ } } if cluster.Spec.Mode != agent.VirtualNodeMode { - opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server" + opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage" } // TODO: Add extra args to the options diff --git a/pkg/controller/cluster/server/server.go b/pkg/controller/cluster/server/server.go index 5d30bdc..e3880c2 100644 --- a/pkg/controller/cluster/server/server.go +++ b/pkg/controller/cluster/server/server.go @@ -218,7 +218,6 @@ func (s *Server) podSpec(image, name string, persistent bool, affinitySelector * }, }, } - // start the pod unprivileged in shared mode if s.mode == agent.VirtualNodeMode { podSpec.Containers[0].SecurityContext = &v1.SecurityContext{ @@ -285,7 +284,6 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error) var volumes []v1.Volume var volumeMounts []v1.VolumeMount - for _, addon := range s.cluster.Spec.Addons { namespace := k3kSystemNamespace if addon.SecretNamespace != "" { diff --git a/pkg/controller/kubeconfig/kubeconfig.go b/pkg/controller/kubeconfig/kubeconfig.go index 787c797..53225bc 100644 --- a/pkg/controller/kubeconfig/kubeconfig.go +++ b/pkg/controller/kubeconfig/kubeconfig.go @@ -11,6 +11,7 @@ import ( certutil "github.com/rancher/dynamiclistener/cert" "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" "github.com/rancher/k3k/pkg/controller" + "github.com/rancher/k3k/pkg/controller/certs" "github.com/rancher/k3k/pkg/controller/cluster/server" "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" v1 "k8s.io/api/core/v1" @@ -48,7 +49,7 @@ func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster return nil, err } - adminCert, adminKey, err := CreateClientCertKey( + adminCert, adminKey, err := certs.CreateClientCertKey( k.CN, k.ORG, &k.AltNames, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, k.ExpiryDate, 
bootstrap.ClientCA.Content,