From 99eee826b33b56915bc95c79008656ef84cc770f Mon Sep 17 00:00:00 2001
From: mgianluc
Date: Tue, 9 Apr 2024 15:56:30 +0200
Subject: [PATCH] When the values section of a helm chart changes, treat it as
 an upgrade

---
 Makefile                                      |  4 +-
 api/v1alpha1/clustersummary_types.go          |  5 +-
 api/v1alpha1/zz_generated.deepcopy.go         |  4 +-
 ...ig.projectsveltos.io_clustersummaries.yaml | 11 ++-
 config/default/manager_auth_proxy_patch.yaml  |  2 +-
 config/default/manager_image_patch.yaml       |  2 +-
 controllers/handlers_helm.go                  | 51 ++++++++++----
 controllers/handlers_kustomize.go             |  5 --
 controllers/handlers_resources.go             |  5 --
 controllers/handlers_utils.go                 |  4 --
 controllers/utils.go                          | 41 -----------
 manifest/deployment-agentless.yaml            |  4 +-
 manifest/deployment-shard.yaml                |  4 +-
 manifest/manifest.yaml                        | 15 ++--
 test/fv/drift_test.go                         | 70 ++++++++++++++++---
 15 files changed, 123 insertions(+), 104 deletions(-)

diff --git a/Makefile b/Makefile
index 47c0e854..909ff88f 100644
--- a/Makefile
+++ b/Makefile
@@ -25,7 +25,7 @@ ARCH ?= amd64
 OS ?= $(shell uname -s | tr A-Z a-z)
 K8S_LATEST_VER ?= $(shell curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)
 export CONTROLLER_IMG ?= $(REGISTRY)/$(IMAGE_NAME)
-TAG ?= main
+TAG ?= dev
 
 .PHONY: all
 all: build
@@ -351,7 +351,7 @@ run: manifests generate fmt vet ## Run a controller from your host.
 
 .PHONY: docker-build
 docker-build: ## Build docker image with the manager.
 	go generate
-	docker build --build-arg BUILDOS=linux --build-arg TARGETARCH=amd64 -t $(CONTROLLER_IMG):$(TAG) .
+	docker build --load --build-arg BUILDOS=linux --build-arg TARGETARCH=amd64 -t $(CONTROLLER_IMG):$(TAG) .
 	MANIFEST_IMG=$(CONTROLLER_IMG) MANIFEST_TAG=$(TAG) $(MAKE) set-manifest-image
 	$(MAKE) set-manifest-pull-policy
diff --git a/api/v1alpha1/clustersummary_types.go b/api/v1alpha1/clustersummary_types.go
index 8a4cdd49..31b2f109 100644
--- a/api/v1alpha1/clustersummary_types.go
+++ b/api/v1alpha1/clustersummary_types.go
@@ -141,10 +141,9 @@ type HelmChartSummary struct {
 	// chart or there is a conflict
 	Status HelmChartStatus `json:"status"`
 
-	// MetadataHash represents of a unique value for the extra metadata, extraLabels
-	// and extraAnnotations, deployed on this helm chart
+	// ValueHash represents a unique value for the values section
 	// +optional
-	MetadataHash []byte `json:"metadataHash,omitempty"`
+	ValueHash []byte `json:"valueHash,omitempty"`
 
 	// Status indicates whether ClusterSummary can manage the helm
 	// chart or there is a conflict
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 0300dc21..8329afb6 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -563,8 +563,8 @@ func (in *HelmChart) DeepCopy() *HelmChart {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HelmChartSummary) DeepCopyInto(out *HelmChartSummary) {
 	*out = *in
-	if in.MetadataHash != nil {
-		in, out := &in.MetadataHash, &out.MetadataHash
+	if in.ValueHash != nil {
+		in, out := &in.ValueHash, &out.ValueHash
 		*out = make([]byte, len(*in))
 		copy(*out, *in)
 	}
diff --git a/config/crd/bases/config.projectsveltos.io_clustersummaries.yaml b/config/crd/bases/config.projectsveltos.io_clustersummaries.yaml
index cfd20412..418e7501 100644
--- a/config/crd/bases/config.projectsveltos.io_clustersummaries.yaml
+++ b/config/crd/bases/config.projectsveltos.io_clustersummaries.yaml
@@ -668,12 +668,6 @@ spec:
                             Status indicates whether ClusterSummary can manage the helm
                             chart or there is a conflict
                           type: string
-                        metadataHash:
-                          description: |-
-                            MetadataHash represents of a unique value for the extra metadata, extraLabels
-                            and extraAnnotations, deployed on this helm chart
-                          format: byte
-                          type: string
                         releaseName:
                           description: ReleaseName is the chart release
                           minLength: 1
@@ -691,6 +685,11 @@ spec:
                           - Managing
                           - Conflict
                           type: string
+                        valueHash:
+                          description: ValueHash represents a unique value for the
+                            values section
+                          format: byte
+                          type: string
                       required:
                       - releaseName
                       - releaseNamespace
diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml
index a03918f5..c20df17e 100644
--- a/config/default/manager_auth_proxy_patch.yaml
+++ b/config/default/manager_auth_proxy_patch.yaml
@@ -15,4 +15,4 @@ spec:
         - "--report-mode=0"
         - --shard-key=
         - "--v=5"
-        - "--version=main"
+        - "--version=dev"
diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml
index 1b408a59..9668d7e6 100644
--- a/config/default/manager_image_patch.yaml
+++ b/config/default/manager_image_patch.yaml
@@ -8,5 +8,5 @@ spec:
     spec:
       containers:
         # Change the value of image field below to your controller image URL
-        - image: projectsveltos/addon-controller:main
+        - image: projectsveltos/addon-controller:dev
           name: controller
diff --git a/controllers/handlers_helm.go b/controllers/handlers_helm.go
index f821c99f..ff9015f5 100644
--- a/controllers/handlers_helm.go
+++ b/controllers/handlers_helm.go
@@ -21,6 +21,7 @@ import (
 	"crypto/sha256"
 	"fmt"
 	"os"
+	"reflect"
 	"strconv"
 	"strings"
 	"time"
@@ -364,11 +365,6 @@ func helmHash(ctx context.Context, c client.Client, clusterSummaryScope *scope.C
 		config += getVersion()
 	}
 
-	metadataHash := getMetadataHash(clusterSummary)
-	if metadataHash != nil {
-		config += string(metadataHash)
-	}
-
 	h.Write([]byte(config))
 	return h.Sum(nil), nil
 }
@@ -483,6 +479,11 @@ func walkChartsAndDeploy(ctx context.Context, c client.Client, clusterSummary *c
 		if err != nil {
 			return nil, nil, err
 		}
+		err = updateValueHashOnHelmChartSummary(ctx, currentChart, clusterSummary)
+		if err != nil {
+			return nil, nil, err
+		}
+
 		releaseReports = append(releaseReports, *report)
 
 		if currentRelease != nil {
@@ -1003,6 +1004,14 @@ func shouldUpgrade(currentRelease *releaseInfo, requestedChart *configv1alpha1.H
 	clusterSummary *configv1alpha1.ClusterSummary) bool {
 
 	if clusterSummary.Spec.ClusterProfileSpec.SyncMode != configv1alpha1.SyncModeContinuousWithDriftDetection {
+		currentValueHash := []byte(requestedChart.Values)
+		oldValueHash := getValueHashFromHelmChartSummary(requestedChart, clusterSummary)
+
+		// If the Values configuration has changed, trigger an upgrade
+		if !reflect.DeepEqual(oldValueHash, currentValueHash) {
+			return true
+		}
+
 		// With drift detection mode, there is reconciliation due to configuration drift even
 		// when version is same. So skip this check in SyncModeContinuousWithDriftDetection
 		if currentRelease != nil {
@@ -1256,6 +1265,8 @@ func updateStatusForeferencedHelmReleases(ctx context.Context, c client.Client,
 				ReleaseName:      currentChart.ReleaseName,
 				ReleaseNamespace: currentChart.ReleaseNamespace,
 				Status:           configv1alpha1.HelChartStatusManaging,
+				ValueHash:        getValueHashFromHelmChartSummary(currentChart, clusterSummary), // if a value is currently stored, keep it;
+				// it is updated after the chart is deployed
 			}
 			currentlyReferenced[helmInfo(currentChart.ReleaseNamespace, currentChart.ReleaseName)] = true
 		} else {
@@ -1695,9 +1706,6 @@ func addExtraMetadata(ctx context.Context, requestedChart *configv1alpha1.HelmCh
 		return nil
 	}
 
-	// Current hash of current metadata (extraLabels and extraAnnotations)
-	metadataHash := getMetadataHash(clusterSummary)
-
 	actionConfig, err := actionConfigInit(requestedChart.ReleaseNamespace, kubeconfig, getEnableClientCacheValue(requestedChart.Options))
 	if err != nil {
 		return err
@@ -1748,7 +1756,7 @@ func addExtraMetadata(ctx context.Context, requestedChart *configv1alpha1.HelmCh
 		}
 	}
 
-	return updateMetadataHashOnHelmChartSummary(ctx, requestedChart, metadataHash, clusterSummary)
+	return nil
 }
 
 func getResourceNamespace(r *unstructured.Unstructured, releaseNamespace string, config *rest.Config) (string, error) {
@@ -1766,11 +1774,13 @@ func getResourceNamespace(r *unstructured.Unstructured, releaseNamespace string,
 	return namespace, nil
 }
 
-func updateMetadataHashOnHelmChartSummary(ctx context.Context, requestedChart *configv1alpha1.HelmChart,
-	metadataHash []byte, clusterSummary *configv1alpha1.ClusterSummary) error {
+func updateValueHashOnHelmChartSummary(ctx context.Context, requestedChart *configv1alpha1.HelmChart,
+	clusterSummary *configv1alpha1.ClusterSummary) error {
 
 	c := getManagementClusterClient()
 
+	valueHash := []byte(requestedChart.Values) // the raw Values bytes act as the "hash"; shouldUpgrade only compares for equality
+
 	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
 		currentClusterSummary := &configv1alpha1.ClusterSummary{}
 		err := c.Get(ctx,
@@ -1784,7 +1794,7 @@ func updateMetadataHashOnHelmChartSummary(ctx context.Context, requestedChart *c
 			if rs.ReleaseName == requestedChart.ReleaseName &&
 				rs.ReleaseNamespace == requestedChart.ReleaseNamespace {
 
-				rs.MetadataHash = metadataHash
+				rs.ValueHash = valueHash
 			}
 		}
 
 	return err
 }
+
+// getValueHashFromHelmChartSummary returns the valueHash stored for this chart
+// in the ClusterSummary
+func getValueHashFromHelmChartSummary(requestedChart *configv1alpha1.HelmChart,
+	clusterSummary *configv1alpha1.ClusterSummary) []byte {
+
+	for i := range clusterSummary.Status.HelmReleaseSummaries {
+		rs := &clusterSummary.Status.HelmReleaseSummaries[i]
+		if rs.ReleaseName == requestedChart.ReleaseName &&
+			rs.ReleaseNamespace == requestedChart.ReleaseNamespace {
+
+			return rs.ValueHash
+		}
+	}
+
+	return nil
+}
diff --git a/controllers/handlers_kustomize.go b/controllers/handlers_kustomize.go
index c820d68b..3d4494ec 100644
--- a/controllers/handlers_kustomize.go
+++ b/controllers/handlers_kustomize.go
@@ -322,11 +322,6 @@ func kustomizationHash(ctx context.Context, c client.Client, clusterSummaryScope
 		config += getVersion()
 	}
 
-	metadataHash := getMetadataHash(clusterSummaryScope.ClusterSummary)
-	if metadataHash != nil {
-		config += string(metadataHash)
-	}
-
 	h.Write([]byte(config))
 	return h.Sum(nil), nil
 }
diff --git a/controllers/handlers_resources.go b/controllers/handlers_resources.go
index 87dffa10..eb0e91e7 100644
--- a/controllers/handlers_resources.go
+++ b/controllers/handlers_resources.go
@@ -397,11 +397,6 @@ func resourcesHash(ctx context.Context, c client.Client, clusterSummaryScope *sc
 		config += getVersion()
 	}
 
-	metadataHash := getMetadataHash(clusterSummary)
-	if metadataHash != nil {
-		config += string(metadataHash)
-	}
-
 	h.Write([]byte(config))
 	return h.Sum(nil), nil
 }
diff --git a/controllers/handlers_utils.go b/controllers/handlers_utils.go
index eb19736f..1bdb6e88 100644
--- a/controllers/handlers_utils.go
+++ b/controllers/handlers_utils.go
@@ -295,10 +295,6 @@ func deployUnstructured(ctx context.Context, deployingToMgmtCluster bool, destCo
 		policy.GetKind(), policy.GetNamespace(), policy.GetName(), deployingToMgmtCluster))
 
 	resource, policyHash := getResource(policy, referencedObject, featureID, logger)
-	metadataHash := getMetadataHash(clusterSummary)
-	if metadataHash != nil {
-		policyHash += string(metadataHash)
-	}
 
 	// If policy is namespaced, create namespace if not already existing
 	err = createNamespace(ctx, destClient, clusterSummary, policy.GetNamespace())
diff --git a/controllers/utils.go b/controllers/utils.go
index b324c244..c68b6cc2 100644
--- a/controllers/utils.go
+++ b/controllers/utils.go
@@ -19,9 +19,7 @@ package controllers
 
 import (
 	"bytes"
 	"context"
-	"crypto/sha256"
 	"fmt"
-	"sort"
 	"strings"
 	"text/template"
@@ -375,45 +373,6 @@ func getVersion() string {
 	return version
 }
 
-// getMetadataHash returns hash of current ExtraLabels and ExtraAnnotations
-func getMetadataHash(clusterSummary *configv1alpha1.ClusterSummary) []byte {
-	if clusterSummary.Spec.ClusterProfileSpec.ExtraLabels == nil &&
-		clusterSummary.Spec.ClusterProfileSpec.ExtraAnnotations == nil {
-
-		return nil
-	}
-
-	h := sha256.New()
-	var config string
-
-	if clusterSummary.Spec.ClusterProfileSpec.ExtraLabels != nil {
-		sortedKey := getSortedKeys(clusterSummary.Spec.ClusterProfileSpec.ExtraLabels)
-		for i := range sortedKey {
-			key := sortedKey[i]
-			config += clusterSummary.Spec.ClusterProfileSpec.ExtraLabels[key]
-		}
-	}
-	if clusterSummary.Spec.ClusterProfileSpec.ExtraAnnotations != nil {
-		sortedKey := getSortedKeys(clusterSummary.Spec.ClusterProfileSpec.ExtraAnnotations)
-		for i := range sortedKey {
-			key := sortedKey[i]
-			config += clusterSummary.Spec.ClusterProfileSpec.ExtraAnnotations[key]
-		}
-	}
-
-	h.Write([]byte(config))
-	return h.Sum(nil)
-}
-
-func getSortedKeys(m map[string]string) []string {
-	keys := make([]string, 0, len(m))
-	for k := range m {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-	return keys
-}
-
 func isNamespaced(r *unstructured.Unstructured, config *rest.Config) (bool, error) {
 	gvk := schema.GroupVersionKind{
 		Group: r.GroupVersionKind().Group,
diff --git a/manifest/deployment-agentless.yaml b/manifest/deployment-agentless.yaml
index b92d27f0..74659670 100644
--- a/manifest/deployment-agentless.yaml
+++ b/manifest/deployment-agentless.yaml
@@ -23,10 +23,10 @@ spec:
         - --report-mode=0
         - --agent-in-mgmt-cluster
         - --v=5
-        - --version=main
+        - --version=dev
         command:
         - /manager
-        image: projectsveltos/addon-controller:main
+        image: projectsveltos/addon-controller:dev
         livenessProbe:
           failureThreshold: 3
           httpGet:
diff --git a/manifest/deployment-shard.yaml b/manifest/deployment-shard.yaml
index 4c9eed16..366b34b2 100644
--- a/manifest/deployment-shard.yaml
+++ b/manifest/deployment-shard.yaml
@@ -23,10 +23,10 @@ spec:
         - --report-mode=0
         - --shard-key={{.SHARD}}
         - --v=5
-        - --version=main
+        - --version=dev
         command:
         - /manager
-        image: projectsveltos/addon-controller:main
+        image: projectsveltos/addon-controller:dev
         livenessProbe:
           failureThreshold: 3
           httpGet:
diff --git a/manifest/manifest.yaml b/manifest/manifest.yaml
index 9a4bf482..9e2604c1 100644
--- a/manifest/manifest.yaml
+++ b/manifest/manifest.yaml
@@ -2161,12 +2161,6 @@ spec:
                             Status indicates whether ClusterSummary can manage the helm
                             chart or there is a conflict
                           type: string
-                        metadataHash:
-                          description: |-
-                            MetadataHash represents of a unique value for the extra metadata, extraLabels
-                            and extraAnnotations, deployed on this helm chart
-                          format: byte
-                          type: string
                         releaseName:
                           description: ReleaseName is the chart release
                           minLength: 1
@@ -2184,6 +2178,11 @@ spec:
                           - Managing
                           - Conflict
                           type: string
+                        valueHash:
+                          description: ValueHash represents a unique value for the
+                            values section
+                          format: byte
+                          type: string
                       required:
                       - releaseName
                       - releaseNamespace
@@ -3444,10 +3443,10 @@ spec:
         - --report-mode=0
        - --shard-key=
         - --v=5
-        - --version=main
+        - --version=dev
         command:
         - /manager
-        image: projectsveltos/addon-controller:main
+        image: projectsveltos/addon-controller:dev
         livenessProbe:
           failureThreshold: 3
           httpGet:
diff --git a/test/fv/drift_test.go b/test/fv/drift_test.go
index 35461834..5109a29c 100644
--- a/test/fv/drift_test.go
+++ b/test/fv/drift_test.go
@@ -62,10 +62,18 @@ var _ = Describe("Helm", Serial, func() {
 				RepositoryURL:    "https://kyverno.github.io/kyverno/",
 				RepositoryName:   "kyverno",
 				ChartName:        "kyverno/kyverno",
-				ChartVersion:     "v3.0.1",
+				ChartVersion:     "v3.1.4",
 				ReleaseName:      "kyverno-latest",
 				ReleaseNamespace: "kyverno",
 				HelmChartAction:  configv1alpha1.HelmChartActionInstall,
+				Values: `admissionController:
+  replicas: 1
+backgroundController:
+  replicas: 1
+cleanupController:
+  replicas: 1
+reportsController:
+  replicas: 1`,
 			},
 		}
 		Expect(k8sClient.Update(context.TODO(), currentClusterProfile)).To(Succeed())
@@ -80,11 +88,16 @@ var _ = Describe("Helm", Serial, func() {
 		Expect(workloadClient).ToNot(BeNil())
 
 		Byf("Verifying Kyverno deployment is created in the workload cluster")
-		Eventually(func() error {
+		Eventually(func() bool {
+			expectedReplicas := int32(1)
 			depl := &appsv1.Deployment{}
-			return workloadClient.Get(context.TODO(),
+			err = workloadClient.Get(context.TODO(),
 				types.NamespacedName{Namespace: "kyverno", Name: "kyverno-admission-controller"}, depl)
-		}, timeout, pollingInterval).Should(BeNil())
+			if err != nil {
+				return false
+			}
+			return depl.Spec.Replicas != nil && *depl.Spec.Replicas == expectedReplicas
+		}, timeout, pollingInterval).Should(BeTrue())
 
 		if isAgentLessMode() {
 			Byf("Verifying drift detection manager deployment is created in the management cluster")
@@ -125,7 +138,7 @@ var _ = Describe("Helm", Serial, func() {
 		verifyFeatureStatusIsProvisioned(kindWorkloadCluster.Namespace, clusterSummary.Name, configv1alpha1.FeatureHelm)
 
 		charts := []configv1alpha1.Chart{
-			{ReleaseName: "kyverno-latest", ChartVersion: "3.0.1", Namespace: "kyverno"},
+			{ReleaseName: "kyverno-latest", ChartVersion: "3.1.4", Namespace: "kyverno"},
 		}
 
 		verifyClusterConfiguration(configv1alpha1.ClusterProfileKind, clusterProfile.Name,
@@ -154,7 +167,7 @@ var _ = Describe("Helm", Serial, func() {
 		for i := range depl.Spec.Template.Spec.Containers {
 			if depl.Spec.Template.Spec.Containers[i].Name == kyvernoImageName {
 				imageChanged = true
-				depl.Spec.Template.Spec.Containers[i].Image = "ghcr.io/kyverno/kyverno:v1.8.0"
+				depl.Spec.Template.Spec.Containers[i].Image = "ghcr.io/kyverno/kyverno:v1.11.0"
 			}
 		}
 		Expect(imageChanged).To(BeTrue())
@@ -164,8 +177,8 @@ var _ = Describe("Helm", Serial, func() {
 			types.NamespacedName{Namespace: "kyverno", Name: "kyverno-admission-controller"}, depl)).To(Succeed())
 		for i := range depl.Spec.Template.Spec.Containers {
 			if depl.Spec.Template.Spec.Containers[i].Name == kyvernoImageName {
-				By("Kyverno image is set to v1.8.0")
-				Expect(depl.Spec.Template.Spec.Containers[i].Image).To(Equal("ghcr.io/kyverno/kyverno:v1.8.0"))
+				By("Kyverno image is set to v1.11.0")
+				Expect(depl.Spec.Template.Spec.Containers[i].Image).To(Equal("ghcr.io/kyverno/kyverno:v1.11.0"))
 			}
 		}
 
@@ -179,12 +192,49 @@ var _ = Describe("Helm", Serial, func() {
 			}
 			for i := range depl.Spec.Template.Spec.Containers {
 				if depl.Spec.Template.Spec.Containers[i].Name == kyvernoImageName {
-					return depl.Spec.Template.Spec.Containers[i].Image == "ghcr.io/kyverno/kyverno:v1.10.0"
+					return depl.Spec.Template.Spec.Containers[i].Image == "ghcr.io/kyverno/kyverno:v1.11.4"
 				}
 			}
 			return false
 		}, timeout, pollingInterval).Should(BeTrue())
-		By("Kyverno image is reset to v1.8.5")
+		By("Kyverno image is reset to v1.11.4")
+
+		By("Change values section")
+		Expect(k8sClient.Get(context.TODO(),
+			types.NamespacedName{Name: clusterProfile.Name},
+			currentClusterProfile)).To(Succeed())
+		currentClusterProfile.Spec.HelmCharts = []configv1alpha1.HelmChart{
+			{
+				RepositoryURL:    "https://kyverno.github.io/kyverno/",
+				RepositoryName:   "kyverno",
+				ChartName:        "kyverno/kyverno",
+				ChartVersion:     "v3.1.4",
+				ReleaseName:      "kyverno-latest",
+				ReleaseNamespace: "kyverno",
+				HelmChartAction:  configv1alpha1.HelmChartActionInstall,
+				Values: `admissionController:
+  replicas: 3
+backgroundController:
+  replicas: 1
+cleanupController:
+  replicas: 1
+reportsController:
+  replicas: 1`,
+			},
+		}
+		Expect(k8sClient.Update(context.TODO(), currentClusterProfile)).To(Succeed())
+
+		Byf("Verifying Kyverno deployment is updated in the workload cluster")
+		Eventually(func() bool {
+			expectedReplicas := int32(3)
+			depl := &appsv1.Deployment{}
+			err = workloadClient.Get(context.TODO(),
+				types.NamespacedName{Namespace: "kyverno", Name: "kyverno-admission-controller"}, depl)
+			if err != nil {
+				return false
+			}
+			return depl.Spec.Replicas != nil && *depl.Spec.Replicas == expectedReplicas
+		}, timeout, pollingInterval).Should(BeTrue())
 
 		deleteClusterProfile(clusterProfile)
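
The mechanics of the new upgrade trigger, condensed from the hunks above: after every
deployment, walkChartsAndDeploy records the chart's raw Values bytes in the ClusterSummary
status via updateValueHashOnHelmChartSummary, and on the next reconciliation shouldUpgrade
compares that stored copy against the Values currently requested before falling back to the
version check. Note that despite the field name, ValueHash currently holds the raw Values
bytes, so plain equality, not hashing, is what detects a change. The sketch below is a
minimal, self-contained illustration of that flow, not code from this patch: the two structs
are trimmed mirrors of the API types, and valuesChanged is a hypothetical helper folding the
getValueHashFromHelmChartSummary lookup and the shouldUpgrade comparison into one function.

    package main

    import (
    	"fmt"
    	"reflect"
    )

    // Trimmed mirror of HelmChartSummary: only the fields the comparison uses.
    type HelmChartSummary struct {
    	ReleaseNamespace string
    	ReleaseName      string
    	ValueHash        []byte // raw Values bytes recorded after the last deployment
    }

    // Trimmed mirror of HelmChart: the requested release and its Values section.
    type HelmChart struct {
    	ReleaseNamespace string
    	ReleaseName      string
    	Values           string
    }

    // valuesChanged (hypothetical helper) looks up the stored ValueHash for this
    // release and compares it with the Values currently requested. A mismatch
    // means the chart is treated as needing an upgrade.
    func valuesChanged(chart *HelmChart, summaries []HelmChartSummary) bool {
    	var stored []byte
    	for i := range summaries {
    		rs := &summaries[i]
    		if rs.ReleaseName == chart.ReleaseName &&
    			rs.ReleaseNamespace == chart.ReleaseNamespace {
    			stored = rs.ValueHash
    		}
    	}
    	// Same comparison shouldUpgrade now performs: any byte difference in
    	// the requested Values section triggers an upgrade.
    	return !reflect.DeepEqual(stored, []byte(chart.Values))
    }

    func main() {
    	summaries := []HelmChartSummary{{
    		ReleaseNamespace: "kyverno",
    		ReleaseName:      "kyverno-latest",
    		ValueHash:        []byte("admissionController:\n  replicas: 1"),
    	}}
    	chart := &HelmChart{
    		ReleaseNamespace: "kyverno",
    		ReleaseName:      "kyverno-latest",
    		Values:           "admissionController:\n  replicas: 3",
    	}
    	fmt.Println(valuesChanged(chart, summaries)) // true -> treated as an upgrade
    }

This mirrors the fv test above: bumping admissionController.replicas from 1 to 3 with an
unchanged chart version is exactly the case the old code missed and the new comparison
catches. A release with no recorded ValueHash (for example, one deployed before this change)
compares as different on the first pass, so it is upgraded once and then settles.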