From e959d33ad2b2f08e045256a2fbc5164ef0fec258 Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Fri, 27 Sep 2024 12:51:33 -0400 Subject: [PATCH 1/9] [NET-10985] Fix bug where imagePullSecrets were not set up for Gateways (#4316) * Plumb global.imagePullSecrets through to Gateway's ServiceAccount Since pull secrets are a list of structured objects that cannot easily be passed as a flag value to the container, this approach uses a JSON config file that is created as a ConfigMap and then mounted into the connect-injector Pod and parsed on startup. * Leave camp cleaner than I found it * Make path to config file configurable * Add changelog entry * Add note to changelog entry * Ensure ServiceAccount is created if any image pull secrets are provided * Add test coverage for image pull secret inclusion on gateway ServiceAccount * Adjust note in changelog * Add a helpful comment explaining when/why we create a ServiceAccount * Update .changelog/4316.txt Co-authored-by: Blake Covarrubias * Return ServiceAccount name when image pull secrets warrant it * Improve unit tests to assert presence of ServiceAccount name on Deployment * Copy helpful comment added elsewhere --------- Co-authored-by: Blake Covarrubias --- .changelog/4316.txt | 5 ++ .../templates/connect-inject-configmap.yaml | 18 +++++++ .../templates/connect-inject-deployment.yaml | 7 +++ .../api-gateway/common/helm_config.go | 2 + .../api-gateway/gatekeeper/gatekeeper.go | 4 +- .../api-gateway/gatekeeper/gatekeeper_test.go | 53 +++++++++++-------- control-plane/api-gateway/gatekeeper/init.go | 6 +-- .../api-gateway/gatekeeper/rolebinding.go | 7 +-- .../api-gateway/gatekeeper/serviceaccount.go | 22 ++++---- .../subcommand/inject-connect/command.go | 2 + .../inject-connect/v1controllers.go | 20 +++++++ 11 files changed, 105 insertions(+), 41 deletions(-) create mode 100644 .changelog/4316.txt create mode 100644 charts/consul/templates/connect-inject-configmap.yaml diff --git a/.changelog/4316.txt b/.changelog/4316.txt new file mode 100644 index 0000000000..5397ebd093 --- /dev/null +++ b/.changelog/4316.txt @@ -0,0 +1,5 @@ +```release-note:bug +api-gateway: `global.imagePullSecrets` are now configured on the `ServiceAccount` for `Gateways`. + +Note: the referenced image pull Secret(s) must be present in the same namespace the `Gateway` is deployed to. +``` diff --git a/charts/consul/templates/connect-inject-configmap.yaml b/charts/consul/templates/connect-inject-configmap.yaml new file mode 100644 index 0000000000..98a7dae45f --- /dev/null +++ b/charts/consul/templates/connect-inject-configmap.yaml @@ -0,0 +1,18 @@ +{{- if .Values.connectInject.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "consul.fullname" . }}-connect-inject-config + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: connect-injector +data: + config.json: | + { + "image_pull_secrets": {{ .Values.global.imagePullSecrets | toJson }} + } +{{- end }} diff --git a/charts/consul/templates/connect-inject-deployment.yaml b/charts/consul/templates/connect-inject-deployment.yaml index 725c26df10..8eebcfe42f 100644 --- a/charts/consul/templates/connect-inject-deployment.yaml +++ b/charts/consul/templates/connect-inject-deployment.yaml @@ -141,6 +141,7 @@ spec: - "-ec" - | exec consul-k8s-control-plane inject-connect \ + -config-file=/consul/config/config.json \ {{- if .Values.global.federation.enabled }} -enable-federation \ {{- end }} @@ -311,6 +312,9 @@ spec: successThreshold: 1 timeoutSeconds: 5 volumeMounts: + - name: config + mountPath: /consul/config + readOnly: true {{- if not (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName) }} - name: certs mountPath: /etc/connect-injector/certs @@ -326,6 +330,9 @@ spec: {{- toYaml . | nindent 12 }} {{- end }} volumes: + - name: config + configMap: + name: {{ template "consul.fullname" . }}-connect-inject-config {{- if not (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName) }} - name: certs secret: diff --git a/control-plane/api-gateway/common/helm_config.go b/control-plane/api-gateway/common/helm_config.go index d551757c5b..175cef56ba 100644 --- a/control-plane/api-gateway/common/helm_config.go +++ b/control-plane/api-gateway/common/helm_config.go @@ -19,6 +19,8 @@ type HelmConfig struct { ImageDataplane string // ImageConsulK8S is the Consul Kubernetes Control Plane image to use in gateway deployments. ImageConsulK8S string + // ImagePullSecrets reference one or more Secret(s) that contain the credentials to pull images from private image repos. + ImagePullSecrets []v1.LocalObjectReference // GlobalImagePullPolicy is the pull policy to use for all images used in gateway deployments. GlobalImagePullPolicy string ConsulDestinationNamespace string diff --git a/control-plane/api-gateway/gatekeeper/gatekeeper.go b/control-plane/api-gateway/gatekeeper/gatekeeper.go index 538444303f..79766219dc 100644 --- a/control-plane/api-gateway/gatekeeper/gatekeeper.go +++ b/control-plane/api-gateway/gatekeeper/gatekeeper.go @@ -106,7 +106,9 @@ func (g *Gatekeeper) namespacedName(gateway gwv1beta1.Gateway) types.NamespacedN } func (g *Gatekeeper) serviceAccountName(gateway gwv1beta1.Gateway, config common.HelmConfig) string { - if config.AuthMethod == "" && !config.EnableOpenShift { + // We only create a ServiceAccount if it's needed for RBAC or image pull secrets; + // otherwise, we clean up if one was previously created. 
+ if config.AuthMethod == "" && !config.EnableOpenShift && len(config.ImagePullSecrets) == 0 { return "" } return gateway.Name diff --git a/control-plane/api-gateway/gatekeeper/gatekeeper_test.go b/control-plane/api-gateway/gatekeeper/gatekeeper_test.go index f6342ec725..0c60aa610b 100644 --- a/control-plane/api-gateway/gatekeeper/gatekeeper_test.go +++ b/control-plane/api-gateway/gatekeeper/gatekeeper_test.go @@ -197,12 +197,13 @@ func TestUpsert(t *testing.T) { }, }, helmConfig: common.HelmConfig{ - ImageDataplane: dataplaneImage, + ImageDataplane: dataplaneImage, + ImagePullSecrets: []corev1.LocalObjectReference{{Name: "my-secret"}}, }, initialResources: resources{}, finalResources: resources{ deployments: []*appsv1.Deployment{ - configureDeployment(name, namespace, labels, 3, nil, nil, "", "1"), + configureDeployment(name, namespace, labels, 3, nil, nil, name, "1"), }, roles: []*rbac.Role{}, secrets: []*corev1.Secret{ @@ -224,7 +225,9 @@ func TestUpsert(t *testing.T) { }, }, "1", false, false), }, - serviceAccounts: []*corev1.ServiceAccount{}, + serviceAccounts: []*corev1.ServiceAccount{ + configureServiceAccount(name, namespace, labels, "1", []corev1.LocalObjectReference{{Name: "my-secret"}}), + }, }, }, "create a new gateway deployment with managed Service": { @@ -279,7 +282,6 @@ func TestUpsert(t *testing.T) { }, }, "1", false, false), }, - serviceAccounts: []*corev1.ServiceAccount{}, }, }, "create a new gateway deployment with managed Service and ACLs": { @@ -307,13 +309,14 @@ func TestUpsert(t *testing.T) { }, }, helmConfig: common.HelmConfig{ - AuthMethod: "method", - ImageDataplane: dataplaneImage, + AuthMethod: "method", + ImageDataplane: dataplaneImage, + ImagePullSecrets: []corev1.LocalObjectReference{{Name: "my-secret"}}, }, initialResources: resources{}, finalResources: resources{ deployments: []*appsv1.Deployment{ - configureDeployment(name, namespace, labels, 3, nil, nil, "", "1"), + configureDeployment(name, namespace, labels, 3, nil, nil, name, "1"), }, roles: []*rbac.Role{ configureRole(name, namespace, labels, "1", false), @@ -341,7 +344,7 @@ func TestUpsert(t *testing.T) { }, "1", false, false), }, serviceAccounts: []*corev1.ServiceAccount{ - configureServiceAccount(name, namespace, labels, "1"), + configureServiceAccount(name, namespace, labels, "1", []corev1.LocalObjectReference{{Name: "my-secret"}}), }, }, }, @@ -451,7 +454,7 @@ func TestUpsert(t *testing.T) { }, initialResources: resources{ deployments: []*appsv1.Deployment{ - configureDeployment(name, namespace, labels, 3, nil, nil, "", "1"), + configureDeployment(name, namespace, labels, 3, nil, nil, name, "1"), }, roles: []*rbac.Role{ configureRole(name, namespace, labels, "1", false), @@ -472,12 +475,12 @@ func TestUpsert(t *testing.T) { }, "1", true, false), }, serviceAccounts: []*corev1.ServiceAccount{ - configureServiceAccount(name, namespace, labels, "1"), + configureServiceAccount(name, namespace, labels, "1", nil), }, }, finalResources: resources{ deployments: []*appsv1.Deployment{ - configureDeployment(name, namespace, labels, 3, nil, nil, "", "2"), + configureDeployment(name, namespace, labels, 3, nil, nil, name, "2"), }, roles: []*rbac.Role{ configureRole(name, namespace, labels, "1", false), @@ -505,7 +508,7 @@ func TestUpsert(t *testing.T) { }, "2", false, false), }, serviceAccounts: []*corev1.ServiceAccount{ - configureServiceAccount(name, namespace, labels, "1"), + configureServiceAccount(name, namespace, labels, "1", nil), }, }, ignoreTimestampOnService: true, @@ -542,7 +545,7 @@ func 
TestUpsert(t *testing.T) { }, initialResources: resources{ deployments: []*appsv1.Deployment{ - configureDeployment(name, namespace, labels, 3, nil, nil, "", "1"), + configureDeployment(name, namespace, labels, 3, nil, nil, name, "1"), }, roles: []*rbac.Role{ configureRole(name, namespace, labels, "1", false), @@ -568,12 +571,12 @@ func TestUpsert(t *testing.T) { }, "1", true, false), }, serviceAccounts: []*corev1.ServiceAccount{ - configureServiceAccount(name, namespace, labels, "1"), + configureServiceAccount(name, namespace, labels, "1", nil), }, }, finalResources: resources{ deployments: []*appsv1.Deployment{ - configureDeployment(name, namespace, labels, 3, nil, nil, "", "2"), + configureDeployment(name, namespace, labels, 3, nil, nil, name, "2"), }, roles: []*rbac.Role{ configureRole(name, namespace, labels, "1", false), @@ -595,7 +598,7 @@ func TestUpsert(t *testing.T) { }, "2", false, false), }, serviceAccounts: []*corev1.ServiceAccount{ - configureServiceAccount(name, namespace, labels, "1"), + configureServiceAccount(name, namespace, labels, "1", nil), }, }, ignoreTimestampOnService: true, @@ -955,7 +958,7 @@ func TestUpsert(t *testing.T) { }, finalResources: resources{ deployments: []*appsv1.Deployment{ - configureDeployment(name, namespace, labels, 3, nil, nil, "", "1"), + configureDeployment(name, namespace, labels, 3, nil, nil, name, "1"), }, roles: []*rbac.Role{ configureRole(name, namespace, labels, "1", true), @@ -966,7 +969,7 @@ func TestUpsert(t *testing.T) { secrets: []*corev1.Secret{}, services: []*corev1.Service{}, serviceAccounts: []*corev1.ServiceAccount{ - configureServiceAccount(name, namespace, labels, "1"), + configureServiceAccount(name, namespace, labels, "1", nil), }, }, }, @@ -1311,7 +1314,7 @@ func TestDelete(t *testing.T) { }, "1", true, false), }, serviceAccounts: []*corev1.ServiceAccount{ - configureServiceAccount(name, namespace, labels, "1"), + configureServiceAccount(name, namespace, labels, "1", nil), }, }, finalResources: resources{ @@ -1377,7 +1380,7 @@ func TestDelete(t *testing.T) { }, "1", true, false), }, serviceAccounts: []*corev1.ServiceAccount{ - configureServiceAccount(name, namespace, labels, "1"), + configureServiceAccount(name, namespace, labels, "1", nil), }, }, finalResources: resources{ @@ -1475,6 +1478,9 @@ func validateResourcesExist(t *testing.T, client client.Client, helmConfig commo require.Equal(t, expected.Spec.Template.ObjectMeta.Annotations, actual.Spec.Template.ObjectMeta.Annotations) require.Equal(t, expected.Spec.Template.ObjectMeta.Labels, actual.Spec.Template.Labels) + // Ensure the service account is assigned + require.Equal(t, expected.Spec.Template.Spec.ServiceAccountName, actual.Spec.Template.Spec.ServiceAccountName) + // Ensure there is an init container hasInitContainer := false for _, container := range actual.Spec.Template.Spec.InitContainers { @@ -1684,7 +1690,7 @@ func validateResourcesAreDeleted(t *testing.T, k8sClient client.Client, resource return nil } -func configureDeployment(name, namespace string, labels map[string]string, replicas int32, nodeSelector map[string]string, tolerations []corev1.Toleration, serviceAccoutName, resourceVersion string) *appsv1.Deployment { +func configureDeployment(name, namespace string, labels map[string]string, replicas int32, nodeSelector map[string]string, tolerations []corev1.Toleration, serviceAccountName, resourceVersion string) *appsv1.Deployment { return &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ APIVersion: "apps/v1", @@ -1737,7 +1743,7 @@ func 
configureDeployment(name, namespace string, labels map[string]string, repli }, NodeSelector: nodeSelector, Tolerations: tolerations, - ServiceAccountName: serviceAccoutName, + ServiceAccountName: serviceAccountName, }, }, }, @@ -1886,7 +1892,7 @@ func configureService(name, namespace string, labels, annotations map[string]str return &service } -func configureServiceAccount(name, namespace string, labels map[string]string, resourceVersion string) *corev1.ServiceAccount { +func configureServiceAccount(name, namespace string, labels map[string]string, resourceVersion string, pullSecrets []corev1.LocalObjectReference) *corev1.ServiceAccount { return &corev1.ServiceAccount{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -1907,6 +1913,7 @@ func configureServiceAccount(name, namespace string, labels map[string]string, r }, }, }, + ImagePullSecrets: pullSecrets, } } diff --git a/control-plane/api-gateway/gatekeeper/init.go b/control-plane/api-gateway/gatekeeper/init.go index 6502b7be8f..e8a17dc8ea 100644 --- a/control-plane/api-gateway/gatekeeper/init.go +++ b/control-plane/api-gateway/gatekeeper/init.go @@ -7,12 +7,12 @@ import ( "bytes" "context" "fmt" - "k8s.io/utils/ptr" "strconv" "strings" "text/template" corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" @@ -36,9 +36,9 @@ type initContainerCommandData struct { LogJSON bool } -// containerInit returns the init container spec for connect-init that polls for the service and the connect proxy service to be registered +// initContainer returns the init container spec for connect-init that polls for the service and the connect proxy service to be registered // so that it can save the proxy service id to the shared volume and boostrap Envoy with the proxy-id. -func (g Gatekeeper) initContainer(config common.HelmConfig, name, namespace string) (corev1.Container, error) { +func (g *Gatekeeper) initContainer(config common.HelmConfig, name, namespace string) (corev1.Container, error) { data := initContainerCommandData{ AuthMethod: config.AuthMethod, LogLevel: config.LogLevel, diff --git a/control-plane/api-gateway/gatekeeper/rolebinding.go b/control-plane/api-gateway/gatekeeper/rolebinding.go index 1a60e752c8..f315b78402 100644 --- a/control-plane/api-gateway/gatekeeper/rolebinding.go +++ b/control-plane/api-gateway/gatekeeper/rolebinding.go @@ -10,12 +10,13 @@ import ( "k8s.io/apimachinery/pkg/types" gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" - "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" - "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" rbac "k8s.io/api/rbac/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" + + "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" + "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" ) func (g *Gatekeeper) upsertRoleBinding(ctx context.Context, gateway gwv1beta1.Gateway, gcc v1alpha1.GatewayClassConfig, config common.HelmConfig) error { @@ -65,7 +66,7 @@ func (g *Gatekeeper) deleteRoleBinding(ctx context.Context, gwName types.Namespa func (g *Gatekeeper) roleBinding(gateway gwv1beta1.Gateway, gcc v1alpha1.GatewayClassConfig, config common.HelmConfig) *rbac.RoleBinding { // Create resources for reference. This avoids bugs if naming patterns change. 
- serviceAccount := g.serviceAccount(gateway) + serviceAccount := g.serviceAccount(gateway, config) role := g.role(gateway, gcc, config) return &rbac.RoleBinding{ diff --git a/control-plane/api-gateway/gatekeeper/serviceaccount.go b/control-plane/api-gateway/gatekeeper/serviceaccount.go index d1c5c9883a..64dc0b75dd 100644 --- a/control-plane/api-gateway/gatekeeper/serviceaccount.go +++ b/control-plane/api-gateway/gatekeeper/serviceaccount.go @@ -7,18 +7,20 @@ import ( "context" "errors" - "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" - "k8s.io/apimachinery/pkg/types" - gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" - corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" + gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + + "github.com/hashicorp/consul-k8s/control-plane/api-gateway/common" ) func (g *Gatekeeper) upsertServiceAccount(ctx context.Context, gateway gwv1beta1.Gateway, config common.HelmConfig) error { - if config.AuthMethod == "" && !config.EnableOpenShift { + // We only create a ServiceAccount if it's needed for RBAC or image pull secrets; + // otherwise, we clean up if one was previously created. + if config.AuthMethod == "" && !config.EnableOpenShift && len(config.ImagePullSecrets) == 0 { return g.deleteServiceAccount(ctx, types.NamespacedName{Namespace: gateway.Namespace, Name: gateway.Name}) } @@ -47,15 +49,12 @@ func (g *Gatekeeper) upsertServiceAccount(ctx context.Context, gateway gwv1beta1 } // Create the ServiceAccount. - serviceAccount = g.serviceAccount(gateway) + serviceAccount = g.serviceAccount(gateway, config) if err := ctrl.SetControllerReference(&gateway, serviceAccount, g.Client.Scheme()); err != nil { return err } - if err := g.Client.Create(ctx, serviceAccount); err != nil { - return err - } - return nil + return g.Client.Create(ctx, serviceAccount) } func (g *Gatekeeper) deleteServiceAccount(ctx context.Context, gwName types.NamespacedName) error { @@ -69,12 +68,13 @@ func (g *Gatekeeper) deleteServiceAccount(ctx context.Context, gwName types.Name return nil } -func (g *Gatekeeper) serviceAccount(gateway gwv1beta1.Gateway) *corev1.ServiceAccount { +func (g *Gatekeeper) serviceAccount(gateway gwv1beta1.Gateway, config common.HelmConfig) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: gateway.Name, Namespace: gateway.Namespace, Labels: common.LabelsForGateway(&gateway), }, + ImagePullSecrets: config.ImagePullSecrets, } } diff --git a/control-plane/subcommand/inject-connect/command.go b/control-plane/subcommand/inject-connect/command.go index 68addfc7b2..125c46ea2a 100644 --- a/control-plane/subcommand/inject-connect/command.go +++ b/control-plane/subcommand/inject-connect/command.go @@ -48,6 +48,7 @@ type Command struct { flagListen string flagCertDir string // Directory with TLS certs for listening (PEM) flagDefaultInject bool // True to inject by default + flagConfigFile string // Path to a config file in JSON format flagConsulImage string // Docker image for Consul flagConsulDataplaneImage string // Docker image for Envoy flagConsulK8sImage string // Docker image for consul-k8s @@ -174,6 +175,7 @@ func init() { func (c *Command) init() { c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) c.flagSet.StringVar(&c.flagListen, "listen", ":8080", "Address to bind listener to.") + c.flagSet.StringVar(&c.flagConfigFile, "config-file", "", "Path to a JSON 
config file.") c.flagSet.Var((*flags.FlagMapValue)(&c.flagNodeMeta), "node-meta", "Metadata to set on the node, formatted as key=value. This flag may be specified multiple times to set multiple meta fields.") c.flagSet.BoolVar(&c.flagDefaultInject, "default-inject", true, "Inject by default.") diff --git a/control-plane/subcommand/inject-connect/v1controllers.go b/control-plane/subcommand/inject-connect/v1controllers.go index fa6e36d507..dd6b3be739 100644 --- a/control-plane/subcommand/inject-connect/v1controllers.go +++ b/control-plane/subcommand/inject-connect/v1controllers.go @@ -5,10 +5,12 @@ package connectinject import ( "context" + "encoding/json" "fmt" "os" "github.com/hashicorp/consul-server-connection-manager/discovery" + v1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -31,6 +33,23 @@ func (c *Command) configureControllers(ctx context.Context, mgr manager.Manager, // Create Consul API config object. consulConfig := c.consul.ConsulClientConfig() + type FileConfig struct { + ImagePullSecrets []v1.LocalObjectReference `json:"image_pull_secrets"` + } + + var cfgFile FileConfig + if c.flagConfigFile != "" { + if file, err := os.ReadFile(c.flagConfigFile); err != nil { + setupLog.Info("Failed to read specified -config-file", "file", c.flagConfigFile, "error", err) + } else { + if err := json.Unmarshal(file, &cfgFile); err != nil { + setupLog.Error(err, "Config file present but could not be deserialized, will use defaults", "file", c.flagConfigFile) + } else { + setupLog.Info("Config file present and deserialized", "file", c.flagConfigFile, "config", cfgFile) + } + } + } + // Convert allow/deny lists to sets. allowK8sNamespaces := flags.ToSet(c.flagAllowK8sNamespacesList) denyK8sNamespaces := flags.ToSet(c.flagDenyK8sNamespacesList) @@ -118,6 +137,7 @@ func (c *Command) configureControllers(ctx context.Context, mgr manager.Manager, }, ImageDataplane: c.flagConsulDataplaneImage, ImageConsulK8S: c.flagConsulK8sImage, + ImagePullSecrets: cfgFile.ImagePullSecrets, GlobalImagePullPolicy: c.flagGlobalImagePullPolicy, ConsulDestinationNamespace: c.flagConsulDestinationNamespace, NamespaceMirroringPrefix: c.flagK8SNSMirroringPrefix, From eb9539c6a782cfcbe2fc91f7899bbc789154dc7c Mon Sep 17 00:00:00 2001 From: Anita Akaeze Date: Thu, 10 Oct 2024 09:50:45 -0700 Subject: [PATCH 2/9] bump kubernetes version for AKS (#4383) * bump kubernetes version for AKS * bump kubernetes version for AKS --- charts/consul/test/terraform/aks/variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/consul/test/terraform/aks/variables.tf b/charts/consul/test/terraform/aks/variables.tf index b60273bac2..f124aac143 100644 --- a/charts/consul/test/terraform/aks/variables.tf +++ b/charts/consul/test/terraform/aks/variables.tf @@ -7,7 +7,7 @@ variable "location" { } variable "kubernetes_version" { - default = "1.27" + default = "1.28" description = "Kubernetes version supported on AKS" } From 8c232896748676ae125a2c4448498cb0794e351c Mon Sep 17 00:00:00 2001 From: Anita Akaeze Date: Fri, 11 Oct 2024 10:23:26 -0700 Subject: [PATCH 3/9] NO-JIRA: cleanup iamRoles as part of nightly test cleanup (#4382) * NO-JIRA: cleanup iam_roles as part of nightly test cleanup * remove reference to log, use fmt * fix panic in cleanup code * code review feedback * code review feedback --- hack/aws-acceptance-test-cleanup/main.go | 117 +++++++++++++++++++++++ 1 file changed, 117 insertions(+) diff --git a/hack/aws-acceptance-test-cleanup/main.go 
b/hack/aws-acceptance-test-cleanup/main.go index 1680e9ca63..b9827a23af 100644 --- a/hack/aws-acceptance-test-cleanup/main.go +++ b/hack/aws-acceptance-test-cleanup/main.go @@ -81,6 +81,11 @@ func realMain(ctx context.Context) error { return err } + // Find IAM roles and delete + if err := cleanupIAMRoles(ctx, iamClient); err != nil { + return err + } + // Find OIDC providers to delete. oidcProvidersOutput, err := iamClient.ListOpenIDConnectProvidersWithContext(ctx, &iam.ListOpenIDConnectProvidersInput{}) if err != nil { @@ -759,6 +764,118 @@ func destroyBackoff(ctx context.Context, resourceKind string, resourceID string, }, backoff.WithContext(expoBackoff, ctx)) } +func cleanupIAMRoles(ctx context.Context, iamClient *iam.IAM) error { + var rolesToDelete []*iam.Role + rolesToDelete, err := filterIAMRolesWithPrefix(ctx, iamClient, "consul-k8s-") + if err != nil { + return fmt.Errorf("failed to list roles: %w", err) + } + + if len(rolesToDelete) == 0 { + fmt.Println("Found no iamRoles to clean up") + return nil + } else { + fmt.Printf("Found %d IAM roles to clean up\n", len(rolesToDelete)) + } + + // Delete filtered roles + for _, role := range rolesToDelete { + roleName := aws.StringValue(role.RoleName) + err := detachRolePolicies(iamClient, role.RoleName) + if err != nil { + fmt.Printf("Failed to detach policies for role %s: %v", roleName, err) + continue + } + + _, err = iamClient.DeleteRole(&iam.DeleteRoleInput{ + RoleName: role.RoleName, + }) + if err != nil { + fmt.Printf("Failed to delete role %s: %v", roleName, err) + } else { + fmt.Printf("Deleted role: %s", roleName) + } + } + + return nil +} + +// filterIAMRolesWithPrefix is a callback function used with ListRolesPages. +// It filters roles based on specified prefix and appends matching roles to rolesToDelete. +// +// Parameters: +// - page: A single page of IAM roles returned by the AWS API +// - lastPage: A boolean indicating whether this is the last page of results +// - rolesToDelete: A pointer to the slice where matching roles are accumulated +// - rolePrefix: The prefix to filter roles by +func filterIAMRolesWithPrefix(ctx context.Context, iamClient *iam.IAM, prefix string) ([]*iam.Role, error) { + var roles []*iam.Role + + err := iamClient.ListRolesPagesWithContext(ctx, &iam.ListRolesInput{}, + func(page *iam.ListRolesOutput, lastPage bool) bool { + for _, role := range page.Roles { + name := aws.StringValue(role.RoleName) + if strings.HasPrefix(name, prefix) { + roles = append(roles, role) + } + } + + return true + }) + if err != nil { + return nil, err + } + + return roles, nil +} + +func detachRolePolicies(iamClient *iam.IAM, roleName *string) error { + if roleName == nil { + return fmt.Errorf("roleName is nil") + } + + err := iamClient.ListAttachedRolePoliciesPages(&iam.ListAttachedRolePoliciesInput{ + RoleName: roleName, + }, func(page *iam.ListAttachedRolePoliciesOutput, lastPage bool) bool { + err := detachPoliciesFromRole(iamClient, roleName, page) + if err != nil { + fmt.Printf("Error detaching policies from role %s: %v", *roleName, err) + } + return !lastPage + }) + + if err != nil { + return err + } + + return nil +} + +// detachPoliciesFromRole detaches all policies from a given role. 
+// +// Parameters: +// - iamClient: The IAM service client +// - roleName: Pointer to the name of the role +// - page: A single page of attached policies returned by the AWS API +func detachPoliciesFromRole(iamClient *iam.IAM, roleName *string, page *iam.ListAttachedRolePoliciesOutput) error { + for _, policy := range page.AttachedPolicies { + if policy.PolicyArn == nil { + fmt.Printf("Warning: PolicyArn is nil for a policy attached to: %s", *roleName) + continue + } + + _, err := iamClient.DetachRolePolicy(&iam.DetachRolePolicyInput{ + RoleName: roleName, + PolicyArn: policy.PolicyArn, + }) + if err != nil { + fmt.Printf("Failed to detach policy %s from role %s: %v\n", aws.StringValue(policy.PolicyArn), *roleName, err) + } + } + + return nil +} + func cleanupPersistentVolumes(ctx context.Context, ec2Client *ec2.EC2) error { var nextToken *string var toDeleteVolumes []*ec2.Volume From 124e38b68aa79c0a665a1f32cc57c266f99f72a4 Mon Sep 17 00:00:00 2001 From: Isaac <10012479+jukie@users.noreply.github.com> Date: Mon, 14 Oct 2024 16:52:45 -0600 Subject: [PATCH 4/9] Sync Catalog: Add k8s endpoint health state to consul health check (#3874) Signed-off-by: jukie <10012479+Jukie@users.noreply.github.com> --------- Signed-off-by: jukie <10012479+Jukie@users.noreply.github.com> --- .changelog/3874.txt | 3 + control-plane/catalog/to-consul/resource.go | 12 +- .../catalog/to-consul/resource_test.go | 163 ++++++++++++++---- 3 files changed, 145 insertions(+), 33 deletions(-) create mode 100644 .changelog/3874.txt diff --git a/.changelog/3874.txt b/.changelog/3874.txt new file mode 100644 index 0000000000..c6928774ef --- /dev/null +++ b/.changelog/3874.txt @@ -0,0 +1,3 @@ +```release-note:sync-catalog +Add Endpoint health state to registered consul service +``` diff --git a/control-plane/catalog/to-consul/resource.go b/control-plane/catalog/to-consul/resource.go index 09e7e17200..9e48305a30 100644 --- a/control-plane/catalog/to-consul/resource.go +++ b/control-plane/catalog/to-consul/resource.go @@ -46,6 +46,7 @@ const ( // consulKubernetesCheckName is the name of health check in Consul for Kubernetes readiness status. 
consulKubernetesCheckName = "Kubernetes Readiness Check" kubernetesSuccessReasonMsg = "Kubernetes health checks passing" + kubernetesFailureReasonMsg = "Kubernetes health checks failing" ) type NodePortSyncType string @@ -693,7 +694,6 @@ func (t *ServiceResource) generateRegistrations(key string) { } } } - } } } @@ -800,11 +800,17 @@ func (t *ServiceResource) registerServiceInstance( Name: consulKubernetesCheckName, Namespace: baseService.Namespace, Type: consulKubernetesCheckType, - Status: consulapi.HealthPassing, ServiceID: serviceID(r.Service.Service, addr), - Output: kubernetesSuccessReasonMsg, } + // Consider endpoint health state for registered consul service + if endpoint.Conditions.Ready != nil && *endpoint.Conditions.Ready { + r.Check.Status = consulapi.HealthPassing + r.Check.Output = kubernetesSuccessReasonMsg + } else { + r.Check.Status = consulapi.HealthCritical + r.Check.Output = kubernetesFailureReasonMsg + } t.consulMap[key] = append(t.consulMap[key], &r) } } diff --git a/control-plane/catalog/to-consul/resource_test.go b/control-plane/catalog/to-consul/resource_test.go index 08b08ced2f..4951d1b107 100644 --- a/control-plane/catalog/to-consul/resource_test.go +++ b/control-plane/catalog/to-consul/resource_test.go @@ -26,6 +26,7 @@ import ( const nodeName1 = "ip-10-11-12-13.ec2.internal" const nodeName2 = "ip-10-11-12-14.ec2.internal" +const nodeName3 = "ip-10-11-12-15.ec2.internal" func init() { hclog.DefaultOptions.Level = hclog.Debug @@ -763,7 +764,7 @@ func TestServiceResource_lbRegisterEndpoints(t *testing.T) { closer := controller.TestControllerRun(&serviceResource) defer closer() - node1, _ := createNodes(t, client) + node1, _, _ := createNodes(t, client) // Insert the endpoint slice _, err := client.DiscoveryV1().EndpointSlices(metav1.NamespaceDefault).Create( @@ -845,7 +846,7 @@ func TestServiceResource_nodePort(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "1.2.3.4", actual[0].Service.Address) require.Equal(r, 30000, actual[0].Service.Port) @@ -854,7 +855,13 @@ func TestServiceResource_nodePort(t *testing.T) { require.Equal(r, "2.3.4.5", actual[1].Service.Address) require.Equal(r, 30000, actual[1].Service.Port) require.Equal(r, "k8s-sync", actual[1].Node) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "3.4.5.6", actual[2].Service.Address) + require.Equal(r, 30000, actual[2].Service.Port) + require.Equal(r, "k8s-sync", actual[2].Node) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -885,7 +892,7 @@ func TestServiceResource_nodePortPrefix(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "prefixfoo", actual[0].Service.Service) require.Equal(r, "1.2.3.4", actual[0].Service.Address) require.Equal(r, 30000, actual[0].Service.Port) @@ -894,7 +901,13 @@ func TestServiceResource_nodePortPrefix(t *testing.T) { require.Equal(r, "2.3.4.5", actual[1].Service.Address) require.Equal(r, 30000, actual[1].Service.Port) require.Equal(r, "k8s-sync", actual[1].Node) + require.Equal(r, "prefixfoo", actual[2].Service.Service) + require.Equal(r, "3.4.5.6", actual[2].Service.Address) + require.Equal(r, 30000, actual[2].Service.Port) + 
require.Equal(r, "k8s-sync", actual[2].Node) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -911,7 +924,7 @@ func TestServiceResource_nodePort_singleEndpoint(t *testing.T) { closer := controller.TestControllerRun(&serviceResource) defer closer() - node1, _ := createNodes(t, client) + node1, _, _ := createNodes(t, client) // Insert the endpoint slice _, err := client.DiscoveryV1().EndpointSlices(metav1.NamespaceDefault).Create( @@ -994,7 +1007,7 @@ func TestServiceResource_nodePortAnnotatedPort(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "1.2.3.4", actual[0].Service.Address) require.Equal(r, 30001, actual[0].Service.Port) @@ -1003,7 +1016,13 @@ func TestServiceResource_nodePortAnnotatedPort(t *testing.T) { require.Equal(r, "2.3.4.5", actual[1].Service.Address) require.Equal(r, 30001, actual[1].Service.Port) require.Equal(r, "k8s-sync", actual[1].Node) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "3.4.5.6", actual[2].Service.Address) + require.Equal(r, 30001, actual[2].Service.Port) + require.Equal(r, "k8s-sync", actual[2].Node) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1038,7 +1057,7 @@ func TestServiceResource_nodePortUnnamedPort(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "1.2.3.4", actual[0].Service.Address) require.Equal(r, 30000, actual[0].Service.Port) @@ -1047,7 +1066,13 @@ func TestServiceResource_nodePortUnnamedPort(t *testing.T) { require.Equal(r, "2.3.4.5", actual[1].Service.Address) require.Equal(r, 30000, actual[1].Service.Port) require.Equal(r, "k8s-sync", actual[1].Node) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "3.4.5.6", actual[2].Service.Address) + require.Equal(r, 30000, actual[2].Service.Port) + require.Equal(r, "k8s-sync", actual[2].Node) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1078,16 +1103,22 @@ func TestServiceResource_nodePort_internalOnlySync(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "4.5.6.7", actual[0].Service.Address) require.Equal(r, 30000, actual[0].Service.Port) require.Equal(r, "k8s-sync", actual[0].Node) require.Equal(r, "foo", actual[1].Service.Service) - require.Equal(r, "3.4.5.6", actual[1].Service.Address) + require.Equal(r, "5.6.7.8", actual[1].Service.Address) require.Equal(r, 30000, actual[1].Service.Port) require.Equal(r, "k8s-sync", actual[1].Node) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "6.7.8.9", actual[2].Service.Address) + require.Equal(r, 30000, actual[2].Service.Port) + require.Equal(r, "k8s-sync", actual[2].Node) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, 
actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1104,7 +1135,7 @@ func TestServiceResource_nodePort_externalFirstSync(t *testing.T) { closer := controller.TestControllerRun(&serviceResource) defer closer() - node1, _ := createNodes(t, client) + node1, _, _ := createNodes(t, client) node1.Status = corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -1126,7 +1157,7 @@ func TestServiceResource_nodePort_externalFirstSync(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "4.5.6.7", actual[0].Service.Address) require.Equal(r, 30000, actual[0].Service.Port) @@ -1135,7 +1166,13 @@ func TestServiceResource_nodePort_externalFirstSync(t *testing.T) { require.Equal(r, "2.3.4.5", actual[1].Service.Address) require.Equal(r, 30000, actual[1].Service.Port) require.Equal(r, "k8s-sync", actual[1].Node) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "3.4.5.6", actual[2].Service.Address) + require.Equal(r, 30000, actual[2].Service.Port) + require.Equal(r, "k8s-sync", actual[2].Node) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1166,16 +1203,22 @@ func TestServiceResource_clusterIP(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "1.1.1.1", actual[0].Service.Address) require.Equal(r, 8080, actual[0].Service.Port) + require.Equal(r, "us-west-2a", actual[0].Service.Meta["external-k8s-topology-zone"]) require.Equal(r, "foo", actual[1].Service.Service) require.Equal(r, "2.2.2.2", actual[1].Service.Address) require.Equal(r, 8080, actual[1].Service.Port) - require.Equal(r, "us-west-2a", actual[0].Service.Meta["external-k8s-topology-zone"]) require.Equal(r, "us-west-2b", actual[1].Service.Meta["external-k8s-topology-zone"]) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "3.3.3.3", actual[2].Service.Address) + require.Equal(r, 8080, actual[2].Service.Port) + require.Equal(r, "us-west-2c", actual[2].Service.Meta["external-k8s-topology-zone"]) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1206,15 +1249,19 @@ func TestServiceResource_clusterIP_healthCheck(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, consulKubernetesCheckName, actual[0].Check.Name) require.Equal(r, consulapi.HealthPassing, actual[0].Check.Status) require.Equal(r, kubernetesSuccessReasonMsg, actual[0].Check.Output) require.Equal(r, consulKubernetesCheckType, actual[0].Check.Type) require.Equal(r, consulKubernetesCheckName, actual[1].Check.Name) - require.Equal(r, consulapi.HealthPassing, actual[1].Check.Status) - require.Equal(r, kubernetesSuccessReasonMsg, actual[1].Check.Output) + require.Equal(r, consulapi.HealthCritical, actual[1].Check.Status) + require.Equal(r, kubernetesFailureReasonMsg, actual[1].Check.Output) require.Equal(r, consulKubernetesCheckType, actual[1].Check.Type) + require.Equal(r, 
consulKubernetesCheckName, actual[2].Check.Name) + require.Equal(r, consulapi.HealthCritical, actual[2].Check.Status) + require.Equal(r, kubernetesFailureReasonMsg, actual[2].Check.Output) + require.Equal(r, consulKubernetesCheckType, actual[2].Check.Type) }) } @@ -1246,14 +1293,19 @@ func TestServiceResource_clusterIPPrefix(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "prefixfoo", actual[0].Service.Service) require.Equal(r, "1.1.1.1", actual[0].Service.Address) require.Equal(r, 8080, actual[0].Service.Port) require.Equal(r, "prefixfoo", actual[1].Service.Service) require.Equal(r, "2.2.2.2", actual[1].Service.Address) require.Equal(r, 8080, actual[1].Service.Port) + require.Equal(r, "prefixfoo", actual[2].Service.Service) + require.Equal(r, "3.3.3.3", actual[2].Service.Address) + require.Equal(r, 8080, actual[2].Service.Port) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1286,14 +1338,19 @@ func TestServiceResource_clusterIPAnnotatedPortName(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "1.1.1.1", actual[0].Service.Address) require.Equal(r, 2000, actual[0].Service.Port) require.Equal(r, "foo", actual[1].Service.Service) require.Equal(r, "2.2.2.2", actual[1].Service.Address) require.Equal(r, 2000, actual[1].Service.Port) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "3.3.3.3", actual[2].Service.Address) + require.Equal(r, 2000, actual[2].Service.Port) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1326,14 +1383,19 @@ func TestServiceResource_clusterIPAnnotatedPortNumber(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "1.1.1.1", actual[0].Service.Address) require.Equal(r, 4141, actual[0].Service.Port) require.Equal(r, "foo", actual[1].Service.Service) require.Equal(r, "2.2.2.2", actual[1].Service.Address) require.Equal(r, 4141, actual[1].Service.Port) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "3.3.3.3", actual[2].Service.Address) + require.Equal(r, 4141, actual[2].Service.Port) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1368,14 +1430,19 @@ func TestServiceResource_clusterIPUnnamedPorts(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "1.1.1.1", actual[0].Service.Address) require.Equal(r, 8080, actual[0].Service.Port) require.Equal(r, "foo", actual[1].Service.Service) require.Equal(r, "2.2.2.2", actual[1].Service.Address) require.Equal(r, 8080, actual[1].Service.Port) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "3.3.3.3", actual[2].Service.Address) + 
require.Equal(r, 8080, actual[2].Service.Port) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1439,14 +1506,19 @@ func TestServiceResource_clusterIPAllNamespaces(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "1.1.1.1", actual[0].Service.Address) require.Equal(r, 8080, actual[0].Service.Port) require.Equal(r, "foo", actual[1].Service.Service) require.Equal(r, "2.2.2.2", actual[1].Service.Address) require.Equal(r, 8080, actual[1].Service.Port) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "3.3.3.3", actual[2].Service.Address) + require.Equal(r, 8080, actual[2].Service.Port) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1482,14 +1554,19 @@ func TestServiceResource_clusterIPTargetPortNamed(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foo", actual[0].Service.Service) require.Equal(r, "1.1.1.1", actual[0].Service.Address) require.Equal(r, 2000, actual[0].Service.Port) require.Equal(r, "foo", actual[1].Service.Service) require.Equal(r, "2.2.2.2", actual[1].Service.Address) require.Equal(r, 2000, actual[1].Service.Port) + require.Equal(r, "foo", actual[2].Service.Service) + require.Equal(r, "3.3.3.3", actual[2].Service.Address) + require.Equal(r, 2000, actual[2].Service.Port) require.NotEqual(r, actual[0].Service.ID, actual[1].Service.ID) + require.NotEqual(r, actual[0].Service.ID, actual[2].Service.ID) + require.NotEqual(r, actual[1].Service.ID, actual[2].Service.ID) }) } @@ -1520,7 +1597,7 @@ func TestServiceResource_targetRefInMeta(t *testing.T) { syncer.Lock() defer syncer.Unlock() actual := syncer.Registrations - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, "foobar", actual[0].Service.Meta[ConsulK8SRefValue]) require.Equal(r, "pod", actual[0].Service.Meta[ConsulK8SRefKind]) require.Equal(r, nodeName1, actual[0].Service.Meta[ConsulK8SNodeName]) @@ -2013,7 +2090,7 @@ func TestServiceResource_addIngress(t *testing.T) { require.Equal(r, test.expectedAddress, actual[0].Service.Address) require.Equal(r, test.expectedPort, actual[0].Service.Port) } else { - require.Len(r, actual, 2) + require.Len(r, actual, 3) require.Equal(r, test.expectedAddress, actual[0].Service.Address) require.Equal(r, test.expectedPort, actual[0].Service.Port) } @@ -2085,8 +2162,8 @@ func clusterIPService(name, namespace string) *corev1.Service { } } -// createNodes calls the fake k8s client to create two Kubernetes nodes and returns them. -func createNodes(t *testing.T, client *fake.Clientset) (*corev1.Node, *corev1.Node) { +// createNodes calls the fake k8s client to create three Kubernetes nodes and returns them. 
+func createNodes(t *testing.T, client *fake.Clientset) (*corev1.Node, *corev1.Node, *corev1.Node) { // Insert the nodes node1 := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -2112,21 +2189,37 @@ func createNodes(t *testing.T, client *fake.Clientset) (*corev1.Node, *corev1.No Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ {Type: corev1.NodeExternalIP, Address: "2.3.4.5"}, - {Type: corev1.NodeInternalIP, Address: "3.4.5.6"}, - {Type: corev1.NodeInternalIP, Address: "6.7.8.9"}, + {Type: corev1.NodeInternalIP, Address: "5.6.7.8"}, + {Type: corev1.NodeInternalIP, Address: "8.9.10.11"}, }, }, } _, err = client.CoreV1().Nodes().Create(context.Background(), node2, metav1.CreateOptions{}) require.NoError(t, err) - return node1, node2 + node3 := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName3, + }, + + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + {Type: corev1.NodeExternalIP, Address: "3.4.5.6"}, + {Type: corev1.NodeInternalIP, Address: "6.7.8.9"}, + {Type: corev1.NodeInternalIP, Address: "9.10.11.12"}, + }, + }, + } + _, err = client.CoreV1().Nodes().Create(context.Background(), node3, metav1.CreateOptions{}) + require.NoError(t, err) + return node1, node2, node3 } -// createEndpointSlices calls the fake k8s client to create an endpoint slices with two endpoints on different nodes. +// createEndpointSlices calls the fake k8s client to create an endpoint slices with three endpoints on different nodes. func createEndpointSlice(t *testing.T, client *fake.Clientset, serviceName string, namespace string) { node1 := nodeName1 node2 := nodeName2 + node3 := nodeName3 targetRef := corev1.ObjectReference{Kind: "pod", Name: "foobar"} _, err := client.DiscoveryV1().EndpointSlices(namespace).Create( @@ -2152,13 +2245,23 @@ func createEndpointSlice(t *testing.T, client *fake.Clientset, serviceName strin { Addresses: []string{"2.2.2.2"}, Conditions: discoveryv1.EndpointConditions{ - Ready: ptr.To(true), + Ready: nil, Serving: ptr.To(true), Terminating: ptr.To(false), }, NodeName: &node2, Zone: ptr.To("us-west-2b"), }, + { + Addresses: []string{"3.3.3.3"}, + Conditions: discoveryv1.EndpointConditions{ + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), + }, + NodeName: &node3, + Zone: ptr.To("us-west-2c"), + }, }, Ports: []discoveryv1.EndpointPort{ { From 1e3bbd81b0429dd09c5727898736fbd936747adc Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Thu, 17 Oct 2024 13:26:21 -0400 Subject: [PATCH 5/9] Update changelog to include 1.6.0 (#4392) --- CHANGELOG.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dce1f25d0a..8882d2217e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +## 1.6.0 (October 16, 2024) + +> NOTE: Consul K8s 1.6.x is compatible with Consul 1.20.x and Consul Dataplane 1.6.x. Refer to our [compatibility matrix](https://developer.hashicorp.com/consul/docs/k8s/compatibility) for more info. + + +SECURITY: + +* Upgrade Go to use 1.22.7. This addresses CVE + [CVE-2024-34155](https://nvd.nist.gov/vuln/detail/CVE-2024-34155) [[GH-4313](https://github.com/hashicorp/consul-k8s/issues/4313)] + +IMPROVEMENTS: + +* dns-proxy: add the ability to deploy a DNS proxy within the kubernetes cluster that forwards DNS requests to the consul server and can be configured with an ACL token and make partition aware DNS requests. 
[[GH-4300](https://github.com/hashicorp/consul-k8s/issues/4300)] +* sync-catalog: expose prometheus scrape metrics on sync-catalog pods [[GH-4212](https://github.com/hashicorp/consul-k8s/issues/4212)] +* connect-inject: remove unnecessary resource permissions from connect-inject ClusterRole [[GH-4307](https://github.com/hashicorp/consul-k8s/issues/4307)] +* helm: Exclude gke namespaces from being connect-injected when the connect-inject: default: true value is set. [[GH-4333](https://github.com/hashicorp/consul-k8s/issues/4333)] + +BUG FIXES: + +* control-plane: add missing `$HOST_IP` environment variable to consul-dataplane sidecar containers [[GH-4277](https://github.com/hashicorp/consul-k8s/issues/4277)] +* helm: Fix ArgoCD hooks related annotations on server-acl-init Job, they must be added at Job definition and not template level. [[GH-3989](https://github.com/hashicorp/consul-k8s/issues/3989)] +* sync-catalog: Enable the user to purge the registered services by passing parent node and necessary filters. [[GH-4255](https://github.com/hashicorp/consul-k8s/issues/4255)] + ## 1.5.3 (August 30, 2024) SECURITY: From c64b7262ac74ff593060d5a83b9edef4d023d27d Mon Sep 17 00:00:00 2001 From: Nathan Coleman Date: Thu, 17 Oct 2024 15:39:44 -0400 Subject: [PATCH 6/9] Prepare branch for future minor release (#4391) --- charts/consul/Chart.yaml | 10 +++++----- charts/consul/values.yaml | 6 +++--- version/version.go | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/charts/consul/Chart.yaml b/charts/consul/Chart.yaml index 3dc8d13015..7855b049c1 100644 --- a/charts/consul/Chart.yaml +++ b/charts/consul/Chart.yaml @@ -3,8 +3,8 @@ apiVersion: v2 name: consul -version: 1.6.0-dev -appVersion: 1.20-dev +version: 1.7.0-dev +appVersion: 1.21-dev kubeVersion: ">=1.22.0-0" description: Official HashiCorp Consul Chart home: https://www.consul.io @@ -16,11 +16,11 @@ annotations: artifacthub.io/prerelease: true artifacthub.io/images: | - name: consul - image: docker.mirror.hashicorp.services/hashicorppreview/consul:1.20-dev + image: docker.mirror.hashicorp.services/hashicorppreview/consul:1.21-dev - name: consul-k8s-control-plane - image: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.6-dev + image: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.7-dev - name: consul-dataplane - image: docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.6-dev + image: docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.7-dev - name: envoy image: envoyproxy/envoy:v1.25.11 artifacthub.io/license: MPL-2.0 diff --git a/charts/consul/values.yaml b/charts/consul/values.yaml index 4a6aa20060..b9f9907e59 100644 --- a/charts/consul/values.yaml +++ b/charts/consul/values.yaml @@ -66,7 +66,7 @@ global: # image: "hashicorp/consul-enterprise:1.10.0-ent" # ``` # @default: hashicorp/consul: - image: docker.mirror.hashicorp.services/hashicorppreview/consul:1.20-dev + image: docker.mirror.hashicorp.services/hashicorppreview/consul:1.21-dev # Array of objects containing image pull secret names that will be applied to each service account. # This can be used to reference image pull secrets if using a custom consul or consul-k8s-control-plane Docker image. @@ -86,7 +86,7 @@ global: # image that is used for functionality such as catalog sync. # This can be overridden per component. 
# @default: hashicorp/consul-k8s-control-plane: - imageK8S: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.6-dev + imageK8S: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.7-dev # The image pull policy used globally for images controlled by Consul (consul, consul-dataplane, consul-k8s, consul-telemetry-collector). # One of "IfNotPresent", "Always", "Never", and "". Refer to https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy @@ -793,7 +793,7 @@ global: # The name (and tag) of the consul-dataplane Docker image used for the # connect-injected sidecar proxies and mesh, terminating, and ingress gateways. # @default: hashicorp/consul-dataplane: - imageConsulDataplane: docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.6-dev + imageConsulDataplane: docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.7-dev # Configuration for running this Helm chart on the Red Hat OpenShift platform. # This Helm chart currently supports OpenShift v4.x+. diff --git a/version/version.go b/version/version.go index e5d773c66d..ff7207c456 100644 --- a/version/version.go +++ b/version/version.go @@ -17,7 +17,7 @@ var ( // // Version must conform to the format expected by // github.com/hashicorp/go-version for tests to work. - Version = "1.6.0" + Version = "1.7.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release From bf41f34fa57290461d9b8ad7c991a5347391b2bf Mon Sep 17 00:00:00 2001 From: Michael Zalimeni Date: Thu, 17 Oct 2024 18:17:25 -0400 Subject: [PATCH 7/9] [NET-11043] crd: support request normalization and header match options to prevent L7 intentions bypass (#4385) crd: support request normalization and header match options to prevent L7 intentions bypass * crd: support L7 intentions header match contains and ignoreCase * crd: support mesh http.incoming.requestNormalization * crd: remove requirement for mesh http.sanitizeXForwardedClientCert This is a boolean field, and should not be required. Removing the requirement allows for it to be omitted when other fields are specified. --- .changelog/4385.txt | 6 + acceptance/go.mod | 2 +- acceptance/go.sum | 4 +- charts/consul/templates/crd-meshes.yaml | 49 ++++++- .../templates/crd-serviceintentions.yaml | 9 ++ cli/go.mod | 2 +- cli/go.sum | 6 +- control-plane/api/v1alpha1/mesh_types.go | 125 +++++++++++++++++- control-plane/api/v1alpha1/mesh_types_test.go | 112 ++++++++++++++++ .../api/v1alpha1/serviceintentions_types.go | 28 ++-- .../v1alpha1/serviceintentions_types_test.go | 92 ++++++++----- control-plane/catalog/metrics/metrics.go | 3 + .../bases/consul.hashicorp.com_meshes.yaml | 49 ++++++- ...onsul.hashicorp.com_serviceintentions.yaml | 9 ++ control-plane/go.mod | 4 +- control-plane/go.sum | 4 +- control-plane/helper/test/test_util.go | 2 +- .../subcommand/common/metrics_util.go | 3 + 18 files changed, 449 insertions(+), 60 deletions(-) create mode 100644 .changelog/4385.txt diff --git a/.changelog/4385.txt b/.changelog/4385.txt new file mode 100644 index 0000000000..2d45c62919 --- /dev/null +++ b/.changelog/4385.txt @@ -0,0 +1,6 @@ +```release-note:security +crd: Add `http.incoming.requestNormalization` to the Mesh CRD to support configuring service traffic request normalization. 
+``` +```release-note:security +crd: Add `contains` and `ignoreCase` to the Intentions CRD to support configuring L7 Header intentions resilient to variable casing and multiple header values. +``` diff --git a/acceptance/go.mod b/acceptance/go.mod index 7d78edc277..9b2b9e4cdb 100644 --- a/acceptance/go.mod +++ b/acceptance/go.mod @@ -9,7 +9,7 @@ require ( github.com/google/uuid v1.3.0 github.com/gruntwork-io/terratest v0.46.7 github.com/hashicorp/consul-k8s/control-plane v0.0.0-20240821160356-557f7c37e108 - github.com/hashicorp/consul/api v1.29.4 + github.com/hashicorp/consul/api v1.30.0 github.com/hashicorp/consul/sdk v0.16.1 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-uuid v1.0.3 diff --git a/acceptance/go.sum b/acceptance/go.sum index 18b2c1bcf0..cde3feb099 100644 --- a/acceptance/go.sum +++ b/acceptance/go.sum @@ -186,8 +186,8 @@ github.com/gruntwork-io/terratest v0.46.7 h1:oqGPBBO87SEsvBYaA0R5xOq+Lm2Xc5dmFVf github.com/gruntwork-io/terratest v0.46.7/go.mod h1:6gI5MlLeyF+SLwqocA5GBzcTix+XiuxCy1BPwKuT+WM= github.com/hashicorp/consul-k8s/control-plane v0.0.0-20240821160356-557f7c37e108 h1:5jSMtMGeY//hvkAefiomxP1Jqb5MtnKgsnlsZpEwiJE= github.com/hashicorp/consul-k8s/control-plane v0.0.0-20240821160356-557f7c37e108/go.mod h1:SY22WR9TJmlcK18Et2MAqy+kqAFJzbWFElN89vMTSiM= -github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE= -github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= +github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= +github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= diff --git a/charts/consul/templates/crd-meshes.yaml b/charts/consul/templates/crd-meshes.yaml index f81e61a2c5..9ecc1f3cbc 100644 --- a/charts/consul/templates/crd-meshes.yaml +++ b/charts/consul/templates/crd-meshes.yaml @@ -66,10 +66,55 @@ spec: http: description: HTTP defines the HTTP configuration for the service mesh. properties: + incoming: + description: Incoming configures settings for incoming HTTP traffic + to mesh proxies. + properties: + requestNormalization: + description: |- + RequestNormalizationMeshConfig contains options pertaining to the + normalization of HTTP requests processed by mesh proxies. + properties: + headersWithUnderscoresAction: + description: |- + HeadersWithUnderscoresAction sets the value of the \`headers_with_underscores_action\` option in the Envoy + listener's \`HttpConnectionManager\` under \`common_http_protocol_options\`. The default value of this option is + empty, which is equivalent to \`ALLOW\`. Refer to the Envoy documentation for more information on available + options. + type: string + insecureDisablePathNormalization: + description: |- + InsecureDisablePathNormalization sets the value of the \`normalize_path\` option in the Envoy listener's + `HttpConnectionManager`. The default value is \`false\`. When set to \`true\` in Consul, \`normalize_path\` is + set to \`false\` for the Envoy proxy. This parameter disables the normalization of request URL paths according to + RFC 3986, conversion of \`\\\` to \`/\`, and decoding non-reserved %-encoded characters. 
When using L7 intentions + with path match rules, we recommend enabling path normalization in order to avoid match rule circumvention with + non-normalized path values. + type: boolean + mergeSlashes: + description: |- + MergeSlashes sets the value of the \`merge_slashes\` option in the Envoy listener's \`HttpConnectionManager\`. + The default value is \`false\`. This option controls the normalization of request URL paths by merging + consecutive \`/\` characters. This normalization is not part of RFC 3986. When using L7 intentions with path + match rules, we recommend enabling this setting to avoid match rule circumvention through non-normalized path + values, unless legitimate service traffic depends on allowing for repeat \`/\` characters, or upstream services + are configured to differentiate between single and multiple slashes. + type: boolean + pathWithEscapedSlashesAction: + description: |- + PathWithEscapedSlashesAction sets the value of the \`path_with_escaped_slashes_action\` option in the Envoy + listener's \`HttpConnectionManager\`. The default value of this option is empty, which is equivalent to + \`IMPLEMENTATION_SPECIFIC_DEFAULT\`. This parameter controls the action taken in response to request URL paths + with escaped slashes in the path. When using L7 intentions with path match rules, we recommend enabling this + setting to avoid match rule circumvention through non-normalized path values, unless legitimate service traffic + depends on allowing for escaped \`/\` or \`\\\` characters, or upstream services are configured to differentiate + between escaped and unescaped slashes. Refer to the Envoy documentation for more information on available + options. + type: string + type: object + type: object sanitizeXForwardedClientCert: type: boolean - required: - - sanitizeXForwardedClientCert type: object peering: description: Peering defines the peering configuration for the service diff --git a/charts/consul/templates/crd-serviceintentions.yaml b/charts/consul/templates/crd-serviceintentions.yaml index 72159ec187..09daae2f6c 100644 --- a/charts/consul/templates/crd-serviceintentions.yaml +++ b/charts/consul/templates/crd-serviceintentions.yaml @@ -168,10 +168,19 @@ spec: If more than one is configured all must match for the overall match to apply. items: properties: + contains: + description: Contains matches if the header + with the given name contains this value. + type: string exact: description: Exact matches if the header with the given name is this value. type: string + ignoreCase: + description: IgnoreCase ignores the case of + the header value when matching with exact, + prefix, suffix, or contains. + type: boolean invert: description: Invert inverts the logic of the match. 
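Taken together, the CRD changes above surface to users as new fields on the `Mesh` and `ServiceIntentions` custom resources. A minimal sketch of what that usage could look like, assuming the CRDs from this change are installed; the resource and service names are illustrative, and only the `requestNormalization` block and the `contains`/`ignoreCase` header-match fields come from the hunks above:

```yaml
# Opt into inbound request normalization mesh-wide. The action values shown
# (UNESCAPE_AND_FORWARD, REJECT_REQUEST) are among the enum values accepted by
# the new validation added to mesh_types.go in this change.
apiVersion: consul.hashicorp.com/v1alpha1
kind: Mesh
metadata:
  name: mesh
spec:
  http:
    incoming:
      requestNormalization:
        mergeSlashes: true
        pathWithEscapedSlashesAction: UNESCAPE_AND_FORWARD
        headersWithUnderscoresAction: REJECT_REQUEST
---
# Header match that tolerates variable casing by pairing the new contains and
# ignoreCase options with a path match rule.
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceIntentions
metadata:
  name: backend
spec:
  destination:
    name: backend
  sources:
    - name: frontend
      permissions:
        - action: allow
          http:
            pathPrefix: /api
            header:
              - name: x-environment
                contains: staging
                ignoreCase: true
```

Because `ignoreCase` is only meaningful alongside `exact`, `prefix`, `suffix`, or `contains`, the sketch pairs it with `contains`; the new header-permission validation rejects combining it with `present` or `regex`.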
diff --git a/cli/go.mod b/cli/go.mod index f97cda4aef..fd44909f19 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -96,7 +96,7 @@ require ( github.com/gorilla/websocket v1.5.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect - github.com/hashicorp/consul/api v1.29.4 // indirect + github.com/hashicorp/consul/api v1.30.0 // indirect github.com/hashicorp/consul/envoyextensions v0.7.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect diff --git a/cli/go.sum b/cli/go.sum index 9edb6c903b..1609e4bb0b 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -288,12 +288,10 @@ github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE= -github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= +github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= +github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= github.com/hashicorp/consul/envoyextensions v0.7.3 h1:5Gn1Hj135NYNRBmB3IdwhkxIHQgEJPjXYPZcA+05rNY= github.com/hashicorp/consul/envoyextensions v0.7.3/go.mod h1:tya/kHsOBGaeAS9inAfUFJIEJ812c125cQD4MrLTt2s= -github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= -github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/consul/troubleshoot v0.7.1 h1:IQYxC1qsV3jO74VZDyPi283Ufi84/mXSMm53U8dsN2M= diff --git a/control-plane/api/v1alpha1/mesh_types.go b/control-plane/api/v1alpha1/mesh_types.go index 4d8a14358b..f70167a912 100644 --- a/control-plane/api/v1alpha1/mesh_types.go +++ b/control-plane/api/v1alpha1/mesh_types.go @@ -87,7 +87,18 @@ type MeshTLSConfig struct { } type MeshHTTPConfig struct { - SanitizeXForwardedClientCert bool `json:"sanitizeXForwardedClientCert"` + SanitizeXForwardedClientCert bool `json:"sanitizeXForwardedClientCert,omitempty"` + // Incoming configures settings for incoming HTTP traffic to mesh proxies. + Incoming *MeshDirectionalHTTPConfig `json:"incoming,omitempty"` + // There is not currently an outgoing MeshDirectionalHTTPConfig, as + // the only required config for either direction at present is inbound + // request normalization. +} + +// MeshDirectionalHTTPConfig holds mesh configuration specific to HTTP +// requests for a given traffic direction. +type MeshDirectionalHTTPConfig struct { + RequestNormalization *RequestNormalizationMeshConfig `json:"requestNormalization,omitempty"` } type PeeringMeshConfig struct { @@ -117,6 +128,61 @@ type MeshDirectionalTLSConfig struct { CipherSuites []string `json:"cipherSuites,omitempty"` } +// RequestNormalizationMeshConfig contains options pertaining to the +// normalization of HTTP requests processed by mesh proxies. 
+type RequestNormalizationMeshConfig struct { + // InsecureDisablePathNormalization sets the value of the \`normalize_path\` option in the Envoy listener's + // `HttpConnectionManager`. The default value is \`false\`. When set to \`true\` in Consul, \`normalize_path\` is + // set to \`false\` for the Envoy proxy. This parameter disables the normalization of request URL paths according to + // RFC 3986, conversion of \`\\\` to \`/\`, and decoding non-reserved %-encoded characters. When using L7 intentions + // with path match rules, we recommend enabling path normalization in order to avoid match rule circumvention with + // non-normalized path values. + InsecureDisablePathNormalization bool `json:"insecureDisablePathNormalization,omitempty"` + // MergeSlashes sets the value of the \`merge_slashes\` option in the Envoy listener's \`HttpConnectionManager\`. + // The default value is \`false\`. This option controls the normalization of request URL paths by merging + // consecutive \`/\` characters. This normalization is not part of RFC 3986. When using L7 intentions with path + // match rules, we recommend enabling this setting to avoid match rule circumvention through non-normalized path + // values, unless legitimate service traffic depends on allowing for repeat \`/\` characters, or upstream services + // are configured to differentiate between single and multiple slashes. + MergeSlashes bool `json:"mergeSlashes,omitempty"` + // PathWithEscapedSlashesAction sets the value of the \`path_with_escaped_slashes_action\` option in the Envoy + // listener's \`HttpConnectionManager\`. The default value of this option is empty, which is equivalent to + // \`IMPLEMENTATION_SPECIFIC_DEFAULT\`. This parameter controls the action taken in response to request URL paths + // with escaped slashes in the path. When using L7 intentions with path match rules, we recommend enabling this + // setting to avoid match rule circumvention through non-normalized path values, unless legitimate service traffic + // depends on allowing for escaped \`/\` or \`\\\` characters, or upstream services are configured to differentiate + // between escaped and unescaped slashes. Refer to the Envoy documentation for more information on available + // options. + PathWithEscapedSlashesAction string `json:"pathWithEscapedSlashesAction,omitempty"` + // HeadersWithUnderscoresAction sets the value of the \`headers_with_underscores_action\` option in the Envoy + // listener's \`HttpConnectionManager\` under \`common_http_protocol_options\`. The default value of this option is + // empty, which is equivalent to \`ALLOW\`. Refer to the Envoy documentation for more information on available + // options. + HeadersWithUnderscoresAction string `json:"headersWithUnderscoresAction,omitempty"` +} + +// PathWithEscapedSlashesAction is an enum that defines the action to take when +// a request path contains escaped slashes. It mirrors exactly the set of options +// in Envoy's UriPathNormalizationOptions.PathWithEscapedSlashesAction enum. +// See github.com/envoyproxy/go-control-plane envoy_http_v3.HttpConnectionManager_PathWithEscapedSlashesAction. 
+const ( + PathWithEscapedSlashesActionDefault = "IMPLEMENTATION_SPECIFIC_DEFAULT" + PathWithEscapedSlashesActionKeep = "KEEP_UNCHANGED" + PathWithEscapedSlashesActionReject = "REJECT_REQUEST" + PathWithEscapedSlashesActionUnescapeAndRedirect = "UNESCAPE_AND_REDIRECT" + PathWithEscapedSlashesActionUnescapeAndForward = "UNESCAPE_AND_FORWARD" +) + +// HeadersWithUnderscoresAction is an enum that defines the action to take when +// a request contains headers with underscores. It mirrors exactly the set of +// options in Envoy's HttpProtocolOptions.HeadersWithUnderscoresAction enum. +// See github.com/envoyproxy/go-control-plane envoy_core_v3.HttpProtocolOptions_HeadersWithUnderscoresAction. +const ( + HeadersWithUnderscoresActionAllow = "ALLOW" + HeadersWithUnderscoresActionRejectRequest = "REJECT_REQUEST" + HeadersWithUnderscoresActionDropHeader = "DROP_HEADER" +) + func (in *TransparentProxyMeshConfig) toConsul() capi.TransparentProxyMeshConfig { return capi.TransparentProxyMeshConfig{MeshDestinationsOnly: in.MeshDestinationsOnly} } @@ -227,6 +293,11 @@ func (in *Mesh) Validate(consulMeta common.ConsulMeta) error { errs = append(errs, in.Spec.TLS.validate(path.Child("tls"))...) errs = append(errs, in.Spec.Peering.validate(path.Child("peering"), consulMeta.PartitionsEnabled, consulMeta.Partition)...) + if in.Spec.HTTP != nil && + in.Spec.HTTP.Incoming != nil && + in.Spec.HTTP.Incoming.RequestNormalization != nil { + errs = append(errs, in.Spec.HTTP.Incoming.RequestNormalization.validate(path.Child("http", "incoming", "requestNormalization"))...) + } if len(errs) > 0 { return apierrors.NewInvalid( @@ -252,6 +323,28 @@ func (in *MeshHTTPConfig) toConsul() *capi.MeshHTTPConfig { } return &capi.MeshHTTPConfig{ SanitizeXForwardedClientCert: in.SanitizeXForwardedClientCert, + Incoming: in.Incoming.toConsul(), + } +} + +func (in *MeshDirectionalHTTPConfig) toConsul() *capi.MeshDirectionalHTTPConfig { + if in == nil { + return nil + } + return &capi.MeshDirectionalHTTPConfig{ + RequestNormalization: in.RequestNormalization.toConsul(), + } +} + +func (in *RequestNormalizationMeshConfig) toConsul() *capi.RequestNormalizationMeshConfig { + if in == nil { + return nil + } + return &capi.RequestNormalizationMeshConfig{ + InsecureDisablePathNormalization: in.InsecureDisablePathNormalization, + MergeSlashes: in.MergeSlashes, + PathWithEscapedSlashesAction: in.PathWithEscapedSlashesAction, + HeadersWithUnderscoresAction: in.HeadersWithUnderscoresAction, } } @@ -316,6 +409,36 @@ func (in *PeeringMeshConfig) validate(path *field.Path, partitionsEnabled bool, return errs } +func (in *RequestNormalizationMeshConfig) validate(path *field.Path) field.ErrorList { + if in == nil { + return nil + } + + var errs field.ErrorList + pathWithEscapedSlashesActions := []string{ + PathWithEscapedSlashesActionDefault, + PathWithEscapedSlashesActionKeep, + PathWithEscapedSlashesActionReject, + PathWithEscapedSlashesActionUnescapeAndRedirect, + PathWithEscapedSlashesActionUnescapeAndForward, + "", + } + headersWithUnderscoresActions := []string{ + HeadersWithUnderscoresActionAllow, + HeadersWithUnderscoresActionRejectRequest, + HeadersWithUnderscoresActionDropHeader, + "", + } + + if !sliceContains(pathWithEscapedSlashesActions, in.PathWithEscapedSlashesAction) { + errs = append(errs, field.Invalid(path.Child("pathWithEscapedSlashesAction"), in.PathWithEscapedSlashesAction, notInSliceMessage(pathWithEscapedSlashesActions))) + } + if !sliceContains(headersWithUnderscoresActions, in.HeadersWithUnderscoresAction) { + errs = 
append(errs, field.Invalid(path.Child("headersWithUnderscoresAction"), in.HeadersWithUnderscoresAction, notInSliceMessage(headersWithUnderscoresActions))) + } + return errs +} + // DefaultNamespaceFields has no behaviour here as meshes have no namespace specific fields. func (in *Mesh) DefaultNamespaceFields(_ common.ConsulMeta) { } diff --git a/control-plane/api/v1alpha1/mesh_types_test.go b/control-plane/api/v1alpha1/mesh_types_test.go index 7a925fb0ce..5911e25d4d 100644 --- a/control-plane/api/v1alpha1/mesh_types_test.go +++ b/control-plane/api/v1alpha1/mesh_types_test.go @@ -64,6 +64,14 @@ func TestMesh_MatchesConsul(t *testing.T) { }, HTTP: &MeshHTTPConfig{ SanitizeXForwardedClientCert: true, + Incoming: &MeshDirectionalHTTPConfig{ + RequestNormalization: &RequestNormalizationMeshConfig{ + InsecureDisablePathNormalization: true, // note: this is the opposite of the recommended default + MergeSlashes: true, + PathWithEscapedSlashesAction: "REJECT_REQUEST", + HeadersWithUnderscoresAction: "DROP_HEADER", + }, + }, }, Peering: &PeeringMeshConfig{ PeerThroughMeshGateways: true, @@ -90,6 +98,14 @@ func TestMesh_MatchesConsul(t *testing.T) { }, HTTP: &capi.MeshHTTPConfig{ SanitizeXForwardedClientCert: true, + Incoming: &capi.MeshDirectionalHTTPConfig{ + RequestNormalization: &capi.RequestNormalizationMeshConfig{ + InsecureDisablePathNormalization: true, // note: this is the opposite of the recommended default + MergeSlashes: true, + PathWithEscapedSlashesAction: "REJECT_REQUEST", + HeadersWithUnderscoresAction: "DROP_HEADER", + }, + }, }, Peering: &capi.PeeringMeshConfig{ PeerThroughMeshGateways: true, @@ -168,6 +184,14 @@ func TestMesh_ToConsul(t *testing.T) { }, HTTP: &MeshHTTPConfig{ SanitizeXForwardedClientCert: true, + Incoming: &MeshDirectionalHTTPConfig{ + RequestNormalization: &RequestNormalizationMeshConfig{ + InsecureDisablePathNormalization: true, // note: this is the opposite of the recommended default + MergeSlashes: true, + PathWithEscapedSlashesAction: "REJECT_REQUEST", + HeadersWithUnderscoresAction: "DROP_HEADER", + }, + }, }, Peering: &PeeringMeshConfig{ PeerThroughMeshGateways: true, @@ -194,6 +218,14 @@ func TestMesh_ToConsul(t *testing.T) { }, HTTP: &capi.MeshHTTPConfig{ SanitizeXForwardedClientCert: true, + Incoming: &capi.MeshDirectionalHTTPConfig{ + RequestNormalization: &capi.RequestNormalizationMeshConfig{ + InsecureDisablePathNormalization: true, // note: this is the opposite of the recommended default + MergeSlashes: true, + PathWithEscapedSlashesAction: "REJECT_REQUEST", + HeadersWithUnderscoresAction: "DROP_HEADER", + }, + }, }, Peering: &capi.PeeringMeshConfig{ PeerThroughMeshGateways: true, @@ -367,6 +399,76 @@ func TestMesh_Validate(t *testing.T) { }, }, }, + "http.incoming.requestNormalization.pathWithEscapedSlashesAction valid": { + input: &Mesh{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: MeshSpec{ + HTTP: &MeshHTTPConfig{ + Incoming: &MeshDirectionalHTTPConfig{ + RequestNormalization: &RequestNormalizationMeshConfig{ + PathWithEscapedSlashesAction: "UNESCAPE_AND_FORWARD", + }, + }, + }, + }, + }, + }, + "http.incoming.requestNormalization.pathWithEscapedSlashesAction invalid": { + input: &Mesh{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: MeshSpec{ + HTTP: &MeshHTTPConfig{ + Incoming: &MeshDirectionalHTTPConfig{ + RequestNormalization: &RequestNormalizationMeshConfig{ + PathWithEscapedSlashesAction: "foo", + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + 
`spec.http.incoming.requestNormalization.pathWithEscapedSlashesAction: Invalid value: "foo": must be one of "IMPLEMENTATION_SPECIFIC_DEFAULT", "KEEP_UNCHANGED", "REJECT_REQUEST", "UNESCAPE_AND_REDIRECT", "UNESCAPE_AND_FORWARD", ""`, + }, + }, + "http.incoming.requestNormalization.headerWithUnderscoresAction valid": { + input: &Mesh{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: MeshSpec{ + HTTP: &MeshHTTPConfig{ + Incoming: &MeshDirectionalHTTPConfig{ + RequestNormalization: &RequestNormalizationMeshConfig{ + HeadersWithUnderscoresAction: "REJECT_REQUEST", + }, + }, + }, + }, + }, + }, + "http.incoming.requestNormalization.headersWithUnderscoresAction invalid": { + input: &Mesh{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + }, + Spec: MeshSpec{ + HTTP: &MeshHTTPConfig{ + Incoming: &MeshDirectionalHTTPConfig{ + RequestNormalization: &RequestNormalizationMeshConfig{ + HeadersWithUnderscoresAction: "bar", + }, + }, + }, + }, + }, + expectedErrMsgs: []string{ + `spec.http.incoming.requestNormalization.headersWithUnderscoresAction: Invalid value: "bar": must be one of "ALLOW", "REJECT_REQUEST", "DROP_HEADER", ""`, + }, + }, "multiple errors": { input: &Mesh{ ObjectMeta: metav1.ObjectMeta{ @@ -386,6 +488,14 @@ func TestMesh_Validate(t *testing.T) { Peering: &PeeringMeshConfig{ PeerThroughMeshGateways: true, }, + HTTP: &MeshHTTPConfig{ + Incoming: &MeshDirectionalHTTPConfig{ + RequestNormalization: &RequestNormalizationMeshConfig{ + PathWithEscapedSlashesAction: "foo", + HeadersWithUnderscoresAction: "bar", + }, + }, + }, }, }, consulMeta: common.ConsulMeta{ @@ -398,6 +508,8 @@ func TestMesh_Validate(t *testing.T) { `spec.tls.outgoing.tlsMinVersion: Invalid value: "foo": must be one of "TLS_AUTO", "TLSv1_0", "TLSv1_1", "TLSv1_2", "TLSv1_3", ""`, `spec.tls.outgoing.tlsMaxVersion: Invalid value: "bar": must be one of "TLS_AUTO", "TLSv1_0", "TLSv1_1", "TLSv1_2", "TLSv1_3", ""`, `spec.peering.peerThroughMeshGateways: Forbidden: "peerThroughMeshGateways" is only valid in the "default" partition`, + `spec.http.incoming.requestNormalization.pathWithEscapedSlashesAction: Invalid value: "foo": must be one of "IMPLEMENTATION_SPECIFIC_DEFAULT", "KEEP_UNCHANGED", "REJECT_REQUEST", "UNESCAPE_AND_REDIRECT", "UNESCAPE_AND_FORWARD", ""`, + `spec.http.incoming.requestNormalization.headersWithUnderscoresAction: Invalid value: "bar": must be one of "ALLOW", "REJECT_REQUEST", "DROP_HEADER", ""`, }, }, } diff --git a/control-plane/api/v1alpha1/serviceintentions_types.go b/control-plane/api/v1alpha1/serviceintentions_types.go index d393f72a2d..17d0a9cf2a 100644 --- a/control-plane/api/v1alpha1/serviceintentions_types.go +++ b/control-plane/api/v1alpha1/serviceintentions_types.go @@ -140,8 +140,12 @@ type IntentionHTTPHeaderPermission struct { Prefix string `json:"prefix,omitempty"` // Suffix matches if the header with the given name has this suffix. Suffix string `json:"suffix,omitempty"` + // Contains matches if the header with the given name contains this value. + Contains string `json:"contains,omitempty"` // Regex matches if the header with the given name matches this pattern. Regex string `json:"regex,omitempty"` + // IgnoreCase ignores the case of the header value when matching with exact, prefix, suffix, or contains. + IgnoreCase bool `json:"ignoreCase,omitempty"` // Invert inverts the logic of the match. 
Invert bool `json:"invert,omitempty"` } @@ -448,13 +452,15 @@ func (in IntentionHTTPHeaderPermissions) toConsul() []capi.IntentionHTTPHeaderPe var headerPermissions []capi.IntentionHTTPHeaderPermission for _, permission := range in { headerPermissions = append(headerPermissions, capi.IntentionHTTPHeaderPermission{ - Name: permission.Name, - Present: permission.Present, - Exact: permission.Exact, - Prefix: permission.Prefix, - Suffix: permission.Suffix, - Regex: permission.Regex, - Invert: permission.Invert, + Name: permission.Name, + Present: permission.Present, + Exact: permission.Exact, + Prefix: permission.Prefix, + Suffix: permission.Suffix, + Contains: permission.Contains, + Regex: permission.Regex, + Invert: permission.Invert, + IgnoreCase: permission.IgnoreCase, }) } @@ -568,10 +574,14 @@ func (in IntentionHTTPHeaderPermissions) validate(path *field.Path) field.ErrorL if permission.Present { hdrParts++ } - hdrParts += numNotEmpty(permission.Exact, permission.Regex, permission.Prefix, permission.Suffix) + hdrParts += numNotEmpty(permission.Exact, permission.Regex, permission.Prefix, permission.Suffix, permission.Contains) if hdrParts > 1 { asJson, _ := json.Marshal(in[i]) - errs = append(errs, field.Invalid(path.Index(i), string(asJson), `at most only one of exact, prefix, suffix, regex, or present may be configured.`)) + errs = append(errs, field.Invalid(path.Index(i), string(asJson), `at most only one of exact, prefix, suffix, contains, regex, or present may be configured.`)) + } + if permission.IgnoreCase && (permission.Present || permission.Regex != "") { + asJson, _ := json.Marshal(in[i]) + errs = append(errs, field.Invalid(path.Index(i), string(asJson), `should set one of exact, prefix, suffix, or contains when using ignoreCase.`)) } } return errs diff --git a/control-plane/api/v1alpha1/serviceintentions_types_test.go b/control-plane/api/v1alpha1/serviceintentions_types_test.go index 8d0a6d907a..fb2ae743b5 100644 --- a/control-plane/api/v1alpha1/serviceintentions_types_test.go +++ b/control-plane/api/v1alpha1/serviceintentions_types_test.go @@ -151,13 +151,15 @@ func TestServiceIntentions_MatchesConsul(t *testing.T) { PathRegex: "/baz", Header: IntentionHTTPHeaderPermissions{ { - Name: "header", - Present: true, - Exact: "exact", - Prefix: "prefix", - Suffix: "suffix", - Regex: "regex", - Invert: true, + Name: "header", + Present: true, + Exact: "exact", + Prefix: "prefix", + Suffix: "suffix", + Contains: "contains", + Regex: "regex", + Invert: true, + IgnoreCase: true, }, }, Methods: []string{ @@ -232,13 +234,15 @@ func TestServiceIntentions_MatchesConsul(t *testing.T) { PathRegex: "/baz", Header: []capi.IntentionHTTPHeaderPermission{ { - Name: "header", - Present: true, - Exact: "exact", - Prefix: "prefix", - Suffix: "suffix", - Regex: "regex", - Invert: true, + Name: "header", + Present: true, + Exact: "exact", + Prefix: "prefix", + Suffix: "suffix", + Contains: "contains", + Regex: "regex", + Invert: true, + IgnoreCase: true, }, }, Methods: []string{ @@ -414,13 +418,15 @@ func TestServiceIntentions_ToConsul(t *testing.T) { PathRegex: "/baz", Header: IntentionHTTPHeaderPermissions{ { - Name: "header", - Present: true, - Exact: "exact", - Prefix: "prefix", - Suffix: "suffix", - Regex: "regex", - Invert: true, + Name: "header", + Present: true, + Exact: "exact", + Prefix: "prefix", + Suffix: "suffix", + Contains: "contains", + Regex: "regex", + Invert: true, + IgnoreCase: true, }, }, Methods: []string{ @@ -500,13 +506,15 @@ func TestServiceIntentions_ToConsul(t *testing.T) { 
PathRegex: "/baz", Header: []capi.IntentionHTTPHeaderPermission{ { - Name: "header", - Present: true, - Exact: "exact", - Prefix: "prefix", - Suffix: "suffix", - Regex: "regex", - Invert: true, + Name: "header", + Present: true, + Exact: "exact", + Prefix: "prefix", + Suffix: "suffix", + Contains: "contains", + Regex: "regex", + Invert: true, + IgnoreCase: true, }, }, Methods: []string{ @@ -1258,11 +1266,26 @@ func TestServiceIntentions_Validate(t *testing.T) { Suffix: "bar", Regex: "foo", }, + { + Name: "suffix-contains", + Suffix: "bar", + Contains: "foo", + }, { Name: "regex-present", Present: true, Regex: "foobar", }, + { + Name: "present-ignoreCase", + Present: true, + IgnoreCase: true, + }, + { + Name: "regex-ignoreCase", + Regex: "foobar", + IgnoreCase: true, + }, }, }, }, @@ -1273,11 +1296,14 @@ func TestServiceIntentions_Validate(t *testing.T) { }, namespacesEnabled: true, expectedErrMsgs: []string{ - `spec.sources[0].permissions[0].header[0]: Invalid value: "{\"name\":\"exact-present\",\"present\":true,\"exact\":\"foobar\"}": at most only one of exact, prefix, suffix, regex, or present may be configured.`, - `spec.sources[0].permissions[0].header[1]: Invalid value: "{\"name\":\"prefix-exact\",\"exact\":\"foobar\",\"prefix\":\"barfood\"}": at most only one of exact, prefix, suffix, regex, or present may be configured.`, - `spec.sources[0].permissions[0].header[2]: Invalid value: "{\"name\":\"suffix-prefix\",\"prefix\":\"foo\",\"suffix\":\"bar\"}": at most only one of exact, prefix, suffix, regex, or present may be configured.`, - `spec.sources[0].permissions[0].header[3]: Invalid value: "{\"name\":\"suffix-regex\",\"suffix\":\"bar\",\"regex\":\"foo\"}": at most only one of exact, prefix, suffix, regex, or present may be configured.`, - `spec.sources[0].permissions[0].header[4]: Invalid value: "{\"name\":\"regex-present\",\"present\":true,\"regex\":\"foobar\"}": at most only one of exact, prefix, suffix, regex, or present may be configured.`, + `spec.sources[0].permissions[0].header[0]: Invalid value: "{\"name\":\"exact-present\",\"present\":true,\"exact\":\"foobar\"}": at most only one of exact, prefix, suffix, contains, regex, or present may be configured.`, + `spec.sources[0].permissions[0].header[1]: Invalid value: "{\"name\":\"prefix-exact\",\"exact\":\"foobar\",\"prefix\":\"barfood\"}": at most only one of exact, prefix, suffix, contains, regex, or present may be configured.`, + `spec.sources[0].permissions[0].header[2]: Invalid value: "{\"name\":\"suffix-prefix\",\"prefix\":\"foo\",\"suffix\":\"bar\"}": at most only one of exact, prefix, suffix, contains, regex, or present may be configured.`, + `spec.sources[0].permissions[0].header[3]: Invalid value: "{\"name\":\"suffix-regex\",\"suffix\":\"bar\",\"regex\":\"foo\"}": at most only one of exact, prefix, suffix, contains, regex, or present may be configured.`, + `spec.sources[0].permissions[0].header[4]: Invalid value: "{\"name\":\"suffix-contains\",\"suffix\":\"bar\",\"contains\":\"foo\"}": at most only one of exact, prefix, suffix, contains, regex, or present may be configured.`, + `spec.sources[0].permissions[0].header[5]: Invalid value: "{\"name\":\"regex-present\",\"present\":true,\"regex\":\"foobar\"}": at most only one of exact, prefix, suffix, contains, regex, or present may be configured.`, + `spec.sources[0].permissions[0].header[6]: Invalid value: "{\"name\":\"present-ignoreCase\",\"present\":true,\"ignoreCase\":true}": should set one of exact, prefix, suffix, or contains when using ignoreCase.`, + 
`spec.sources[0].permissions[0].header[7]: Invalid value: "{\"name\":\"regex-ignoreCase\",\"regex\":\"foobar\",\"ignoreCase\":true}": should set one of exact, prefix, suffix, or contains when using ignoreCase.`, }, }, "invalid permissions.action": { diff --git a/control-plane/catalog/metrics/metrics.go b/control-plane/catalog/metrics/metrics.go index 17fabf0bdc..0979cb6f38 100644 --- a/control-plane/catalog/metrics/metrics.go +++ b/control-plane/catalog/metrics/metrics.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package metrics import ( diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml index c5c15b3c5d..51ac810778 100644 --- a/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml +++ b/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml @@ -62,10 +62,55 @@ spec: http: description: HTTP defines the HTTP configuration for the service mesh. properties: + incoming: + description: Incoming configures settings for incoming HTTP traffic + to mesh proxies. + properties: + requestNormalization: + description: |- + RequestNormalizationMeshConfig contains options pertaining to the + normalization of HTTP requests processed by mesh proxies. + properties: + headersWithUnderscoresAction: + description: |- + HeadersWithUnderscoresAction sets the value of the \`headers_with_underscores_action\` option in the Envoy + listener's \`HttpConnectionManager\` under \`common_http_protocol_options\`. The default value of this option is + empty, which is equivalent to \`ALLOW\`. Refer to the Envoy documentation for more information on available + options. + type: string + insecureDisablePathNormalization: + description: |- + InsecureDisablePathNormalization sets the value of the \`normalize_path\` option in the Envoy listener's + `HttpConnectionManager`. The default value is \`false\`. When set to \`true\` in Consul, \`normalize_path\` is + set to \`false\` for the Envoy proxy. This parameter disables the normalization of request URL paths according to + RFC 3986, conversion of \`\\\` to \`/\`, and decoding non-reserved %-encoded characters. When using L7 intentions + with path match rules, we recommend enabling path normalization in order to avoid match rule circumvention with + non-normalized path values. + type: boolean + mergeSlashes: + description: |- + MergeSlashes sets the value of the \`merge_slashes\` option in the Envoy listener's \`HttpConnectionManager\`. + The default value is \`false\`. This option controls the normalization of request URL paths by merging + consecutive \`/\` characters. This normalization is not part of RFC 3986. When using L7 intentions with path + match rules, we recommend enabling this setting to avoid match rule circumvention through non-normalized path + values, unless legitimate service traffic depends on allowing for repeat \`/\` characters, or upstream services + are configured to differentiate between single and multiple slashes. + type: boolean + pathWithEscapedSlashesAction: + description: |- + PathWithEscapedSlashesAction sets the value of the \`path_with_escaped_slashes_action\` option in the Envoy + listener's \`HttpConnectionManager\`. The default value of this option is empty, which is equivalent to + \`IMPLEMENTATION_SPECIFIC_DEFAULT\`. This parameter controls the action taken in response to request URL paths + with escaped slashes in the path. 
When using L7 intentions with path match rules, we recommend enabling this + setting to avoid match rule circumvention through non-normalized path values, unless legitimate service traffic + depends on allowing for escaped \`/\` or \`\\\` characters, or upstream services are configured to differentiate + between escaped and unescaped slashes. Refer to the Envoy documentation for more information on available + options. + type: string + type: object + type: object sanitizeXForwardedClientCert: type: boolean - required: - - sanitizeXForwardedClientCert type: object peering: description: Peering defines the peering configuration for the service diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_serviceintentions.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_serviceintentions.yaml index 957295b18e..04b5c08522 100644 --- a/control-plane/config/crd/bases/consul.hashicorp.com_serviceintentions.yaml +++ b/control-plane/config/crd/bases/consul.hashicorp.com_serviceintentions.yaml @@ -164,10 +164,19 @@ spec: If more than one is configured all must match for the overall match to apply. items: properties: + contains: + description: Contains matches if the header + with the given name contains this value. + type: string exact: description: Exact matches if the header with the given name is this value. type: string + ignoreCase: + description: IgnoreCase ignores the case of + the header value when matching with exact, + prefix, suffix, or contains. + type: boolean invert: description: Invert inverts the logic of the match. diff --git a/control-plane/go.mod b/control-plane/go.mod index 3e23f2a593..f1cb7dd378 100644 --- a/control-plane/go.mod +++ b/control-plane/go.mod @@ -16,7 +16,7 @@ require ( github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20240226161840-f3842c41cb2b github.com/hashicorp/consul-k8s/version v0.0.0 github.com/hashicorp/consul-server-connection-manager v0.1.6 - github.com/hashicorp/consul/api v1.29.4 + github.com/hashicorp/consul/api v1.30.0 github.com/hashicorp/consul/sdk v0.16.1 github.com/hashicorp/go-bexpr v0.1.11 github.com/hashicorp/go-discover v0.0.0-20230519164032-214571b6a530 @@ -37,6 +37,7 @@ require ( github.com/stretchr/testify v1.8.4 go.uber.org/zap v1.25.0 golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 + golang.org/x/sync v0.8.0 golang.org/x/text v0.17.0 golang.org/x/time v0.3.0 gomodules.xyz/jsonpatch/v2 v2.4.0 @@ -146,7 +147,6 @@ require ( golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.24.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/tools v0.24.0 // indirect diff --git a/control-plane/go.sum b/control-plane/go.sum index 79bf109c1d..a733ec21ba 100644 --- a/control-plane/go.sum +++ b/control-plane/go.sum @@ -214,8 +214,8 @@ github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20240226161840-f3842c41 github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20240226161840-f3842c41cb2b/go.mod h1:9NKJHOcgmz/6P2y6MegNIOXhIKE/0ils/mHWd5sZgoU= github.com/hashicorp/consul-server-connection-manager v0.1.6 h1:ktj8Fi+dRXn9hhM+FXsfEJayhzzgTqfH08Ne5M6Fmug= github.com/hashicorp/consul-server-connection-manager v0.1.6/go.mod h1:HngMIv57MT+pqCVeRQMa1eTB5dqnyMm8uxjyv+Hn8cs= -github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE= -github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= +github.com/hashicorp/consul/api v1.30.0 
h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= +github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= diff --git a/control-plane/helper/test/test_util.go b/control-plane/helper/test/test_util.go index 542700c34f..9cd7944b11 100644 --- a/control-plane/helper/test/test_util.go +++ b/control-plane/helper/test/test_util.go @@ -282,7 +282,7 @@ func SetupK8sAuthMethodWithNamespaces(t *testing.T, consulClient *api.Client, se Description: "Kubernetes binding rule", AuthMethod: AuthMethod, BindType: api.BindingRuleBindTypeTemplatedPolicy, - BindName: api.ACLTemplatedPolicyWorkloadIdentityName, + BindName: "", //api.ACLTemplatedPolicyWorkloadIdentityName, TODO: remove w/ v2 code BindVars: &api.ACLTemplatedPolicyVariables{ Name: "${serviceaccount.name}", }, diff --git a/control-plane/subcommand/common/metrics_util.go b/control-plane/subcommand/common/metrics_util.go index d3866fdeef..343b4c9e51 100644 --- a/control-plane/subcommand/common/metrics_util.go +++ b/control-plane/subcommand/common/metrics_util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package common import "strconv" From f31a6ba2c0ba1bdbefcbaaa06dd897d4209aa323 Mon Sep 17 00:00:00 2001 From: John Maguire Date: Mon, 21 Oct 2024 18:05:04 -0400 Subject: [PATCH 8/9] [NET-11297] Purge on disable (#4378) * purge services on disable of sync catalog * gofumpt * update tests for sync catalog * Added changelog * create separate jobs to handle deregistering services when disabling sync catalog * Check error * fix tests * rename field in values file, remove references to pod security policy * remove psp * added bats testing --- .changelog/4378.txt | 3 + ...sync-catalog-cleanup-on-uninstall-job.yaml | 174 +++++++++++++ .../sync-catalog-cleanup-on-upgrade-job.yaml | 175 +++++++++++++ .../sync-catalog-cleanup-serviceaccount.yaml | 23 ++ .../templates/sync-catalog-deployment.yaml | 3 +- charts/consul/values.yaml | 4 + .../subcommand/sync-catalog/command.go | 83 ++---- .../sync-catalog/command_ent_test.go | 150 ++++++++++- .../subcommand/sync-catalog/command_test.go | 244 ++++++++---------- 9 files changed, 650 insertions(+), 209 deletions(-) create mode 100644 .changelog/4378.txt create mode 100644 charts/consul/templates/sync-catalog-cleanup-on-uninstall-job.yaml create mode 100644 charts/consul/templates/sync-catalog-cleanup-on-upgrade-job.yaml create mode 100644 charts/consul/templates/sync-catalog-cleanup-serviceaccount.yaml diff --git a/.changelog/4378.txt b/.changelog/4378.txt new file mode 100644 index 0000000000..30a4287816 --- /dev/null +++ b/.changelog/4378.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +catalog-sync: Added field to helm chart to purge all services registered with catalog-sync from consul on disabling of catalog-sync. +``` diff --git a/charts/consul/templates/sync-catalog-cleanup-on-uninstall-job.yaml b/charts/consul/templates/sync-catalog-cleanup-on-uninstall-job.yaml new file mode 100644 index 0000000000..955042dd0a --- /dev/null +++ b/charts/consul/templates/sync-catalog-cleanup-on-uninstall-job.yaml @@ -0,0 +1,174 @@ +{{- if .Values.syncCatalog.cleanupNodeOnRemoval }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "consul.fullname" . 
}}-sync-catalog-cleanup-on-uninstall + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: sync-catalog-cleanup + {{- if .Values.global.extraLabels }} + {{- toYaml .Values.global.extraLabels | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": hook-succeeded,hook-failed +spec: + template: + metadata: + name: {{ template "consul.fullname" . }}-sync-catalog-cleanup-on-uninstall + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: sync-catalog-cleanup + {{- if .Values.syncCatalog.extraLabels }} + {{- toYaml .Values.syncCatalog.extraLabels | nindent 8 }} + {{- end }} + {{- if .Values.global.extraLabels }} + {{- toYaml .Values.global.extraLabels | nindent 8 }} + {{- end }} + annotations: + "consul.hashicorp.com/connect-inject": "false" + "consul.hashicorp.com/mesh-inject": "false" + {{- if .Values.syncCatalog.annotations }} + {{- tpl .Values.syncCatalog.annotations . | nindent 8 }} + {{- end }} + {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }} + "vault.hashicorp.com/agent-init-first": "true" + "vault.hashicorp.com/agent-inject": "true" + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} + "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} + "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }} + {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} + "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" + "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" + {{- end }} + {{- if .Values.global.secretsBackend.vault.agentAnnotations }} + {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} + {{- end }} + {{- if (and (.Values.global.secretsBackend.vault.vaultNamespace) (not (hasKey (default "" .Values.global.secretsBackend.vault.agentAnnotations | fromYaml) "vault.hashicorp.com/namespace")))}} + "vault.hashicorp.com/namespace": "{{ .Values.global.secretsBackend.vault.vaultNamespace }}" + {{- end }} + {{- end }} + {{- if .Values.syncCatalog.metrics.enabled | default .Values.global.metrics.enabled }} + "prometheus.io/scrape": "true" + {{- if not (hasKey (default "" .Values.syncCatalog.annotations | fromYaml) "prometheus.io/path")}} + "prometheus.io/path": {{ default "/metrics" .Values.syncCatalog.metrics.path }} + {{- end }} + "prometheus.io/port": {{ .Values.syncCatalog.metrics.port | default "20300" | quote }} + {{- end }} + spec: + restartPolicy: Never + serviceAccountName: {{ template "consul.fullname" . }}-sync-catalog-cleanup + containers: + - name: sync-catalog-cleanup-job + image: "{{ default .Values.global.imageK8S .Values.syncCatalog.image }}" + {{ template "consul.imagePullPolicy" . }} + {{- include "consul.restrictedSecurityContext" . | nindent 8 }} + env: + {{- include "consul.consulK8sConsulServerEnvVars" . 
| nindent 8 }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_LOGIN_AUTH_METHOD + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} + value: {{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} + {{- else }} + value: {{ template "consul.fullname" . }}-k8s-component-auth-method + {{- end }} + - name: CONSUL_LOGIN_DATACENTER + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} + value: {{ .Values.global.federation.primaryDatacenter }} + {{- else }} + value: {{ .Values.global.datacenter }} + {{- end }} + - name: CONSUL_LOGIN_META + value: "component=sync-catalog,pod=$(NAMESPACE)/$(POD_NAME)" + {{- end }} + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if (and .Values.syncCatalog.aclSyncToken.secretName .Values.syncCatalog.aclSyncToken.secretKey) }} + - name: CONSUL_ACL_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.syncCatalog.aclSyncToken.secretName }} + key: {{ .Values.syncCatalog.aclSyncToken.secretKey }} + {{- end }} + volumeMounts: + {{- if .Values.global.tls.enabled }} + {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec consul-k8s-control-plane sync-catalog \ + -log-level={{ default .Values.global.logLevel .Values.syncCatalog.logLevel }} \ + -log-json={{ .Values.global.logJSON }} \ + -k8s-default-sync={{ .Values.syncCatalog.default }} \ + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + {{- if .Values.syncCatalog.consulNodeName }} + -consul-node-name={{ .Values.syncCatalog.consulNodeName }} \ + {{- end }} + {{- if .Values.syncCatalog.metrics.enabled | default .Values.global.metrics.enabled }} + -enable-metrics \ + {{- end }} + {{- if .Values.syncCatalog.metrics.path }} + -metrics-path={{ .Values.syncCatalog.metrics.path }} \ + {{- end }} + {{- if .Values.syncCatalog.metrics.port }} + -metrics-port={{ .Values.syncCatalog.metrics.port }} \ + {{- end }} + -prometheus-retention-time={{ .Values.global.metrics.agentMetricsRetentionTime }} \ + -purge-k8s-services-from-node + {{- with .Values.syncCatalog.resources }} + resources: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- if or (eq (.Values.syncCatalog.metrics.enabled | toString) "-") .Values.syncCatalog.metrics.enabled .Values.global.metrics.enabled }} + ports: + - name: prometheus + containerPort: {{ .Values.syncCatalog.metrics.port | default "20300" | int }} + {{- end }} + {{- if .Values.syncCatalog.priorityClassName }} + priorityClassName: {{ .Values.syncCatalog.priorityClassName | quote }} + {{- end }} + {{- if .Values.syncCatalog.nodeSelector }} + nodeSelector: + {{ tpl .Values.syncCatalog.nodeSelector . | indent 8 | trim }} + {{- end }} + {{- if .Values.syncCatalog.affinity }} + affinity: + {{ tpl .Values.syncCatalog.affinity . | indent 8 | trim }} + {{- end }} + {{- if .Values.syncCatalog.tolerations }} + tolerations: + {{ tpl .Values.syncCatalog.tolerations . 
| indent 8 | trim }} + {{- end }} + volumes: + {{- if .Values.global.tls.enabled }} + {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/consul/templates/sync-catalog-cleanup-on-upgrade-job.yaml b/charts/consul/templates/sync-catalog-cleanup-on-upgrade-job.yaml new file mode 100644 index 0000000000..2349c513d4 --- /dev/null +++ b/charts/consul/templates/sync-catalog-cleanup-on-upgrade-job.yaml @@ -0,0 +1,175 @@ +{{- $syncCatalogEnabled := (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} +{{- if and (not $syncCatalogEnabled) .Values.syncCatalog.cleanupNodeOnRemoval .Release.IsUpgrade }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "consul.fullname" . }}-sync-catalog-cleanup-on-upgrade + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: sync-catalog-cleanup + {{- if .Values.global.extraLabels }} + {{- toYaml .Values.global.extraLabels | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": hook-succeeded,hook-failed +spec: + template: + metadata: + name: {{ template "consul.fullname" . }}-sync-catalog-cleanup-on-upgrade + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: sync-catalog-cleanup + {{- if .Values.syncCatalog.extraLabels }} + {{- toYaml .Values.syncCatalog.extraLabels | nindent 8 }} + {{- end }} + {{- if .Values.global.extraLabels }} + {{- toYaml .Values.global.extraLabels | nindent 8 }} + {{- end }} + annotations: + "consul.hashicorp.com/connect-inject": "false" + "consul.hashicorp.com/mesh-inject": "false" + {{- if .Values.syncCatalog.annotations }} + {{- tpl .Values.syncCatalog.annotations . | nindent 8 }} + {{- end }} + {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }} + "vault.hashicorp.com/agent-init-first": "true" + "vault.hashicorp.com/agent-inject": "true" + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} + "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} + "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }} + {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} + "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" + "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" + {{- end }} + {{- if .Values.global.secretsBackend.vault.agentAnnotations }} + {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . 
| nindent 8 | trim }} + {{- end }} + {{- if (and (.Values.global.secretsBackend.vault.vaultNamespace) (not (hasKey (default "" .Values.global.secretsBackend.vault.agentAnnotations | fromYaml) "vault.hashicorp.com/namespace")))}} + "vault.hashicorp.com/namespace": "{{ .Values.global.secretsBackend.vault.vaultNamespace }}" + {{- end }} + {{- end }} + {{- if .Values.syncCatalog.metrics.enabled | default .Values.global.metrics.enabled }} + "prometheus.io/scrape": "true" + {{- if not (hasKey (default "" .Values.syncCatalog.annotations | fromYaml) "prometheus.io/path")}} + "prometheus.io/path": {{ default "/metrics" .Values.syncCatalog.metrics.path }} + {{- end }} + "prometheus.io/port": {{ .Values.syncCatalog.metrics.port | default "20300" | quote }} + {{- end }} + spec: + restartPolicy: Never + serviceAccountName: {{ template "consul.fullname" . }}-sync-catalog-cleanup + containers: + - name: sync-catalog-cleanup-job + image: "{{ default .Values.global.imageK8S .Values.syncCatalog.image }}" + {{ template "consul.imagePullPolicy" . }} + {{- include "consul.restrictedSecurityContext" . | nindent 8 }} + env: + {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 8 }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_LOGIN_AUTH_METHOD + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} + value: {{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} + {{- else }} + value: {{ template "consul.fullname" . }}-k8s-component-auth-method + {{- end }} + - name: CONSUL_LOGIN_DATACENTER + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} + value: {{ .Values.global.federation.primaryDatacenter }} + {{- else }} + value: {{ .Values.global.datacenter }} + {{- end }} + - name: CONSUL_LOGIN_META + value: "component=sync-catalog,pod=$(NAMESPACE)/$(POD_NAME)" + {{- end }} + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if (and .Values.syncCatalog.aclSyncToken.secretName .Values.syncCatalog.aclSyncToken.secretKey) }} + - name: CONSUL_ACL_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.syncCatalog.aclSyncToken.secretName }} + key: {{ .Values.syncCatalog.aclSyncToken.secretKey }} + {{- end }} + volumeMounts: + {{- if .Values.global.tls.enabled }} + {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec consul-k8s-control-plane sync-catalog \ + -log-level={{ default .Values.global.logLevel .Values.syncCatalog.logLevel }} \ + -log-json={{ .Values.global.logJSON }} \ + -k8s-default-sync={{ .Values.syncCatalog.default }} \ + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + {{- if .Values.syncCatalog.consulNodeName }} + -consul-node-name={{ .Values.syncCatalog.consulNodeName }} \ + {{- end }} + {{- if .Values.syncCatalog.metrics.enabled | default .Values.global.metrics.enabled }} + -enable-metrics \ + {{- end }} + {{- if .Values.syncCatalog.metrics.path }} + -metrics-path={{ .Values.syncCatalog.metrics.path }} \ + {{- end }} + {{- if .Values.syncCatalog.metrics.port }} + -metrics-port={{ .Values.syncCatalog.metrics.port }} \ + {{- end }} + -prometheus-retention-time={{ 
.Values.global.metrics.agentMetricsRetentionTime }} \ + -purge-k8s-services-from-node + {{- with .Values.syncCatalog.resources }} + resources: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- if or (eq (.Values.syncCatalog.metrics.enabled | toString) "-") .Values.syncCatalog.metrics.enabled .Values.global.metrics.enabled }} + ports: + - name: prometheus + containerPort: {{ .Values.syncCatalog.metrics.port | default "20300" | int }} + {{- end }} + {{- if .Values.syncCatalog.priorityClassName }} + priorityClassName: {{ .Values.syncCatalog.priorityClassName | quote }} + {{- end }} + {{- if .Values.syncCatalog.nodeSelector }} + nodeSelector: + {{ tpl .Values.syncCatalog.nodeSelector . | indent 8 | trim }} + {{- end }} + {{- if .Values.syncCatalog.affinity }} + affinity: + {{ tpl .Values.syncCatalog.affinity . | indent 8 | trim }} + {{- end }} + {{- if .Values.syncCatalog.tolerations }} + tolerations: + {{ tpl .Values.syncCatalog.tolerations . | indent 8 | trim }} + {{- end }} + volumes: + {{- if .Values.global.tls.enabled }} + {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/consul/templates/sync-catalog-cleanup-serviceaccount.yaml b/charts/consul/templates/sync-catalog-cleanup-serviceaccount.yaml new file mode 100644 index 0000000000..c49852b14c --- /dev/null +++ b/charts/consul/templates/sync-catalog-cleanup-serviceaccount.yaml @@ -0,0 +1,23 @@ +{{- if .Values.syncCatalog.cleanupNodeOnRemoval }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-sync-catalog-cleanup + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: sync-catalog-cleanup + {{- if .Values.syncCatalog.serviceAccount.annotations }} + annotations: + {{ tpl .Values.syncCatalog.serviceAccount.annotations . | nindent 4 | trim }} + {{- end }} +{{- with .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range . }} + - name: {{ .name }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/consul/templates/sync-catalog-deployment.yaml b/charts/consul/templates/sync-catalog-deployment.yaml index 94260b5e44..5e69110810 100644 --- a/charts/consul/templates/sync-catalog-deployment.yaml +++ b/charts/consul/templates/sync-catalog-deployment.yaml @@ -1,4 +1,5 @@ -{{- if (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} +{{- $syncCatalogEnabled := (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} +{{- if $syncCatalogEnabled }} {{- template "consul.reservedNamesFailer" (list .Values.syncCatalog.consulNamespaces.consulDestinationNamespace "syncCatalog.consulNamespaces.consulDestinationNamespace") }} {{ template "consul.validateRequiredCloudSecretsExist" . }} {{ template "consul.validateCloudSecretKeys" . 
}} diff --git a/charts/consul/values.yaml b/charts/consul/values.yaml index b9f9907e59..13615e716c 100644 --- a/charts/consul/values.yaml +++ b/charts/consul/values.yaml @@ -2090,6 +2090,10 @@ syncCatalog: # global.enabled. enabled: false + # True if you want to deregister all services in this cluster from consul that have been registered by the catalog sync when the `enabled` flag is set to false. + # @type: boolean + cleanupNodeOnRemoval: false + # The name of the Docker image (including any tag) for consul-k8s-control-plane # to run the sync program. # @type: string diff --git a/control-plane/subcommand/sync-catalog/command.go b/control-plane/subcommand/sync-catalog/command.go index c3965165ed..36bb996f2f 100644 --- a/control-plane/subcommand/sync-catalog/command.go +++ b/control-plane/subcommand/sync-catalog/command.go @@ -12,20 +12,17 @@ import ( "os" "os/signal" "regexp" - "strings" "sync" "syscall" "time" "github.com/armon/go-metrics/prometheus" - "github.com/cenkalti/backoff" mapset "github.com/deckarep/golang-set" "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" "github.com/prometheus/client_golang/prometheus/promhttp" - "golang.org/x/sync/errgroup" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" @@ -37,7 +34,6 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/helper/controller" "github.com/hashicorp/consul-k8s/control-plane/subcommand" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" - metricsutil "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" ) @@ -67,7 +63,7 @@ type Command struct { flagAddK8SNamespaceSuffix bool flagLogLevel string flagLogJSON bool - flagPurgeK8SServicesFromNode string + flagPurgeK8SServicesFromNode bool flagFilter string // Flags to support namespaces @@ -156,8 +152,8 @@ func (c *Command) init() { "\"debug\", \"info\", \"warn\", and \"error\".") c.flags.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") - c.flags.StringVar(&c.flagPurgeK8SServicesFromNode, "purge-k8s-services-from-node", "", - "Specifies the name of the Consul node for which to deregister synced Kubernetes services.") + c.flags.BoolVar(&c.flagPurgeK8SServicesFromNode, "purge-k8s-services-from-node", false, + "Specifies if services should be purged from the Consul node. If set, all K8S services will be removed from the Consul node.") c.flags.StringVar(&c.flagFilter, "filter", "", "Specifies the expression used to filter the services on the Consul node that will be deregistered. "+ "The syntax for this filter is the same as the syntax used in the List Services for Node API in the Consul catalog.") @@ -279,7 +275,7 @@ func (c *Command) Run(args []string) int { } c.ready = true - if c.flagPurgeK8SServicesFromNode != "" { + if c.flagPurgeK8SServicesFromNode { consulClient, err := consul.NewClientFromConnMgr(consulConfig, c.connMgr) if err != nil { c.UI.Error(fmt.Sprintf("unable to instantiate consul client: %s", err)) @@ -462,64 +458,15 @@ func (c *Command) Run(args []string) int { // remove all k8s services from Consul. 
func (c *Command) removeAllK8SServicesFromConsulNode(consulClient *api.Client) error { - node, _, err := consulClient.Catalog().NodeServiceList(c.flagPurgeK8SServicesFromNode, &api.QueryOptions{Filter: c.flagFilter}) + _, err := consulClient.Catalog().Deregister(&api.CatalogDeregistration{ + Node: c.flagConsulNodeName, + Partition: c.consul.Partition, + }, nil) if err != nil { + c.UI.Error(fmt.Sprintf("unable to deregister all K8S services from Consul: %s", err)) return err } - var firstErr error - services := node.Services - batchSize := 300 - maxRetries := 2 - retryDelay := 200 * time.Millisecond - - // Ask for user confirmation before purging services - for { - c.UI.Info(fmt.Sprintf("Are you sure you want to delete %v K8S services from %v? (y/n): ", len(services), c.flagPurgeK8SServicesFromNode)) - var input string - fmt.Scanln(&input) - if input = strings.ToLower(input); input == "y" { - break - } else if input == "n" { - return nil - } else { - c.UI.Info("Invalid input. Please enter 'y' or 'n'.") - } - } - - for i := 0; i < len(services); i += batchSize { - end := i + batchSize - if end > len(services) { - end = len(services) - } - - var eg errgroup.Group - for _, service := range services[i:end] { - s := service - eg.Go(func() error { - var b backoff.BackOff = backoff.NewConstantBackOff(retryDelay) - b = backoff.WithMaxRetries(b, uint64(maxRetries)) - return backoff.Retry(func() error { - _, err := consulClient.Catalog().Deregister(&api.CatalogDeregistration{ - Node: c.flagPurgeK8SServicesFromNode, - ServiceID: s.ID, - }, nil) - return err - }, b) - }) - } - if err := eg.Wait(); err != nil { - if firstErr == nil { - c.UI.Info("Some K8S services were not deregistered from Consul") - firstErr = err - } - } - c.UI.Info(fmt.Sprintf("Processed %v K8S services from %v", end-i, c.flagPurgeK8SServicesFromNode)) - } - - if firstErr != nil { - return firstErr - } c.UI.Info("All K8S services were deregistered from Consul") return nil } @@ -553,7 +500,7 @@ func (c *Command) validateFlags() error { // For the Consul node name to be discoverable via DNS, it must contain only // dashes and alphanumeric characters. Length is also constrained. // These restrictions match those defined in Consul's agent definition. - var invalidDnsRe = regexp.MustCompile(`[^A-Za-z0-9\\-]+`) + invalidDnsRe := regexp.MustCompile(`[^A-Za-z0-9\\-]+`) const maxDNSLabelLength = 63 if invalidDnsRe.MatchString(c.flagConsulNodeName) { @@ -570,7 +517,7 @@ func (c *Command) validateFlags() error { } if c.flagMetricsPort != "" { - if _, valid := metricsutil.ParseScrapePort(c.flagMetricsPort); !valid { + if _, valid := common.ParseScrapePort(c.flagMetricsPort); !valid { return errors.New("-metrics-port must be a valid unprivileged port number") } } @@ -586,7 +533,7 @@ func (c *Command) recordMetrics() (*prometheus.PrometheusSink, error) { return &prometheus.PrometheusSink{}, err } - var counters = [][]prometheus.CounterDefinition{ + counters := [][]prometheus.CounterDefinition{ catalogtoconsul.SyncToConsulCounters, catalogtok8s.SyncToK8sCounters, } @@ -621,8 +568,9 @@ func (c *Command) authorizeMiddleware() func(http.Handler) http.Handler { } } -const synopsis = "Sync Kubernetes services and Consul services." -const help = ` +const ( + synopsis = "Sync Kubernetes services and Consul services." + help = ` Usage: consul-k8s-control-plane sync-catalog [options] Sync K8S pods, services, and more with the Consul service catalog. @@ -631,3 +579,4 @@ Usage: consul-k8s-control-plane sync-catalog [options] K8S services. 
` +) diff --git a/control-plane/subcommand/sync-catalog/command_ent_test.go b/control-plane/subcommand/sync-catalog/command_ent_test.go index f8bdccda6b..231b2755c6 100644 --- a/control-plane/subcommand/sync-catalog/command_ent_test.go +++ b/control-plane/subcommand/sync-catalog/command_ent_test.go @@ -272,7 +272,6 @@ func TestRun_ToConsulMirroringNamespaces(t *testing.T) { } } - }) }) } @@ -706,3 +705,152 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { }) } } + +// Test services could be de-registered from Consul. +func TestRemoveAllK8SServicesFromConsulWithPartitions(t *testing.T) { + testCases := map[string]struct { + nodeName string + partitionName string + }{ + "default Node name in default partition": { + nodeName: "k8s-sync", + partitionName: "default", + }, + "non-default Node name in default partition": { + nodeName: "custom-node", + partitionName: "default", + }, + "default Node name in non-default partition": { + nodeName: "k8s-sync", + partitionName: "part-1", + }, + "non-default Node name in non-default partition": { + nodeName: "custom-node", + partitionName: "part-1", + }, + } + + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + var err error + + k8s, testClient := completeSetup(t) + consulClient := testClient.APIClient + + // Create a mock reader to simulate user input + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + logger: hclog.New(&hclog.LoggerOptions{ + Name: t.Name(), + Level: hclog.Debug, + }), + flagAllowK8sNamespacesList: []string{"*"}, + connMgr: testClient.Watcher, + } + + otherPartitionName := "the-other-one" + + // create another partition and register 2 services there to the same node to show that we won't delete them + _, _, err = consulClient.Partitions().Create(context.Background(), &api.Partition{Name: otherPartitionName}, nil) + require.NoError(t, err) + + _, err = consulClient.Catalog().Register( + &api.CatalogRegistration{ + Node: tc.nodeName, + Address: "5.5.5.5", + Service: &api.AgentService{ + ID: "other-service-1", + Service: "other-service-1", + Tags: []string{"other-k8s-cluster"}, + Meta: map[string]string{}, + Port: 0, + Address: "5.5.5.5", + Partition: otherPartitionName, + }, + Partition: otherPartitionName, + }, + &api.WriteOptions{Partition: otherPartitionName}, + ) + require.NoError(t, err) + + _, err = consulClient.Catalog().Register( + &api.CatalogRegistration{ + Node: tc.nodeName, + Address: "6.6.6.6", + Service: &api.AgentService{ + ID: "other-service-2", + Service: "other-service-2", + Tags: []string{"other-k8s-cluster"}, + Meta: map[string]string{}, + Port: 0, + Address: "6.6.6.6", + Partition: otherPartitionName, + }, + Partition: otherPartitionName, + }, + &api.WriteOptions{Partition: otherPartitionName}, + ) + require.NoError(t, err) + + // create partition if it does not exist + if tc.partitionName != "default" { + p, _, err := consulClient.Partitions().Create(context.Background(), &api.Partition{Name: tc.partitionName}, nil) + require.NoError(t, err) + require.NotNil(t, p) + } + + // create two services in k8s + _, err = k8s.CoreV1().Services("bar").Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) + require.NoError(t, err) + + _, err = k8s.CoreV1().Services("baz").Create(context.Background(), lbService("foo", "2.2.2.2"), metav1.CreateOptions{}) + require.NoError(t, err) + + longRunningChan := runCommandAsynchronously(&cmd, []string{ + "-addresses", "127.0.0.1", + "-http-port", 
strconv.Itoa(testClient.Cfg.HTTPPort), + "-consul-write-interval", "100ms", + "-partition", tc.partitionName, + "-consul-node-name", tc.nodeName, + "-add-k8s-namespace-suffix", + }) + + // check that the two K8s services have been synced into Consul + retry.Run(t, func(r *retry.R) { + svc, _, err := consulClient.Catalog().Service("foo-bar", "k8s", &api.QueryOptions{Partition: tc.partitionName}) + require.NoError(r, err) + require.Len(r, svc, 1) + require.Equal(r, "1.1.1.1", svc[0].ServiceAddress) + svc, _, err = consulClient.Catalog().Service("foo-baz", "k8s", &api.QueryOptions{Partition: tc.partitionName}) + require.NoError(r, err) + require.Len(r, svc, 1) + require.Equal(r, "2.2.2.2", svc[0].ServiceAddress) + }) + + defer stopCommand(t, &cmd, longRunningChan) + + exitChan := runCommandAsynchronously(&cmd, []string{ + "-addresses", "127.0.0.1", + "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), + "-purge-k8s-services-from-node=true", + "-partition", tc.partitionName, + "-consul-node-name", tc.nodeName, + }) + stopCommand(t, &cmd, exitChan) + + retry.Run(t, func(r *retry.R) { + serviceList, _, err := consulClient.Catalog().NodeServiceList(tc.nodeName, &api.QueryOptions{AllowStale: false, Partition: tc.partitionName}) + require.NoError(r, err) + require.Len(r, serviceList.Services, 0) + otherPartitionServiceList, _, err := consulClient.Catalog().NodeServiceList(tc.nodeName, &api.QueryOptions{AllowStale: false, Partition: otherPartitionName}) + require.NoError(r, err) + require.Len(r, otherPartitionServiceList.Services, 2) + }) + }) + } +} diff --git a/control-plane/subcommand/sync-catalog/command_test.go b/control-plane/subcommand/sync-catalog/command_test.go index b6aa63a903..d4d011b7e0 100644 --- a/control-plane/subcommand/sync-catalog/command_test.go +++ b/control-plane/subcommand/sync-catalog/command_test.go @@ -560,160 +560,124 @@ func TestRun_ToConsulChangingFlags(t *testing.T) { // Test services could be de-registered from Consul. func TestRemoveAllK8SServicesFromConsul(t *testing.T) { - t.Parallel() - - k8s, testClient := completeSetup(t) - consulClient := testClient.APIClient - - // Create a mock reader to simulate user input - input := "y\n" - reader, writer, err := os.Pipe() - require.NoError(t, err) - oldStdin := os.Stdin - os.Stdin = reader - defer func() { os.Stdin = oldStdin }() - - // Write the simulated user input to the mock reader - go func() { - defer writer.Close() - _, err := writer.WriteString(input) - require.NoError(t, err) - }() - - // Run the command. 
- ui := cli.NewMockUi() - cmd := Command{ - UI: ui, - clientset: k8s, - logger: hclog.New(&hclog.LoggerOptions{ - Name: t.Name(), - Level: hclog.Debug, - }), - flagAllowK8sNamespacesList: []string{"*"}, - connMgr: testClient.Watcher, + testCases := map[string]struct { + nodeToDeregisterName string + }{ + "default Node name in default partition": { + nodeToDeregisterName: "k8s-sync", + }, + "non-default Node name in default partition": { + nodeToDeregisterName: "custom-node", + }, } - // create two services in k8s - _, err = k8s.CoreV1().Services("bar").Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) - require.NoError(t, err) - - _, err = k8s.CoreV1().Services("baz").Create(context.Background(), lbService("foo", "2.2.2.2"), metav1.CreateOptions{}) - require.NoError(t, err) - - longRunningChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), - "-consul-write-interval", "100ms", - "-add-k8s-namespace-suffix", - }) - defer stopCommand(t, &cmd, longRunningChan) - - // check that the two K8s services have been synced into Consul - retry.Run(t, func(r *retry.R) { - svc, _, err := consulClient.Catalog().Service("foo-bar", "k8s", nil) - require.NoError(r, err) - require.Len(r, svc, 1) - require.Equal(r, "1.1.1.1", svc[0].ServiceAddress) - svc, _, err = consulClient.Catalog().Service("foo-baz", "k8s", nil) - require.NoError(r, err) - require.Len(r, svc, 1) - require.Equal(r, "2.2.2.2", svc[0].ServiceAddress) - }) + otherNodeName := "other-node" - exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), - "-purge-k8s-services-from-node=k8s-sync", - }) - stopCommand(t, &cmd, exitChan) + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + var err error - retry.Run(t, func(r *retry.R) { - serviceList, _, err := consulClient.Catalog().NodeServiceList("k8s-sync", &api.QueryOptions{AllowStale: false}) - require.NoError(r, err) - require.Len(r, serviceList.Services, 0) - }) -} + k8s, testClient := completeSetup(t) + consulClient := testClient.APIClient -// Test services could be de-registered from Consul with filter. -func TestRemoveAllK8SServicesFromConsulWithFilter(t *testing.T) { - t.Parallel() + // Create a mock reader to simulate user input + // Run the command. 
+ ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + logger: hclog.New(&hclog.LoggerOptions{ + Name: t.Name(), + Level: hclog.Debug, + }), + flagAllowK8sNamespacesList: []string{"*"}, + connMgr: testClient.Watcher, + } - k8s, testClient := completeSetup(t) - consulClient := testClient.APIClient + _, err = consulClient.Catalog().Register( + &api.CatalogRegistration{ + Node: otherNodeName, + Address: "5.5.5.5", + Service: &api.AgentService{ + ID: "other-service-1", + Service: "other-service-1", + Tags: []string{"other-k8s-cluster"}, + Meta: map[string]string{}, + Port: 0, + Address: "5.5.5.5", + }, + }, + &api.WriteOptions{}, + ) + require.NoError(t, err) + + _, err = consulClient.Catalog().Register( + &api.CatalogRegistration{ + Node: otherNodeName, + Address: "6.6.6.6", + Service: &api.AgentService{ + ID: "other-service-2", + Service: "other-service-2", + Tags: []string{"other-k8s-cluster"}, + Meta: map[string]string{}, + Port: 0, + Address: "6.6.6.6", + }, + }, + &api.WriteOptions{}, + ) + require.NoError(t, err) - // Create a mock reader to simulate user input - input := "y\n" - reader, writer, err := os.Pipe() - require.NoError(t, err) - oldStdin := os.Stdin - os.Stdin = reader - defer func() { os.Stdin = oldStdin }() + // create two services in k8s + _, err = k8s.CoreV1().Services("bar").Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) + require.NoError(t, err) - // Write the simulated user input to the mock reader - go func() { - defer writer.Close() - _, err := writer.WriteString(input) - require.NoError(t, err) - }() + _, err = k8s.CoreV1().Services("baz").Create(context.Background(), lbService("foo", "2.2.2.2"), metav1.CreateOptions{}) + require.NoError(t, err) - // Run the command. - ui := cli.NewMockUi() - cmd := Command{ - UI: ui, - clientset: k8s, - logger: hclog.New(&hclog.LoggerOptions{ - Name: t.Name(), - Level: hclog.Debug, - }), - flagAllowK8sNamespacesList: []string{"*"}, - connMgr: testClient.Watcher, - } + longRunningChan := runCommandAsynchronously(&cmd, []string{ + "-addresses", "127.0.0.1", + "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), + "-consul-write-interval", "100ms", + "-consul-node-name", tc.nodeToDeregisterName, + "-add-k8s-namespace-suffix", + }) - // create two services in k8s - _, err = k8s.CoreV1().Services("bar").Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) - require.NoError(t, err) - _, err = k8s.CoreV1().Services("baz").Create(context.Background(), lbService("foo", "2.2.2.2"), metav1.CreateOptions{}) - require.NoError(t, err) - _, err = k8s.CoreV1().Services("bat").Create(context.Background(), lbService("foo", "3.3.3.3"), metav1.CreateOptions{}) - require.NoError(t, err) + // check that the two K8s services have been synced into Consul + retry.Run(t, func(r *retry.R) { + svc, _, err := consulClient.Catalog().Service("foo-bar", "k8s", &api.QueryOptions{}) + require.NoError(r, err) + require.Len(r, svc, 1) + require.Equal(r, "1.1.1.1", svc[0].ServiceAddress) + svc, _, err = consulClient.Catalog().Service("foo-baz", "k8s", &api.QueryOptions{}) + require.NoError(r, err) + require.Len(r, svc, 1) + require.Equal(r, "2.2.2.2", svc[0].ServiceAddress) + }) - longRunningChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), - "-consul-write-interval", "100ms", - "-add-k8s-namespace-suffix", - }) - defer stopCommand(t, &cmd, longRunningChan) + defer stopCommand(t, &cmd, longRunningChan) - // 
check that the name of the service is namespaced - retry.Run(t, func(r *retry.R) { - svc, _, err := consulClient.Catalog().Service("foo-bar", "k8s", nil) - require.NoError(r, err) - require.Len(r, svc, 1) - require.Equal(r, "1.1.1.1", svc[0].ServiceAddress) - svc, _, err = consulClient.Catalog().Service("foo-baz", "k8s", nil) - require.NoError(r, err) - require.Len(r, svc, 1) - require.Equal(r, "2.2.2.2", svc[0].ServiceAddress) - svc, _, err = consulClient.Catalog().Service("foo-bat", "k8s", nil) - require.NoError(r, err) - require.Len(r, svc, 1) - require.Equal(r, "3.3.3.3", svc[0].ServiceAddress) - }) + exitChan := runCommandAsynchronously(&cmd, []string{ + "-addresses", "127.0.0.1", + "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), + "-purge-k8s-services-from-node=true", + "-consul-node-name", tc.nodeToDeregisterName, + }) + stopCommand(t, &cmd, exitChan) - exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), - "-purge-k8s-services-from-node=k8s-sync", - "-filter=baz in ID", - }) - stopCommand(t, &cmd, exitChan) + retry.Run(t, func(r *retry.R) { + serviceList, _, err := consulClient.Catalog().NodeServiceList(tc.nodeToDeregisterName, &api.QueryOptions{AllowStale: false}) + require.NoError(r, err) + require.Len(r, serviceList.Services, 0) - retry.Run(t, func(r *retry.R) { - serviceList, _, err := consulClient.Catalog().NodeServiceList("k8s-sync", &api.QueryOptions{AllowStale: false}) - require.NoError(r, err) - require.Len(r, serviceList.Services, 2) - }) + otherNodeServiceList, _, err := consulClient.Catalog().NodeServiceList(otherNodeName, &api.QueryOptions{AllowStale: false}) + require.NoError(r, err) + require.Len(r, otherNodeServiceList.Services, 2) + }) + }) + } } // Set up test consul agent and fake kubernetes cluster client. From c5f07e809202ca360964d560f81572de094b7233 Mon Sep 17 00:00:00 2001 From: John Maguire Date: Tue, 22 Oct 2024 14:26:44 -0400 Subject: [PATCH 9/9] [NET-11297] added bats testing (#4406) added bats testing --- ...sync-catalog-cleanup-on-uninstall-job.bats | 839 ++++++++++++++++ .../sync-catalog-cleanup-on-upgrade-job.bats | 896 ++++++++++++++++++ .../sync-catalog-cleanup-serviceaccount.bats | 75 ++ 3 files changed, 1810 insertions(+) create mode 100644 charts/consul/test/unit/sync-catalog-cleanup-on-uninstall-job.bats create mode 100644 charts/consul/test/unit/sync-catalog-cleanup-on-upgrade-job.bats create mode 100644 charts/consul/test/unit/sync-catalog-cleanup-serviceaccount.bats diff --git a/charts/consul/test/unit/sync-catalog-cleanup-on-uninstall-job.bats b/charts/consul/test/unit/sync-catalog-cleanup-on-uninstall-job.bats new file mode 100644 index 0000000000..f7f4935ec3 --- /dev/null +++ b/charts/consul/test/unit/sync-catalog-cleanup-on-uninstall-job.bats @@ -0,0 +1,839 @@ +#!/usr/bin/env bats + +load _helpers + +target=templates/sync-catalog-cleanup-on-uninstall-job.yaml + +@test "syncCatalogCleanupJob/Uninstall: disabled by default" { + cd $(chart_dir) + assert_empty helm template \ + -s $target \ + . +} + +@test "syncCatalogCleanupJob/Uninstall: enable with syncCatalog.cleanupNodeOnRemoval true" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# image + +@test "syncCatalogCleanupJob/Uninstall: image defaults to global.imageK8S" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'global.imageK8S=bar' \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Uninstall: image can be overridden with server.image" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'global.imageK8S=foo' \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.image=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Uninstall: consul env defaults" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) + [ "${actual}" = "release-name-consul-server.default.svc" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) + [ "${actual}" = "8502" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) + [ "${actual}" = "8500" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_DATACENTER").value' | tee /dev/stderr) + [ "${actual}" = "dc1" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_API_TIMEOUT").value' | tee /dev/stderr) + [ "${actual}" = "5s" ] +} + +#-------------------------------------------------------------------- +# consulNodeName + +@test "syncCatalogCleanupJob/Uninstall: consulNodeName defaults to k8s-sync" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-node-name=k8s-sync"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalogCleanupJob/Uninstall: consulNodeName set to empty" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.consulNodeName=' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-node-name"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Uninstall: can specify consulNodeName" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.consulNodeName=aNodeName' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-node-name=aNodeName"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# serviceAccount + +@test "syncCatalogCleanupJob/Uninstall: serviceAccount set when sync enabled" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.serviceAccountName | contains("sync-catalog-cleanup")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# aclSyncToken + +@test "syncCatalogCleanupJob/Uninstall: aclSyncToken disabled when secretName is missing" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.aclSyncToken.secretKey=bar' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Uninstall: aclSyncToken disabled when secretKey is missing" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.aclSyncToken.secretName=foo' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Uninstall: aclSyncToken enabled when secretName and secretKey is provided" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.aclSyncToken.secretName=foo' \ + --set 'syncCatalog.aclSyncToken.secretKey=bar' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# affinity + +@test "syncCatalogCleanupJob/Uninstall: affinity not set by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.affinity == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalogCleanupJob/Uninstall: affinity can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.affinity=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec | .affinity == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "syncCatalogCleanupJob/Uninstall: nodeSelector is not set by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "syncCatalogCleanupJob/Uninstall: nodeSelector is not set by default with sync enabled" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "syncCatalogCleanupJob/Uninstall: specified nodeSelector" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.nodeSelector=testing' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# tolerations + +@test "syncCatalogCleanupJob/Uninstall: tolerations not set by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalogCleanupJob/Uninstall: tolerations can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.tolerations=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec | .tolerations == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.acls.manageSystemACLs + +@test "syncCatalogCleanupJob/Uninstall: ACL auth method env vars are set when acls are enabled" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr) + [ "${actual}" = "release-name-consul-k8s-component-auth-method" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr) + [ "${actual}" = "dc1" ] + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_META").value' | tee /dev/stderr) + [ "${actual}" = 'component=sync-catalog,pod=$(NAMESPACE)/$(POD_NAME)' ] +} + +@test "syncCatalogCleanupJob/Uninstall: sets global auth method and primary datacenter when federation and acls and namespaces are enabled" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'global.datacenter=dc2' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.tls.enabled=true' \ + --set 'meshGateway.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. 
| select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr) + [ "${actual}" = "release-name-consul-k8s-component-auth-method-dc2" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr) + [ "${actual}" = "dc1" ] +} + +@test "syncCatalogCleanupJob/Uninstall: sets default login partition and acls and partitions are enabled" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr) + [ "${actual}" = "default" ] +} + +@test "syncCatalogCleanupJob/Uninstall: sets non-default login partition and acls and partitions are enabled" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=foo' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr) + [ "${actual}" = "foo" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "syncCatalogCleanupJob/Uninstall: sets Consul environment variables when global.tls.enabled" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'client.enabled=true' \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) + [ "${actual}" = "8501" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_USE_TLS").value' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "syncCatalogCleanupJob/Uninstall: can overwrite CA secret with the provided one" { + cd $(chart_dir) + local ca_cert_volume=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} + +@test "syncCatalogCleanupJob/Uninstall: consul-ca-cert volumeMount is added when TLS is enabled" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalogCleanupJob/Uninstall: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "syncCatalogCleanupJob/Uninstall: default resources" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = '{"limits":{"cpu":"50m","memory":"50Mi"},"requests":{"cpu":"50m","memory":"50Mi"}}' ] +} + +@test "syncCatalogCleanupJob/Uninstall: can set resources" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.resources.requests.memory=100Mi' \ + --set 'syncCatalog.resources.requests.cpu=100m' \ + --set 'syncCatalog.resources.limits.memory=200Mi' \ + --set 'syncCatalog.resources.limits.cpu=200m' \ + . | tee /dev/stderr | + yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ] +} + +#-------------------------------------------------------------------- +# extraLabels + +@test "syncCatalogCleanupJob/Uninstall: no extra labels defined by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.labels | del(."app") | del(."chart") | del(."release") | del(."component")' | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "syncCatalogCleanupJob/Uninstall: can set extra labels" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.extraLabels.foo=bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + + [ "${actual}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Uninstall: extra global labels can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.extraLabels.foo=bar' \ + . | tee /dev/stderr) + local actualBar=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) + [ "${actualBar}" = "bar" ] + local actualTemplateBar=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + [ "${actualTemplateBar}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Uninstall: multiple extra global labels can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.extraLabels.foo=bar' \ + --set 'global.extraLabels.baz=qux' \ + . | tee /dev/stderr) + local actualFoo=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) + local actualBaz=$(echo "${actual}" | yq -r '.metadata.labels.baz' | tee /dev/stderr) + [ "${actualFoo}" = "bar" ] + [ "${actualBaz}" = "qux" ] + local actualTemplateFoo=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + local actualTemplateBaz=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.baz' | tee /dev/stderr) + [ "${actualTemplateFoo}" = "bar" ] + [ "${actualTemplateBaz}" = "qux" ] +} + +#-------------------------------------------------------------------- +# annotations + +@test "syncCatalogCleanupJob/Uninstall: no annotations defined by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | + del(."consul.hashicorp.com/connect-inject") | + del(."consul.hashicorp.com/mesh-inject")' | + tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "syncCatalogCleanupJob/Uninstall: annotations can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.annotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Uninstall: metrics annotations can be set" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.metrics.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | + del(."consul.hashicorp.com/connect-inject") | + del(."consul.hashicorp.com/mesh-inject")' | + tee /dev/stderr) + + # Annotations to check + annotations=("prometheus.io/scrape" "prometheus.io/path" "prometheus.io/port") + + # Check each annotation + for annotation in "${annotations[@]}"; do + actual=$(echo "$object" | yq -r "has(\"$annotation\")") + [ "$actual" = "true" ] + done +} + +#-------------------------------------------------------------------- +# logLevel + +@test "syncCatalogCleanupJob/Uninstall: logLevel info by default from global" { + cd $(chart_dir) + local cmd=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalogCleanupJob/Uninstall: logLevel can be overridden" { + cd $(chart_dir) + local cmd=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.logLevel=debug' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=debug"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# Vault + +@test "syncCatalogCleanupJob/Uninstall: configures server CA to come from vault when vault is enabled" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check annotations + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "carole" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = "foo" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = $'{{- with secret \"foo\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' ] + + actual=$(echo $object | jq -r '.spec.volumes[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] + + actual=$(echo $object | jq -r '.spec.containers[0].volumeMounts[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "syncCatalogCleanupJob/Uninstall: vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set" { + cd $(chart_dir) + local cmd=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "vns" ] +} + +@test "syncCatalogCleanupJob/Uninstall: correct vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set and agentAnnotations are also set without vaultNamespace annotation" { + cd $(chart_dir) + local cmd=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.secretsBackend.vault.agentAnnotations=vault.hashicorp.com/agent-extra-secret: bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "vns" ] +} + +@test "syncCatalogCleanupJob/Uninstall: correct vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set and agentAnnotations are also set with vaultNamespace annotation" { + cd $(chart_dir) + local cmd=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.secretsBackend.vault.agentAnnotations=vault.hashicorp.com/namespace: bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Uninstall: vault CA is not configured by default" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Uninstall: vault CA is not configured when secretName is set but secretKey is not" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Uninstall: vault CA is not configured when secretKey is set but secretName is not" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Uninstall: vault CA is configured when both secretName and secretKey are set" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-extra-secret"') + [ "${actual}" = "ca" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/ca-cert"') + [ "${actual}" = "/vault/custom/tls.crt" ] +} + +#-------------------------------------------------------------------- +# Vault agent annotations + +@test "syncCatalogCleanupJob/Uninstall: no vault agent annotations defined by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | + del(."consul.hashicorp.com/connect-inject") | + del(."consul.hashicorp.com/mesh-inject") | + del(."vault.hashicorp.com/agent-inject") | + del(."vault.hashicorp.com/role")' | + tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "syncCatalogCleanupJob/Uninstall: vault agent annotations can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} diff --git a/charts/consul/test/unit/sync-catalog-cleanup-on-upgrade-job.bats b/charts/consul/test/unit/sync-catalog-cleanup-on-upgrade-job.bats new file mode 100644 index 0000000000..01c7a4f45e --- /dev/null +++ b/charts/consul/test/unit/sync-catalog-cleanup-on-upgrade-job.bats @@ -0,0 +1,896 @@ +#!/usr/bin/env bats + +load _helpers + +target=templates/sync-catalog-cleanup-on-upgrade-job.yaml + +@test "syncCatalogCleanupJob/Upgrade: disabled by default" { + cd $(chart_dir) + assert_empty helm template \ + -s $target \ + . +} + +@test "syncCatalogCleanupJob/Upgrade: disabled with syncCatalog.cleanupNodeOnRemoval true and syncCatalog.enabled true and Release.IsUpgrade true" { + cd $(chart_dir) + assert_empty helm template \ + -s $target \ + --set 'syncCatalog.enabled=true' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + . +} + +@test "syncCatalogCleanupJob/Upgrade: enable with syncCatalog.cleanupNodeOnRemoval true and syncCatalog.enabled false and Release.IsUpgrade true" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + . 
| tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# image + +@test "syncCatalogCleanupJob/Upgrade: image defaults to global.imageK8S" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'global.imageK8S=bar' \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Upgrade: image can be overridden with server.image" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'global.imageK8S=foo' \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.image=bar' \ + --is-upgrade \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Upgrade: consul env defaults" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) + [ "${actual}" = "release-name-consul-server.default.svc" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) + [ "${actual}" = "8502" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) + [ "${actual}" = "8500" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_DATACENTER").value' | tee /dev/stderr) + [ "${actual}" = "dc1" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_API_TIMEOUT").value' | tee /dev/stderr) + [ "${actual}" = "5s" ] +} + +#-------------------------------------------------------------------- +# consulNodeName + +@test "syncCatalogCleanupJob/Upgrade: consulNodeName defaults to k8s-sync" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-node-name=k8s-sync"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalogCleanupJob/Upgrade: consulNodeName set to empty" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + --set 'syncCatalog.consulNodeName=' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-node-name"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Upgrade: can specify consulNodeName" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + --set 'syncCatalog.consulNodeName=aNodeName' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-node-name=aNodeName"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# serviceAccount + +@test "syncCatalogCleanupJob/Upgrade: serviceAccount set when sync enabled" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + . | tee /dev/stderr | + yq '.spec.template.spec.serviceAccountName | contains("sync-catalog-cleanup")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# aclSyncToken + +@test "syncCatalogCleanupJob/Upgrade: aclSyncToken disabled when secretName is missing" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + --set 'syncCatalog.aclSyncToken.secretKey=bar' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Upgrade: aclSyncToken disabled when secretKey is missing" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + --set 'syncCatalog.aclSyncToken.secretName=foo' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Upgrade: aclSyncToken enabled when secretName and secretKey is provided" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + --set 'syncCatalog.aclSyncToken.secretName=foo' \ + --set 'syncCatalog.aclSyncToken.secretKey=bar' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# affinity + +@test "syncCatalogCleanupJob/Upgrade: affinity not set by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + . | tee /dev/stderr | + yq '.spec.template.spec.affinity == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalogCleanupJob/Upgrade: affinity can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.affinity=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec | .affinity == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "syncCatalogCleanupJob/Upgrade: nodeSelector is not set by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "syncCatalogCleanupJob/Upgrade: nodeSelector is not set by default with sync enabled" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "syncCatalogCleanupJob/Upgrade: specified nodeSelector" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.nodeSelector=testing' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# tolerations + +@test "syncCatalogCleanupJob/Upgrade: tolerations not set by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalogCleanupJob/Upgrade: tolerations can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.tolerations=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec | .tolerations == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.acls.manageSystemACLs + +@test "syncCatalogCleanupJob/Upgrade: ACL auth method env vars are set when acls are enabled" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr) + [ "${actual}" = "release-name-consul-k8s-component-auth-method" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr) + [ "${actual}" = "dc1" ] + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_META").value' | tee /dev/stderr) + [ "${actual}" = 'component=sync-catalog,pod=$(NAMESPACE)/$(POD_NAME)' ] +} + +@test "syncCatalogCleanupJob/Upgrade: sets global auth method and primary datacenter when federation and acls and namespaces are enabled" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'global.datacenter=dc2' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.tls.enabled=true' \ + --set 'meshGateway.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. 
| select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr) + [ "${actual}" = "release-name-consul-k8s-component-auth-method-dc2" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr) + [ "${actual}" = "dc1" ] +} + +@test "syncCatalogCleanupJob/Upgrade: sets default login partition and acls and partitions are enabled" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr) + [ "${actual}" = "default" ] +} + +@test "syncCatalogCleanupJob/Upgrade: sets non-default login partition and acls and partitions are enabled" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=foo' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr) + [ "${actual}" = "foo" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "syncCatalogCleanupJob/Upgrade: sets Consul environment variables when global.tls.enabled" { + cd $(chart_dir) + local env=$(helm template \ + -s $target \ + --set 'client.enabled=true' \ + --is-upgrade \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) + [ "${actual}" = "8501" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_USE_TLS").value' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "syncCatalogCleanupJob/Upgrade: can overwrite CA secret with the provided one" { + cd $(chart_dir) + local ca_cert_volume=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that the volume uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} + +@test "syncCatalogCleanupJob/Upgrade: consul-ca-cert volumeMount is added when TLS is enabled" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalogCleanupJob/Upgrade: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "syncCatalogCleanupJob/Upgrade: default resources" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = '{"limits":{"cpu":"50m","memory":"50Mi"},"requests":{"cpu":"50m","memory":"50Mi"}}' ] +} + +@test "syncCatalogCleanupJob/Upgrade: can set resources" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.resources.requests.memory=100Mi' \ + --set 'syncCatalog.resources.requests.cpu=100m' \ + --set 'syncCatalog.resources.limits.memory=200Mi' \ + --set 'syncCatalog.resources.limits.cpu=200m' \ + . | tee /dev/stderr | + yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ] +} + +#-------------------------------------------------------------------- +# extraLabels + +@test "syncCatalogCleanupJob/Upgrade: no extra labels defined by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata.labels | del(."app") | del(."chart") | del(."release") | del(."component")' | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "syncCatalogCleanupJob/Upgrade: can set extra labels" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.extraLabels.foo=bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + + [ "${actual}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Upgrade: extra global labels can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.extraLabels.foo=bar' \ + . | tee /dev/stderr) + local actualBar=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) + [ "${actualBar}" = "bar" ] + local actualTemplateBar=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + [ "${actualTemplateBar}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Upgrade: multiple extra global labels can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.extraLabels.foo=bar' \ + --set 'global.extraLabels.baz=qux' \ + . | tee /dev/stderr) + local actualFoo=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) + local actualBaz=$(echo "${actual}" | yq -r '.metadata.labels.baz' | tee /dev/stderr) + [ "${actualFoo}" = "bar" ] + [ "${actualBaz}" = "qux" ] + local actualTemplateFoo=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) + local actualTemplateBaz=$(echo "${actual}" | yq -r '.spec.template.metadata.labels.baz' | tee /dev/stderr) + [ "${actualTemplateFoo}" = "bar" ] + [ "${actualTemplateBaz}" = "qux" ] +} + +#-------------------------------------------------------------------- +# annotations + +@test "syncCatalogCleanupJob/Upgrade: no annotations defined by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | + del(."consul.hashicorp.com/connect-inject") | + del(."consul.hashicorp.com/mesh-inject")' | + tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "syncCatalogCleanupJob/Upgrade: annotations can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'syncCatalog.annotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Upgrade: metrics annotations can be set" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --is-upgrade \ + --set 'syncCatalog.metrics.enabled=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | + del(."consul.hashicorp.com/connect-inject") | + del(."consul.hashicorp.com/mesh-inject")' | + tee /dev/stderr) + + # Annotations to check + annotations=("prometheus.io/scrape" "prometheus.io/path" "prometheus.io/port") + + # Check each annotation + for annotation in "${annotations[@]}"; do + actual=$(echo "$object" | yq -r "has(\"$annotation\")") + [ "$actual" = "true" ] + done +} + +#-------------------------------------------------------------------- +# logLevel + +@test "syncCatalogCleanupJob/Upgrade: logLevel info by default from global" { + cd $(chart_dir) + local cmd=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalogCleanupJob/Upgrade: logLevel can be overridden" { + cd $(chart_dir) + local cmd=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.logLevel=debug' \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-log-level=debug"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# Vault + +@test "syncCatalogCleanupJob/Upgrade: configures server CA to come from vault when vault is enabled" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check annotations + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "carole" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = "foo" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = $'{{- with secret \"foo\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' ] + + actual=$(echo $object | jq -r '.spec.volumes[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] + + actual=$(echo $object | jq -r '.spec.containers[0].volumeMounts[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "syncCatalogCleanupJob/Upgrade: vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set" { + cd $(chart_dir) + local cmd=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "vns" ] +} + +@test "syncCatalogCleanupJob/Upgrade: correct vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set and agentAnnotations are also set without vaultNamespace annotation" { + cd $(chart_dir) + local cmd=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.secretsBackend.vault.agentAnnotations=vault.hashicorp.com/agent-extra-secret: bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "vns" ] +} + +@test "syncCatalogCleanupJob/Upgrade: correct vault namespace annotations is set when global.secretsBackend.vault.vaultNamespace is set and agentAnnotations are also set with vaultNamespace annotation" { + cd $(chart_dir) + local cmd=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.vaultNamespace=vns' \ + --set 'global.secretsBackend.vault.agentAnnotations=vault.hashicorp.com/namespace: bar' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/namespace"]' | tee /dev/stderr)" + [ "${actual}" = "bar" ] +} + +@test "syncCatalogCleanupJob/Upgrade: vault CA is not configured by default" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Upgrade: vault CA is not configured when secretName is set but secretKey is not" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Upgrade: vault CA is not configured when secretKey is set but secretName is not" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanupJob/Upgrade: vault CA is configured when both secretName and secretKey are set" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-extra-secret"') + [ "${actual}" = "ca" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/ca-cert"') + [ "${actual}" = "/vault/custom/tls.crt" ] +} + +#-------------------------------------------------------------------- +# Vault agent annotations + +@test "syncCatalogCleanupJob/Upgrade: no vault agent annotations defined by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | + del(."consul.hashicorp.com/connect-inject") | + del(."consul.hashicorp.com/mesh-inject") | + del(."vault.hashicorp.com/agent-inject") | + del(."vault.hashicorp.com/role")' | + tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "syncCatalogCleanupJob/Upgrade: vault agent annotations can be set" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.enabled=false' \ + --is-upgrade \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} diff --git a/charts/consul/test/unit/sync-catalog-cleanup-serviceaccount.bats b/charts/consul/test/unit/sync-catalog-cleanup-serviceaccount.bats new file mode 100644 index 0000000000..8362eb87c8 --- /dev/null +++ b/charts/consul/test/unit/sync-catalog-cleanup-serviceaccount.bats @@ -0,0 +1,75 @@ +#!/usr/bin/env bats + +load _helpers + +target=templates/sync-catalog-cleanup-serviceaccount.yaml + +@test "syncCatalogCleanup/ServiceAccount: disabled by default" { + cd $(chart_dir) + assert_empty helm template \ + -s $target \ + . +} + +@test "syncCatalogCleanup/ServiceAccount: disabled with cleanup disabled" { + cd $(chart_dir) + assert_empty helm template \ + -s $target \ + --set 'syncCatalog.cleanupNodeOnRemoval=false' \ + . +} + +@test "syncCatalogCleanup/ServiceAccount: enabled with cleanup enabled" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.imagePullSecrets + +@test "syncCatalogCleanup/ServiceAccount: can set image pull secrets" { + cd $(chart_dir) + local object=$(helm template \ + -s $target \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set 'global.imagePullSecrets[0].name=my-secret' \ + --set 'global.imagePullSecrets[1].name=my-secret2' \ + . | tee /dev/stderr) + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[0].name' | tee /dev/stderr) + [ "${actual}" = "my-secret" ] + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[1].name' | tee /dev/stderr) + [ "${actual}" = "my-secret2" ] +} + +#-------------------------------------------------------------------- +# syncCatalog.serviceAccount.annotations + +@test "syncCatalogCleanup/ServiceAccount: no annotations by default" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + . | tee /dev/stderr | + yq '.metadata.annotations | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "syncCatalogCleanup/ServiceAccount: annotations when enabled" { + cd $(chart_dir) + local actual=$(helm template \ + -s $target \ + --set 'syncCatalog.cleanupNodeOnRemoval=true' \ + --set "syncCatalog.serviceAccount.annotations=foo: bar" \ + . | tee /dev/stderr | + yq -r '.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +}